This is the most up-to-date working #2

Merged
MassiveAtoms merged 26 commits from moretables into master on 2020-04-17 16:47:27 +00:00
18 changed files with 6930 additions and 184 deletions
Showing only changes of commit 5c34ef6f10


@@ -17,8 +17,7 @@ add_executable(studproject
./src/includes/tests.h
./src/includes/aggregate_tests.h
./src/includes/3thparty/emilib/loguru.cpp
-main.cpp
+./src/main.cpp
)
target_link_libraries(studproject


@@ -302,4 +302,52 @@ int_delete, 'absl::node_hash_map<std::__cxx11::basic_string<char>, std::__cxx11:
int_insert, 'absl::node_hash_map<std::__cxx11::basic_string<char>, std::__cxx11::basic_string<char> >', 332, 351, 180, 308, 210, 445, 214, 245, 336, 490, 226, 245, 263, 578
int_succ_lookup, 'absl::node_hash_map<std::__cxx11::basic_string<char>, std::__cxx11::basic_string<char> >', 161, 212, 192, 248, 238, 430, 252, 209, 304, 331, 264, 263, 268, 402
int_nosucc_lookup, 'absl::node_hash_map<std::__cxx11::basic_string<char>, std::__cxx11::basic_string<char> >', 1630, 385, 182, 393, 208, 425, 209, 210, 369, 464, 234, 251, 284, 606
int_delete, 'absl::node_hash_map<std::__cxx11::basic_string<char>, std::__cxx11::basic_string<char> >', 362, 347, 213, 410, 267, 336, 253, 243, 464, 491, 287, 299, 299, 642
int_insert, 'google::sparse_hash_map<int, int>', 177, 175, 155, 211, 162, 196, 215, 254, 237, 262, 300, 352, 281, 293
int_succ_lookup, 'google::sparse_hash_map<int, int>', 44, 44, 40, 56, 48, 59, 67, 76, 79, 95, 99, 114, 107, 112
int_nosucc_lookup, 'google::sparse_hash_map<int, int>', 551, 792, 168, 1570, 178, 195, 232, 279, 239, 360, 311, 378, 281, 294
int_delete, 'google::sparse_hash_map<int, int>', 81, 46, 53, 118, 62, 66, 80, 96, 88, 156, 121, 134, 119, 117
int_insert, 'google::sparse_hash_map<int, int>', 172, 214, 152, 187, 176, 191, 218, 283, 253, 258, 296, 344, 270, 294
int_succ_lookup, 'google::sparse_hash_map<int, int>', 40, 47, 40, 51, 53, 59, 74, 81, 81, 87, 102, 115, 99, 109
int_nosucc_lookup, 'google::sparse_hash_map<int, int>', 457, 916, 160, 1487, 185, 200, 244, 279, 236, 272, 298, 387, 279, 291
int_delete, 'google::sparse_hash_map<int, int>', 43, 53, 52, 55, 64, 69, 84, 93, 85, 105, 106, 197, 118, 118
int_insert, 'google::sparse_hash_map<std::__cxx11::basic_string<char>, std::__cxx11::basic_string<char> >', 1031, 1306, 1189, 1503, 1499, 1294, 1435, 1587, 1193, 1350, 1492, 1692, 1336, 1384
int_succ_lookup, 'google::sparse_hash_map<std::__cxx11::basic_string<char>, std::__cxx11::basic_string<char> >', 136, 157, 164, 187, 246, 177, 194, 204, 193, 196, 204, 223, 211, 215
int_nosucc_lookup, 'google::sparse_hash_map<std::__cxx11::basic_string<char>, std::__cxx11::basic_string<char> >', 2306, 3944, 1232, 7215, 1555, 1408, 1465, 1600, 1203, 1344, 1504, 1678, 1183, 1266
int_delete, 'google::sparse_hash_map<std::__cxx11::basic_string<char>, std::__cxx11::basic_string<char> >', 110, 137, 156, 158, 176, 180, 197, 218, 190, 199, 215, 249, 212, 219
int_insert, 'google::sparse_hash_map<std::__cxx11::basic_string<char>, std::__cxx11::basic_string<char> >', 1027, 1232, 1102, 1408, 1187, 1344, 1382, 1543, 1146, 1747, 1561, 1652, 1143, 1251
int_succ_lookup, 'google::sparse_hash_map<std::__cxx11::basic_string<char>, std::__cxx11::basic_string<char> >', 121, 161, 165, 188, 168, 175, 188, 204, 187, 301, 242, 234, 201, 218
int_nosucc_lookup, 'google::sparse_hash_map<std::__cxx11::basic_string<char>, std::__cxx11::basic_string<char> >', 2275, 3930, 1178, 7380, 1109, 1241, 1389, 1572, 1162, 1744, 1565, 1705, 1132, 1223
int_delete, 'google::sparse_hash_map<std::__cxx11::basic_string<char>, std::__cxx11::basic_string<char> >', 109, 142, 158, 291, 169, 182, 193, 253, 199, 218, 231, 245, 203, 224
int_insert, 'google::dense_hash_map<int, int>', 31, 54, 42, 41, 40, 38, 41, 44, 51, 39, 41, 44, 52, 56
int_succ_lookup, 'google::dense_hash_map<int, int>', 24, 19, 20, 20, 19, 21, 20, 22, 23, 22, 23, 23, 26, 26
int_nosucc_lookup, 'google::dense_hash_map<int, int>', 47, 42, 41, 46, 44, 39, 42, 47, 53, 41, 45, 52, 58, 68
int_delete, 'google::dense_hash_map<int, int>', 22, 38, 22, 17, 16, 13, 14, 15, 19, 14, 15, 17, 26, 26
int_insert, 'google::dense_hash_map<int, int>', 27, 79, 37, 35, 39, 36, 37, 42, 49, 40, 44, 44, 49, 64
int_succ_lookup, 'google::dense_hash_map<int, int>', 11, 32, 17, 17, 20, 22, 20, 21, 23, 22, 26, 26, 25, 27
int_nosucc_lookup, 'google::dense_hash_map<int, int>', 34, 87, 34, 39, 44, 40, 48, 45, 53, 42, 47, 49, 53, 61
int_delete, 'google::dense_hash_map<int, int>', 15, 46, 11, 12, 17, 15, 15, 15, 19, 14, 17, 18, 19, 24
int_insert, 'google::dense_hash_map<std::__cxx11::basic_string<char>, std::__cxx11::basic_string<char> >', 152, 154, 145, 158, 171, 156, 163, 169, 189, 175, 178, 191, 192, 197
int_succ_lookup, 'google::dense_hash_map<std::__cxx11::basic_string<char>, std::__cxx11::basic_string<char> >', 91, 98, 93, 108, 113, 105, 107, 110, 116, 116, 118, 121, 123, 125
int_nosucc_lookup, 'google::dense_hash_map<std::__cxx11::basic_string<char>, std::__cxx11::basic_string<char> >', 155, 163, 157, 170, 170, 157, 165, 178, 183, 167, 174, 181, 194, 190
int_delete, 'google::dense_hash_map<std::__cxx11::basic_string<char>, std::__cxx11::basic_string<char> >', 95, 90, 87, 99, 103, 92, 97, 110, 115, 100, 108, 110, 118, 123
int_insert, 'google::dense_hash_map<std::__cxx11::basic_string<char>, std::__cxx11::basic_string<char> >', 141, 144, 146, 172, 175, 158, 164, 176, 196, 171, 175, 182, 190, 195
int_succ_lookup, 'google::dense_hash_map<std::__cxx11::basic_string<char>, std::__cxx11::basic_string<char> >', 79, 95, 104, 106, 111, 102, 110, 111, 118, 111, 114, 123, 123, 125
int_nosucc_lookup, 'google::dense_hash_map<std::__cxx11::basic_string<char>, std::__cxx11::basic_string<char> >', 181, 164, 158, 165, 171, 166, 182, 186, 186, 169, 178, 189, 189, 193
int_delete, 'google::dense_hash_map<std::__cxx11::basic_string<char>, std::__cxx11::basic_string<char> >', 85, 78, 94, 94, 110, 97, 99, 110, 123, 113, 113, 109, 138, 124
int_insert, 'std::unordered_map<int, int>', 138, 228, 207, 219, 241, 250, 257, 265, 268, 286, 281, 274, 286, 291
int_succ_lookup, 'std::unordered_map<int, int>', 68, 162, 91, 105, 111, 112, 116, 121, 117, 126, 125, 139, 126, 127
int_nosucc_lookup, 'std::unordered_map<int, int>', 625, 836, 1392, 1522, 2005, 260, 2765, 281, 271, 4995, 281, 291, 7592, 293
int_delete, 'std::unordered_map<int, int>', 162, 183, 237, 265, 278, 320, 305, 311, 302, 329, 354, 336, 393, 335
int_insert, 'std::unordered_map<int, int>', 198, 262, 246, 227, 239, 241, 263, 257, 275, 272, 406, 471, 291, 285
int_succ_lookup, 'std::unordered_map<int, int>', 88, 94, 112, 104, 107, 111, 118, 120, 127, 121, 340, 249, 129, 130
int_nosucc_lookup, 'std::unordered_map<int, int>', 407, 1019, 1505, 1527, 2201, 259, 2727, 268, 258, 4865, 367, 442, 7269, 290
int_delete, 'std::unordered_map<int, int>', 223, 246, 402, 250, 347, 281, 300, 306, 305, 325, 384, 441, 342, 341
int_insert, 'std::unordered_map<std::__cxx11::basic_string<char>, std::__cxx11::basic_string<char> >', 248, 372, 335, 352, 351, 355, 372, 377, 414, 422, 449, 429, 440, 435
int_succ_lookup, 'std::unordered_map<std::__cxx11::basic_string<char>, std::__cxx11::basic_string<char> >', 171, 298, 276, 286, 297, 292, 304, 330, 302, 310, 308, 318, 319, 322
int_nosucc_lookup, 'std::unordered_map<std::__cxx11::basic_string<char>, std::__cxx11::basic_string<char> >', 886, 1381, 1780, 2165, 2671, 391, 3570, 434, 429, 6488, 414, 435, 9678, 435
int_delete, 'std::unordered_map<std::__cxx11::basic_string<char>, std::__cxx11::basic_string<char> >', 370, 348, 342, 366, 372, 373, 384, 417, 387, 393, 396, 402, 396, 401
int_insert, 'std::unordered_map<std::__cxx11::basic_string<char>, std::__cxx11::basic_string<char> >', 367, 298, 336, 354, 361, 376, 371, 370, 366, 382, 402, 386, 399, 397
int_succ_lookup, 'std::unordered_map<std::__cxx11::basic_string<char>, std::__cxx11::basic_string<char> >', 299, 242, 270, 282, 299, 292, 303, 417, 328, 316, 317, 316, 331, 362
int_nosucc_lookup, 'std::unordered_map<std::__cxx11::basic_string<char>, std::__cxx11::basic_string<char> >', 757, 1204, 1897, 2121, 2788, 382, 3596, 390, 405, 6269, 409, 412, 9452, 405
int_delete, 'std::unordered_map<std::__cxx11::basic_string<char>, std::__cxx11::basic_string<char> >', 402, 320, 330, 359, 366, 378, 384, 378, 373, 407, 389, 397, 410, 452


@@ -0,0 +1,369 @@
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ----
//
// This is just a very thin wrapper over densehashtable.h, just
// like sgi stl's stl_hash_map is a very thin wrapper over
// stl_hashtable. The major thing we define is operator[], because
// we have a concept of a data_type which stl_hashtable doesn't
// (it only has a key and a value).
//
// NOTE: this is exactly like sparse_hash_map.h, with the word
// "sparse" replaced by "dense", except for the addition of
// set_empty_key().
//
// YOU MUST CALL SET_EMPTY_KEY() IMMEDIATELY AFTER CONSTRUCTION.
//
// Otherwise your program will die in mysterious ways. (Note if you
// use the constructor that takes an InputIterator range, you pass in
// the empty key in the constructor, rather than after. As a result,
// this constructor differs from the standard STL version.)
//
// In other respects, we adhere mostly to the STL semantics for
// hash-map. One important exception is that insert() may invalidate
// iterators entirely -- STL semantics are that insert() may reorder
// iterators, but they all still refer to something valid in the
// hashtable. Not so for us. Likewise, insert() may invalidate
// pointers into the hashtable. (Whether insert invalidates iterators
// and pointers depends on whether it results in a hashtable resize).
// On the plus side, delete() doesn't invalidate iterators or pointers
// at all, or even change the ordering of elements.
//
// Here are a few "power user" tips:
//
// 1) set_deleted_key():
// If you want to use erase() you *must* call set_deleted_key(),
// in addition to set_empty_key(), after construction.
// The deleted and empty keys must differ.
//
// 2) resize(0):
// When an item is deleted, its memory isn't freed right
// away. This allows you to iterate over a hashtable,
// and call erase(), without invalidating the iterator.
// To force the memory to be freed, call resize(0).
// For tr1 compatibility, this can also be called as rehash(0).
//
// 3) min_load_factor(0.0)
// Setting the minimum load factor to 0.0 guarantees that
// the hash table will never shrink.
//
// Roughly speaking:
// (1) dense_hash_map: fastest, uses the most memory unless entries are small
// (2) sparse_hash_map: slowest, uses the least memory
// (3) hash_map / unordered_map (STL): in the middle
//
// Typically I use sparse_hash_map when I care about space and/or when
// I need to save the hashtable on disk. I use hash_map otherwise. I
// don't personally use dense_hash_set ever; some people use it for
// small sets with lots of lookups.
//
// - dense_hash_map has, typically, about 78% memory overhead (if your
// data takes up X bytes, the hash_map uses .78X more bytes in overhead).
// - sparse_hash_map has about 4 bits overhead per entry.
// - sparse_hash_map can be 3-7 times slower than the others for lookup and,
// especially, inserts. See time_hash_map.cc for details.
//
// See /usr/(local/)?doc/sparsehash-*/dense_hash_map.html
// for information about how to use this class.
#ifndef _DENSE_HASH_MAP_H_
#define _DENSE_HASH_MAP_H_
#include <sparsehash/internal/sparseconfig.h>
#include <algorithm> // needed by stl_alloc
#include <functional> // for equal_to<>, select1st<>, etc
#include <memory> // for alloc
#include <utility> // for pair<>
#include <sparsehash/internal/densehashtable.h> // IWYU pragma: export
#include <sparsehash/internal/libc_allocator_with_realloc.h>
#include HASH_FUN_H // for hash<>
_START_GOOGLE_NAMESPACE_
template <class Key, class T,
class HashFcn = SPARSEHASH_HASH<Key>, // defined in sparseconfig.h
class EqualKey = std::equal_to<Key>,
class Alloc = libc_allocator_with_realloc<std::pair<const Key, T> > >
class dense_hash_map {
private:
// Apparently select1st is not stl-standard, so we define our own
struct SelectKey {
typedef const Key& result_type;
const Key& operator()(const std::pair<const Key, T>& p) const {
return p.first;
}
};
struct SetKey {
void operator()(std::pair<const Key, T>* value, const Key& new_key) const {
*const_cast<Key*>(&value->first) = new_key;
// It would be nice to clear the rest of value here as well, in
// case it's taking up a lot of memory. We do this by clearing
// the value. This assumes T has a zero-arg constructor!
value->second = T();
}
};
// For operator[].
struct DefaultValue {
std::pair<const Key, T> operator()(const Key& key) {
return std::make_pair(key, T());
}
};
// The actual data
typedef dense_hashtable<std::pair<const Key, T>, Key, HashFcn, SelectKey,
SetKey, EqualKey, Alloc> ht;
ht rep;
public:
typedef typename ht::key_type key_type;
typedef T data_type;
typedef T mapped_type;
typedef typename ht::value_type value_type;
typedef typename ht::hasher hasher;
typedef typename ht::key_equal key_equal;
typedef Alloc allocator_type;
typedef typename ht::size_type size_type;
typedef typename ht::difference_type difference_type;
typedef typename ht::pointer pointer;
typedef typename ht::const_pointer const_pointer;
typedef typename ht::reference reference;
typedef typename ht::const_reference const_reference;
typedef typename ht::iterator iterator;
typedef typename ht::const_iterator const_iterator;
typedef typename ht::local_iterator local_iterator;
typedef typename ht::const_local_iterator const_local_iterator;
// Iterator functions
iterator begin() { return rep.begin(); }
iterator end() { return rep.end(); }
const_iterator begin() const { return rep.begin(); }
const_iterator end() const { return rep.end(); }
// These come from tr1's unordered_map. For us, a bucket has 0 or 1 elements.
local_iterator begin(size_type i) { return rep.begin(i); }
local_iterator end(size_type i) { return rep.end(i); }
const_local_iterator begin(size_type i) const { return rep.begin(i); }
const_local_iterator end(size_type i) const { return rep.end(i); }
// Accessor functions
allocator_type get_allocator() const { return rep.get_allocator(); }
hasher hash_funct() const { return rep.hash_funct(); }
hasher hash_function() const { return hash_funct(); }
key_equal key_eq() const { return rep.key_eq(); }
// Constructors
explicit dense_hash_map(size_type expected_max_items_in_table = 0,
const hasher& hf = hasher(),
const key_equal& eql = key_equal(),
const allocator_type& alloc = allocator_type())
: rep(expected_max_items_in_table, hf, eql, SelectKey(), SetKey(), alloc) {
}
template <class InputIterator>
dense_hash_map(InputIterator f, InputIterator l,
const key_type& empty_key_val,
size_type expected_max_items_in_table = 0,
const hasher& hf = hasher(),
const key_equal& eql = key_equal(),
const allocator_type& alloc = allocator_type())
: rep(expected_max_items_in_table, hf, eql, SelectKey(), SetKey(), alloc) {
set_empty_key(empty_key_val);
rep.insert(f, l);
}
// We use the default copy constructor
// We use the default operator=()
// We use the default destructor
void clear() { rep.clear(); }
// This clears the hash map without resizing it down to the minimum
// bucket count, but rather keeps the number of buckets constant
void clear_no_resize() { rep.clear_no_resize(); }
void swap(dense_hash_map& hs) { rep.swap(hs.rep); }
// Functions concerning size
size_type size() const { return rep.size(); }
size_type max_size() const { return rep.max_size(); }
bool empty() const { return rep.empty(); }
size_type bucket_count() const { return rep.bucket_count(); }
size_type max_bucket_count() const { return rep.max_bucket_count(); }
// These are tr1 methods. bucket() is the bucket the key is or would be in.
size_type bucket_size(size_type i) const { return rep.bucket_size(i); }
size_type bucket(const key_type& key) const { return rep.bucket(key); }
float load_factor() const {
return size() * 1.0f / bucket_count();
}
float max_load_factor() const {
float shrink, grow;
rep.get_resizing_parameters(&shrink, &grow);
return grow;
}
void max_load_factor(float new_grow) {
float shrink, grow;
rep.get_resizing_parameters(&shrink, &grow);
rep.set_resizing_parameters(shrink, new_grow);
}
// These aren't tr1 methods but perhaps ought to be.
float min_load_factor() const {
float shrink, grow;
rep.get_resizing_parameters(&shrink, &grow);
return shrink;
}
void min_load_factor(float new_shrink) {
float shrink, grow;
rep.get_resizing_parameters(&shrink, &grow);
rep.set_resizing_parameters(new_shrink, grow);
}
// Deprecated; use min_load_factor() or max_load_factor() instead.
void set_resizing_parameters(float shrink, float grow) {
rep.set_resizing_parameters(shrink, grow);
}
void resize(size_type hint) { rep.resize(hint); }
void rehash(size_type hint) { resize(hint); } // the tr1 name
// Lookup routines
iterator find(const key_type& key) { return rep.find(key); }
const_iterator find(const key_type& key) const { return rep.find(key); }
data_type& operator[](const key_type& key) { // This is our value-add!
// If key is in the hashtable, returns find(key)->second,
// otherwise returns insert(value_type(key, T()).first->second.
// Note it does not create an empty T unless the find fails.
return rep.template find_or_insert<DefaultValue>(key).second;
}
size_type count(const key_type& key) const { return rep.count(key); }
std::pair<iterator, iterator> equal_range(const key_type& key) {
return rep.equal_range(key);
}
std::pair<const_iterator, const_iterator> equal_range(const key_type& key)
const {
return rep.equal_range(key);
}
// Insertion routines
std::pair<iterator, bool> insert(const value_type& obj) {
return rep.insert(obj);
}
template <class InputIterator> void insert(InputIterator f, InputIterator l) {
rep.insert(f, l);
}
void insert(const_iterator f, const_iterator l) {
rep.insert(f, l);
}
// Required for std::insert_iterator; the passed-in iterator is ignored.
iterator insert(iterator, const value_type& obj) {
return insert(obj).first;
}
// Deletion and empty routines
// THESE ARE NON-STANDARD! I make you specify an "impossible" key
// value to identify deleted and empty buckets. You can change the
// deleted key as time goes on, or get rid of it entirely to be insert-only.
void set_empty_key(const key_type& key) { // YOU MUST CALL THIS!
rep.set_empty_key(value_type(key, data_type())); // rep wants a value
}
key_type empty_key() const {
return rep.empty_key().first; // rep returns a value
}
void set_deleted_key(const key_type& key) { rep.set_deleted_key(key); }
void clear_deleted_key() { rep.clear_deleted_key(); }
key_type deleted_key() const { return rep.deleted_key(); }
// These are standard
size_type erase(const key_type& key) { return rep.erase(key); }
void erase(iterator it) { rep.erase(it); }
void erase(iterator f, iterator l) { rep.erase(f, l); }
// Comparison
bool operator==(const dense_hash_map& hs) const { return rep == hs.rep; }
bool operator!=(const dense_hash_map& hs) const { return rep != hs.rep; }
// I/O -- this is an add-on for writing hash map to disk
//
// For maximum flexibility, this does not assume a particular
// file type (though it will probably be a FILE *). We just pass
// the fp through to rep.
// If your keys and values are simple enough, you can pass this
// serializer to serialize()/unserialize(). "Simple enough" means
// value_type is a POD type that contains no pointers. Note,
// however, we don't try to normalize endianness.
typedef typename ht::NopointerSerializer NopointerSerializer;
// serializer: a class providing operator()(OUTPUT*, const value_type&)
// (writing value_type to OUTPUT). You can specify a
// NopointerSerializer object if appropriate (see above).
// fp: either a FILE*, OR an ostream*/subclass_of_ostream*, OR a
// pointer to a class providing size_t Write(const void*, size_t),
// which writes a buffer into a stream (which fp presumably
// owns) and returns the number of bytes successfully written.
// Note basic_ostream<not_char> is not currently supported.
template <typename ValueSerializer, typename OUTPUT>
bool serialize(ValueSerializer serializer, OUTPUT* fp) {
return rep.serialize(serializer, fp);
}
// serializer: a functor providing operator()(INPUT*, value_type*)
// (reading from INPUT and into value_type). You can specify a
// NopointerSerializer object if appropriate (see above).
// fp: either a FILE*, OR an istream*/subclass_of_istream*, OR a
// pointer to a class providing size_t Read(void*, size_t),
// which reads into a buffer from a stream (which fp presumably
// owns) and returns the number of bytes successfully read.
// Note basic_istream<not_char> is not currently supported.
// NOTE: Since value_type is std::pair<const Key, T>, ValueSerializer
// may need to do a const cast in order to fill in the key.
template <typename ValueSerializer, typename INPUT>
bool unserialize(ValueSerializer serializer, INPUT* fp) {
return rep.unserialize(serializer, fp);
}
};
// We need a global swap as well
template <class Key, class T, class HashFcn, class EqualKey, class Alloc>
inline void swap(dense_hash_map<Key, T, HashFcn, EqualKey, Alloc>& hm1,
dense_hash_map<Key, T, HashFcn, EqualKey, Alloc>& hm2) {
hm1.swap(hm2);
}
_END_GOOGLE_NAMESPACE_
#endif /* _DENSE_HASH_MAP_H_ */
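
To make the usage rules documented at the top of this header concrete (set_empty_key() immediately after construction, set_deleted_key() before erase(), operator[] default-constructing missing values, resize(0) to reclaim memory), here is a minimal sketch of dense_hash_map in use. The include path and the sentinel keys are assumptions for illustration, not taken from this repository:

#include <sparsehash/dense_hash_map>   // assumed public header path
#include <iostream>
#include <string>

int main() {
    google::dense_hash_map<std::string, int> counts;
    counts.set_empty_key("");          // mandatory before any other operation
    counts.set_deleted_key("\x01");    // mandatory before erase(); must differ from the empty key

    counts["apple"] = 3;               // operator[] inserts a default-constructed T for missing keys
    counts["pear"] += 5;
    counts.erase("apple");
    counts.resize(0);                  // reclaim memory of erased entries (tr1 name: rehash(0))

    std::cout << counts.size() << "\n";  // 1
    return 0;
}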


@@ -0,0 +1,338 @@
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
//
// This is just a very thin wrapper over densehashtable.h, just
// like sgi stl's stl_hash_set is a very thin wrapper over
// stl_hashtable. The major thing we define is operator[], because
// we have a concept of a data_type which stl_hashtable doesn't
// (it only has a key and a value).
//
// This is more different from dense_hash_map than you might think,
// because all iterators for sets are const (you obviously can't
// change the key, and for sets there is no value).
//
// NOTE: this is exactly like sparse_hash_set.h, with the word
// "sparse" replaced by "dense", except for the addition of
// set_empty_key().
//
// YOU MUST CALL SET_EMPTY_KEY() IMMEDIATELY AFTER CONSTRUCTION.
//
// Otherwise your program will die in mysterious ways. (Note if you
// use the constructor that takes an InputIterator range, you pass in
// the empty key in the constructor, rather than after. As a result,
// this constructor differs from the standard STL version.)
//
// In other respects, we adhere mostly to the STL semantics for
// hash-map. One important exception is that insert() may invalidate
// iterators entirely -- STL semantics are that insert() may reorder
// iterators, but they all still refer to something valid in the
// hashtable. Not so for us. Likewise, insert() may invalidate
// pointers into the hashtable. (Whether insert invalidates iterators
// and pointers depends on whether it results in a hashtable resize).
// On the plus side, delete() doesn't invalidate iterators or pointers
// at all, or even change the ordering of elements.
//
// Here are a few "power user" tips:
//
// 1) set_deleted_key():
// If you want to use erase() you must call set_deleted_key(),
// in addition to set_empty_key(), after construction.
// The deleted and empty keys must differ.
//
// 2) resize(0):
// When an item is deleted, its memory isn't freed right
// away. This allows you to iterate over a hashtable,
// and call erase(), without invalidating the iterator.
// To force the memory to be freed, call resize(0).
// For tr1 compatibility, this can also be called as rehash(0).
//
// 3) min_load_factor(0.0)
// Setting the minimum load factor to 0.0 guarantees that
// the hash table will never shrink.
//
// Roughly speaking:
// (1) dense_hash_set: fastest, uses the most memory unless entries are small
// (2) sparse_hash_set: slowest, uses the least memory
// (3) hash_set / unordered_set (STL): in the middle
//
// Typically I use sparse_hash_set when I care about space and/or when
// I need to save the hashtable on disk. I use hash_set otherwise. I
// don't personally use dense_hash_set ever; some people use it for
// small sets with lots of lookups.
//
// - dense_hash_set has, typically, about 78% memory overhead (if your
// data takes up X bytes, the hash_set uses .78X more bytes in overhead).
// - sparse_hash_set has about 4 bits overhead per entry.
// - sparse_hash_set can be 3-7 times slower than the others for lookup and,
// especially, inserts. See time_hash_map.cc for details.
//
// See /usr/(local/)?doc/sparsehash-*/dense_hash_set.html
// for information about how to use this class.
#ifndef _DENSE_HASH_SET_H_
#define _DENSE_HASH_SET_H_
#include <sparsehash/internal/sparseconfig.h>
#include <algorithm> // needed by stl_alloc
#include <functional> // for equal_to<>, select1st<>, etc
#include <memory> // for alloc
#include <utility> // for pair<>
#include <sparsehash/internal/densehashtable.h> // IWYU pragma: export
#include <sparsehash/internal/libc_allocator_with_realloc.h>
#include HASH_FUN_H // for hash<>
_START_GOOGLE_NAMESPACE_
template <class Value,
class HashFcn = SPARSEHASH_HASH<Value>, // defined in sparseconfig.h
class EqualKey = std::equal_to<Value>,
class Alloc = libc_allocator_with_realloc<Value> >
class dense_hash_set {
private:
// Apparently identity is not stl-standard, so we define our own
struct Identity {
typedef const Value& result_type;
const Value& operator()(const Value& v) const { return v; }
};
struct SetKey {
void operator()(Value* value, const Value& new_key) const {
*value = new_key;
}
};
// The actual data
typedef dense_hashtable<Value, Value, HashFcn, Identity, SetKey,
EqualKey, Alloc> ht;
ht rep;
public:
typedef typename ht::key_type key_type;
typedef typename ht::value_type value_type;
typedef typename ht::hasher hasher;
typedef typename ht::key_equal key_equal;
typedef Alloc allocator_type;
typedef typename ht::size_type size_type;
typedef typename ht::difference_type difference_type;
typedef typename ht::const_pointer pointer;
typedef typename ht::const_pointer const_pointer;
typedef typename ht::const_reference reference;
typedef typename ht::const_reference const_reference;
typedef typename ht::const_iterator iterator;
typedef typename ht::const_iterator const_iterator;
typedef typename ht::const_local_iterator local_iterator;
typedef typename ht::const_local_iterator const_local_iterator;
// Iterator functions -- recall all iterators are const
iterator begin() const { return rep.begin(); }
iterator end() const { return rep.end(); }
// These come from tr1's unordered_set. For us, a bucket has 0 or 1 elements.
local_iterator begin(size_type i) const { return rep.begin(i); }
local_iterator end(size_type i) const { return rep.end(i); }
// Accessor functions
allocator_type get_allocator() const { return rep.get_allocator(); }
hasher hash_funct() const { return rep.hash_funct(); }
hasher hash_function() const { return hash_funct(); } // tr1 name
key_equal key_eq() const { return rep.key_eq(); }
// Constructors
explicit dense_hash_set(size_type expected_max_items_in_table = 0,
const hasher& hf = hasher(),
const key_equal& eql = key_equal(),
const allocator_type& alloc = allocator_type())
: rep(expected_max_items_in_table, hf, eql, Identity(), SetKey(), alloc) {
}
template <class InputIterator>
dense_hash_set(InputIterator f, InputIterator l,
const key_type& empty_key_val,
size_type expected_max_items_in_table = 0,
const hasher& hf = hasher(),
const key_equal& eql = key_equal(),
const allocator_type& alloc = allocator_type())
: rep(expected_max_items_in_table, hf, eql, Identity(), SetKey(), alloc) {
set_empty_key(empty_key_val);
rep.insert(f, l);
}
// We use the default copy constructor
// We use the default operator=()
// We use the default destructor
void clear() { rep.clear(); }
// This clears the hash set without resizing it down to the minimum
// bucket count, but rather keeps the number of buckets constant
void clear_no_resize() { rep.clear_no_resize(); }
void swap(dense_hash_set& hs) { rep.swap(hs.rep); }
// Functions concerning size
size_type size() const { return rep.size(); }
size_type max_size() const { return rep.max_size(); }
bool empty() const { return rep.empty(); }
size_type bucket_count() const { return rep.bucket_count(); }
size_type max_bucket_count() const { return rep.max_bucket_count(); }
// These are tr1 methods. bucket() is the bucket the key is or would be in.
size_type bucket_size(size_type i) const { return rep.bucket_size(i); }
size_type bucket(const key_type& key) const { return rep.bucket(key); }
float load_factor() const {
return size() * 1.0f / bucket_count();
}
float max_load_factor() const {
float shrink, grow;
rep.get_resizing_parameters(&shrink, &grow);
return grow;
}
void max_load_factor(float new_grow) {
float shrink, grow;
rep.get_resizing_parameters(&shrink, &grow);
rep.set_resizing_parameters(shrink, new_grow);
}
// These aren't tr1 methods but perhaps ought to be.
float min_load_factor() const {
float shrink, grow;
rep.get_resizing_parameters(&shrink, &grow);
return shrink;
}
void min_load_factor(float new_shrink) {
float shrink, grow;
rep.get_resizing_parameters(&shrink, &grow);
rep.set_resizing_parameters(new_shrink, grow);
}
// Deprecated; use min_load_factor() or max_load_factor() instead.
void set_resizing_parameters(float shrink, float grow) {
rep.set_resizing_parameters(shrink, grow);
}
void resize(size_type hint) { rep.resize(hint); }
void rehash(size_type hint) { resize(hint); } // the tr1 name
// Lookup routines
iterator find(const key_type& key) const { return rep.find(key); }
size_type count(const key_type& key) const { return rep.count(key); }
std::pair<iterator, iterator> equal_range(const key_type& key) const {
return rep.equal_range(key);
}
// Insertion routines
std::pair<iterator, bool> insert(const value_type& obj) {
std::pair<typename ht::iterator, bool> p = rep.insert(obj);
return std::pair<iterator, bool>(p.first, p.second); // const to non-const
}
template <class InputIterator> void insert(InputIterator f, InputIterator l) {
rep.insert(f, l);
}
void insert(const_iterator f, const_iterator l) {
rep.insert(f, l);
}
// Required for std::insert_iterator; the passed-in iterator is ignored.
iterator insert(iterator, const value_type& obj) {
return insert(obj).first;
}
// Deletion and empty routines
// THESE ARE NON-STANDARD! I make you specify an "impossible" key
// value to identify deleted and empty buckets. You can change the
// deleted key as time goes on, or get rid of it entirely to be insert-only.
void set_empty_key(const key_type& key) { rep.set_empty_key(key); }
key_type empty_key() const { return rep.empty_key(); }
void set_deleted_key(const key_type& key) { rep.set_deleted_key(key); }
void clear_deleted_key() { rep.clear_deleted_key(); }
key_type deleted_key() const { return rep.deleted_key(); }
// These are standard
size_type erase(const key_type& key) { return rep.erase(key); }
void erase(iterator it) { rep.erase(it); }
void erase(iterator f, iterator l) { rep.erase(f, l); }
// Comparison
bool operator==(const dense_hash_set& hs) const { return rep == hs.rep; }
bool operator!=(const dense_hash_set& hs) const { return rep != hs.rep; }
// I/O -- this is an add-on for writing metainformation to disk
//
// For maximum flexibility, this does not assume a particular
// file type (though it will probably be a FILE *). We just pass
// the fp through to rep.
// If your keys and values are simple enough, you can pass this
// serializer to serialize()/unserialize(). "Simple enough" means
// value_type is a POD type that contains no pointers. Note,
// however, we don't try to normalize endianness.
typedef typename ht::NopointerSerializer NopointerSerializer;
// serializer: a class providing operator()(OUTPUT*, const value_type&)
// (writing value_type to OUTPUT). You can specify a
// NopointerSerializer object if appropriate (see above).
// fp: either a FILE*, OR an ostream*/subclass_of_ostream*, OR a
// pointer to a class providing size_t Write(const void*, size_t),
// which writes a buffer into a stream (which fp presumably
// owns) and returns the number of bytes successfully written.
// Note basic_ostream<not_char> is not currently supported.
template <typename ValueSerializer, typename OUTPUT>
bool serialize(ValueSerializer serializer, OUTPUT* fp) {
return rep.serialize(serializer, fp);
}
// serializer: a functor providing operator()(INPUT*, value_type*)
// (reading from INPUT and into value_type). You can specify a
// NopointerSerializer object if appropriate (see above).
// fp: either a FILE*, OR an istream*/subclass_of_istream*, OR a
// pointer to a class providing size_t Read(void*, size_t),
// which reads into a buffer from a stream (which fp presumably
// owns) and returns the number of bytes successfully read.
// Note basic_istream<not_char> is not currently supported.
template <typename ValueSerializer, typename INPUT>
bool unserialize(ValueSerializer serializer, INPUT* fp) {
return rep.unserialize(serializer, fp);
}
};
template <class Val, class HashFcn, class EqualKey, class Alloc>
inline void swap(dense_hash_set<Val, HashFcn, EqualKey, Alloc>& hs1,
dense_hash_set<Val, HashFcn, EqualKey, Alloc>& hs2) {
hs1.swap(hs2);
}
_END_GOOGLE_NAMESPACE_
#endif /* _DENSE_HASH_SET_H_ */
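
dense_hash_set follows the same protocol as the map, except that all iterators are const. A short sketch under the same assumptions about the include path and sentinel keys:

#include <sparsehash/dense_hash_set>   // assumed public header path
#include <iostream>
#include <string>

int main() {
    google::dense_hash_set<std::string> seen;
    seen.set_empty_key("");            // must be called immediately after construction
    seen.set_deleted_key("\x01");      // required before erase(); must differ from the empty key

    seen.insert("alpha");
    seen.insert("beta");
    seen.erase("alpha");

    std::cout << seen.count("beta") << "\n";  // 1
    return 0;
}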

File diff suppressed because it is too large.


@@ -0,0 +1,381 @@
// Copyright (c) 2010, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
//
// Provides classes shared by both sparse and dense hashtable.
//
// sh_hashtable_settings has parameters for growing and shrinking
// a hashtable. It also packages zero-size functor (ie. hasher).
//
// Other functions and classes provide common code for serializing
// and deserializing hashtables to a stream (such as a FILE*).
#ifndef UTIL_GTL_HASHTABLE_COMMON_H_
#define UTIL_GTL_HASHTABLE_COMMON_H_
#include <sparsehash/internal/sparseconfig.h>
#include <assert.h>
#include <stdio.h>
#include <stddef.h> // for size_t
#include <iosfwd>
#include <stdexcept> // For length_error
_START_GOOGLE_NAMESPACE_
template <bool> struct SparsehashCompileAssert { };
#define SPARSEHASH_COMPILE_ASSERT(expr, msg) \
__attribute__((unused)) typedef SparsehashCompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1]
namespace sparsehash_internal {
// Adaptor methods for reading/writing data from an INPUT or OUTPUT
// variable passed to serialize() or unserialize(). For now we
// have implemented INPUT/OUTPUT for FILE*, istream*/ostream* (note
// they are pointers, unlike typical use), or else a pointer to
// something that supports a Read()/Write() method.
//
// For technical reasons, we implement read_data/write_data in two
// stages. The actual work is done in *_data_internal, which takes
// the stream argument twice: once as a template type, and once with
// normal type information. (We only use the second version.) We do
// this because of how C++ picks what function overload to use. If we
// implemented this the naive way:
// bool read_data(istream* is, const void* data, size_t length);
// template<typename T> read_data(T* fp, const void* data, size_t length);
// C++ would prefer the second version for every stream type except
// istream. However, we want C++ to prefer the first version for
// streams that are *subclasses* of istream, such as istringstream.
// This is not possible given the way template types are resolved. So
// we split the stream argument in two, one of which is templated and
// one of which is not. The specialized functions (like the istream
// version above) ignore the template arg and use the second, 'type'
// arg, getting subclass matching as normal. The 'catch-all'
// functions (the second version above) use the template arg to deduce
// the type, and use a second, void* arg to achieve the desired
// 'catch-all' semantics.
// ----- low-level I/O for FILE* ----
template<typename Ignored>
inline bool read_data_internal(Ignored*, FILE* fp,
void* data, size_t length) {
return fread(data, length, 1, fp) == 1;
}
template<typename Ignored>
inline bool write_data_internal(Ignored*, FILE* fp,
const void* data, size_t length) {
return fwrite(data, length, 1, fp) == 1;
}
// ----- low-level I/O for iostream ----
// We want the caller to be responsible for #including <iostream>, not
// us, because iostream is a big header! According to the standard,
// it's only legal to delay the instantiation the way we want to if
// the istream/ostream is a template type. So we jump through hoops.
template<typename ISTREAM>
inline bool read_data_internal_for_istream(ISTREAM* fp,
void* data, size_t length) {
return fp->read(reinterpret_cast<char*>(data), length).good();
}
template<typename Ignored>
inline bool read_data_internal(Ignored*, std::istream* fp,
void* data, size_t length) {
return read_data_internal_for_istream(fp, data, length);
}
template<typename OSTREAM>
inline bool write_data_internal_for_ostream(OSTREAM* fp,
const void* data, size_t length) {
return fp->write(reinterpret_cast<const char*>(data), length).good();
}
template<typename Ignored>
inline bool write_data_internal(Ignored*, std::ostream* fp,
const void* data, size_t length) {
return write_data_internal_for_ostream(fp, data, length);
}
// ----- low-level I/O for custom streams ----
// The INPUT type needs to support a Read() method that takes a
// buffer and a length and returns the number of bytes read.
template <typename INPUT>
inline bool read_data_internal(INPUT* fp, void*,
void* data, size_t length) {
return static_cast<size_t>(fp->Read(data, length)) == length;
}
// The OUTPUT type needs to support a Write() operation that takes
// a buffer and a length and returns the number of bytes written.
template <typename OUTPUT>
inline bool write_data_internal(OUTPUT* fp, void*,
const void* data, size_t length) {
return static_cast<size_t>(fp->Write(data, length)) == length;
}
// ----- low-level I/O: the public API ----
template <typename INPUT>
inline bool read_data(INPUT* fp, void* data, size_t length) {
return read_data_internal(fp, fp, data, length);
}
template <typename OUTPUT>
inline bool write_data(OUTPUT* fp, const void* data, size_t length) {
return write_data_internal(fp, fp, data, length);
}
// Uses read_data() and write_data() to read/write an integer.
// length is the number of bytes to read/write (which may differ
// from sizeof(IntType), allowing us to save on a 32-bit system
// and load on a 64-bit system). Excess bytes are taken to be 0.
// INPUT and OUTPUT must match legal inputs to read/write_data (above).
template <typename INPUT, typename IntType>
bool read_bigendian_number(INPUT* fp, IntType* value, size_t length) {
*value = 0;
unsigned char byte;
// We require IntType to be unsigned or else the shifting gets all screwy.
SPARSEHASH_COMPILE_ASSERT(static_cast<IntType>(-1) > static_cast<IntType>(0),
serializing_int_requires_an_unsigned_type);
for (size_t i = 0; i < length; ++i) {
if (!read_data(fp, &byte, sizeof(byte))) return false;
*value |= static_cast<IntType>(byte) << ((length - 1 - i) * 8);
}
return true;
}
template <typename OUTPUT, typename IntType>
bool write_bigendian_number(OUTPUT* fp, IntType value, size_t length) {
unsigned char byte;
// We require IntType to be unsigned or else the shifting gets all screwy.
SPARSEHASH_COMPILE_ASSERT(static_cast<IntType>(-1) > static_cast<IntType>(0),
serializing_int_requires_an_unsigned_type);
for (size_t i = 0; i < length; ++i) {
byte = (sizeof(value) <= length-1 - i)
? 0 : static_cast<unsigned char>((value >> ((length-1 - i) * 8)) & 255);
if (!write_data(fp, &byte, sizeof(byte))) return false;
}
return true;
}
// If your keys and values are simple enough, you can pass this
// serializer to serialize()/unserialize(). "Simple enough" means
// value_type is a POD type that contains no pointers. Note,
// however, we don't try to normalize endianness.
// This is the type used for NopointerSerializer.
template <typename value_type> struct pod_serializer {
template <typename INPUT>
bool operator()(INPUT* fp, value_type* value) const {
return read_data(fp, value, sizeof(*value));
}
template <typename OUTPUT>
bool operator()(OUTPUT* fp, const value_type& value) const {
return write_data(fp, &value, sizeof(value));
}
};
// Settings contains parameters for growing and shrinking the table.
// It also packages zero-size functor (ie. hasher).
//
// It does some munging of the hash value in cases where we think
// (fear) the original hash function might not be very good. In
// particular, the default hash of pointers is the identity hash,
// so probably all the low bits are 0. We identify when we think
// we're hashing a pointer, and chop off the low bits. Note this
// isn't perfect: even when the key is a pointer, we can't tell
// for sure that the hash is the identity hash. If it's not, this
// is needless work (and possibly, though not likely, harmful).
template<typename Key, typename HashFunc,
typename SizeType, int HT_MIN_BUCKETS>
class sh_hashtable_settings : public HashFunc {
public:
typedef Key key_type;
typedef HashFunc hasher;
typedef SizeType size_type;
public:
sh_hashtable_settings(const hasher& hf,
const float ht_occupancy_flt,
const float ht_empty_flt)
: hasher(hf),
enlarge_threshold_(0),
shrink_threshold_(0),
consider_shrink_(false),
use_empty_(false),
use_deleted_(false),
num_ht_copies_(0) {
set_enlarge_factor(ht_occupancy_flt);
set_shrink_factor(ht_empty_flt);
}
size_type hash(const key_type& v) const {
// We munge the hash value when we don't trust hasher::operator().
return hash_munger<Key>::MungedHash(hasher::operator()(v));
}
float enlarge_factor() const {
return enlarge_factor_;
}
void set_enlarge_factor(float f) {
enlarge_factor_ = f;
}
float shrink_factor() const {
return shrink_factor_;
}
void set_shrink_factor(float f) {
shrink_factor_ = f;
}
size_type enlarge_threshold() const {
return enlarge_threshold_;
}
void set_enlarge_threshold(size_type t) {
enlarge_threshold_ = t;
}
size_type shrink_threshold() const {
return shrink_threshold_;
}
void set_shrink_threshold(size_type t) {
shrink_threshold_ = t;
}
size_type enlarge_size(size_type x) const {
return static_cast<size_type>(x * enlarge_factor_);
}
size_type shrink_size(size_type x) const {
return static_cast<size_type>(x * shrink_factor_);
}
bool consider_shrink() const {
return consider_shrink_;
}
void set_consider_shrink(bool t) {
consider_shrink_ = t;
}
bool use_empty() const {
return use_empty_;
}
void set_use_empty(bool t) {
use_empty_ = t;
}
bool use_deleted() const {
return use_deleted_;
}
void set_use_deleted(bool t) {
use_deleted_ = t;
}
size_type num_ht_copies() const {
return static_cast<size_type>(num_ht_copies_);
}
void inc_num_ht_copies() {
++num_ht_copies_;
}
// Reset the enlarge and shrink thresholds
void reset_thresholds(size_type num_buckets) {
set_enlarge_threshold(enlarge_size(num_buckets));
set_shrink_threshold(shrink_size(num_buckets));
// whatever caused us to reset already considered
set_consider_shrink(false);
}
// Caller is responsible for calling reset_threshold right after
// set_resizing_parameters.
void set_resizing_parameters(float shrink, float grow) {
assert(shrink >= 0.0);
assert(grow <= 1.0);
if (shrink > grow/2.0f)
shrink = grow / 2.0f; // otherwise we thrash hashtable size
set_shrink_factor(shrink);
set_enlarge_factor(grow);
}
// This is the smallest size a hashtable can be without being too crowded
// If you like, you can give a min #buckets as well as a min #elts
size_type min_buckets(size_type num_elts, size_type min_buckets_wanted) {
float enlarge = enlarge_factor();
size_type sz = HT_MIN_BUCKETS; // min buckets allowed
while ( sz < min_buckets_wanted ||
num_elts >= static_cast<size_type>(sz * enlarge) ) {
// This just prevents overflowing size_type, since sz can exceed
// max_size() here.
if (static_cast<size_type>(sz * 2) < sz) {
throw std::length_error("resize overflow"); // protect against overflow
}
sz *= 2;
}
return sz;
}
private:
template<class HashKey> class hash_munger {
public:
static size_t MungedHash(size_t hash) {
return hash;
}
};
// This matches when the hashtable key is a pointer.
template<class HashKey> class hash_munger<HashKey*> {
public:
static size_t MungedHash(size_t hash) {
// TODO(csilvers): consider rotating instead:
// static const int shift = (sizeof(void *) == 4) ? 2 : 3;
// return (hash << (sizeof(hash) * 8) - shift)) | (hash >> shift);
// This matters if we ever change sparse/dense_hash_* to compare
// hashes before comparing actual values. It's speedy on x86.
return hash / sizeof(void*); // get rid of known-0 bits
}
};
size_type enlarge_threshold_; // table.size() * enlarge_factor
size_type shrink_threshold_; // table.size() * shrink_factor
float enlarge_factor_; // how full before resize
float shrink_factor_; // how empty before resize
// consider_shrink=true if we should try to shrink before next insert
bool consider_shrink_;
bool use_empty_; // used only by densehashtable, not sparsehashtable
bool use_deleted_; // false until delkey has been set
// num_ht_copies is a counter incremented every Copy/Move
unsigned int num_ht_copies_;
};
} // namespace sparsehash_internal
#undef SPARSEHASH_COMPILE_ASSERT
_END_GOOGLE_NAMESPACE_
#endif // UTIL_GTL_HASHTABLE_COMMON_H_
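
The read_data()/write_data() adaptors above dispatch on the stream type: FILE*, std::istream*/std::ostream* (passed as pointers), or anything exposing Read()/Write() methods that return a byte count. A sketch of that third case with a hypothetical in-memory stream (MemStream and the include path are illustrative):

#include <sparsehash/internal/hashtable-common.h>  // assumed reachable as in this tree
#include <cstring>
#include <vector>

// Hypothetical custom stream: because it is neither FILE* nor an iostream pointer,
// read_data()/write_data() select the catch-all overloads that call Read()/Write().
struct MemStream {
    std::vector<unsigned char> buf;
    size_t pos = 0;
    size_t Write(const void* data, size_t len) {
        const unsigned char* p = static_cast<const unsigned char*>(data);
        buf.insert(buf.end(), p, p + len);
        return len;                         // bytes successfully written
    }
    size_t Read(void* data, size_t len) {
        if (pos + len > buf.size()) return 0;
        std::memcpy(data, &buf[pos], len);
        pos += len;
        return len;                         // bytes successfully read
    }
};

int main() {
    MemStream ms;
    google::sparsehash_internal::write_bigendian_number(&ms, 0xCAFEu, 4);
    unsigned int back = 0;
    google::sparsehash_internal::read_bigendian_number(&ms, &back, 4);
    return back == 0xCAFEu ? 0 : 1;
}

Since the containers' serialize()/unserialize() just pass fp through to these adaptors, the same kind of object should also be usable there.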


@@ -0,0 +1,122 @@
// Copyright (c) 2010, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
#ifndef UTIL_GTL_LIBC_ALLOCATOR_WITH_REALLOC_H_
#define UTIL_GTL_LIBC_ALLOCATOR_WITH_REALLOC_H_
#include <sparsehash/internal/sparseconfig.h>
#include <stdlib.h> // for malloc/realloc/free
#include <stddef.h> // for ptrdiff_t
#include <new> // for placement new
_START_GOOGLE_NAMESPACE_
template<class T>
class libc_allocator_with_realloc {
public:
typedef T value_type;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef const T& const_reference;
libc_allocator_with_realloc() {}
libc_allocator_with_realloc(const libc_allocator_with_realloc&) {}
~libc_allocator_with_realloc() {}
pointer address(reference r) const { return &r; }
const_pointer address(const_reference r) const { return &r; }
pointer allocate(size_type n, const_pointer = 0) {
return static_cast<pointer>(malloc(n * sizeof(value_type)));
}
void deallocate(pointer p, size_type) {
free(p);
}
pointer reallocate(pointer p, size_type n) {
// p points to a storage array whose objects have already been destroyed
// cast to void* to prevent compiler warnings about calling realloc() on
// an object which cannot be relocated in memory
return static_cast<pointer>(realloc(static_cast<void*>(p), n * sizeof(value_type)));
}
size_type max_size() const {
return static_cast<size_type>(-1) / sizeof(value_type);
}
void construct(pointer p, const value_type& val) {
new(p) value_type(val);
}
void destroy(pointer p) { p->~value_type(); }
template <class U>
libc_allocator_with_realloc(const libc_allocator_with_realloc<U>&) {}
template<class U>
struct rebind {
typedef libc_allocator_with_realloc<U> other;
};
};
// libc_allocator_with_realloc<void> specialization.
template<>
class libc_allocator_with_realloc<void> {
public:
typedef void value_type;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef void* pointer;
typedef const void* const_pointer;
template<class U>
struct rebind {
typedef libc_allocator_with_realloc<U> other;
};
};
template<class T>
inline bool operator==(const libc_allocator_with_realloc<T>&,
const libc_allocator_with_realloc<T>&) {
return true;
}
template<class T>
inline bool operator!=(const libc_allocator_with_realloc<T>&,
const libc_allocator_with_realloc<T>&) {
return false;
}
_END_GOOGLE_NAMESPACE_
#endif // UTIL_GTL_LIBC_ALLOCATOR_WITH_REALLOC_H_
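
A small sketch of the allocator used on its own, showing the malloc()/realloc()/free() mapping. The header documents reallocate() for storage whose objects have already been destroyed; keeping live ints across the call is harmless here only because int is trivially copyable. The include path is assumed as in this tree:

#include <sparsehash/internal/libc_allocator_with_realloc.h>  // assumed reachable as in this tree
#include <cstdio>

int main() {
    google::libc_allocator_with_realloc<int> alloc;
    int* p = alloc.allocate(4);                    // malloc(4 * sizeof(int))
    for (int i = 0; i < 4; ++i) alloc.construct(p + i, i * i);
    p = alloc.reallocate(p, 8);                    // realloc(); the bytes of the ints survive
    std::printf("%d\n", p[3]);                     // 9
    for (int i = 0; i < 4; ++i) alloc.destroy(p + i);
    alloc.deallocate(p, 8);                        // free()
    return 0;
}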


@@ -0,0 +1,46 @@
/*
* NOTE: This file is for internal use only.
* Do not use these #defines in your own program!
*/
/* Namespace for Google classes */
#define GOOGLE_NAMESPACE ::google
/* the location of the header defining hash functions */
#define HASH_FUN_H <functional>
/* the namespace of the hash<> function */
#define HASH_NAMESPACE std
/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1
/* Define to 1 if the system has the type `long long'. */
#define HAVE_LONG_LONG 1
/* Define to 1 if you have the `memcpy' function. */
#define HAVE_MEMCPY 1
/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1
/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1
/* Define to 1 if the system has the type `uint16_t'. */
#define HAVE_UINT16_T 1
/* Define to 1 if the system has the type `u_int16_t'. */
#define HAVE_U_INT16_T 1
/* Define to 1 if the system has the type `__uint16'. */
/* #undef HAVE___UINT16 */
/* The system-provided hash function including the namespace. */
#define SPARSEHASH_HASH HASH_NAMESPACE::hash
/* Stops putting the code inside the Google namespace */
#define _END_GOOGLE_NAMESPACE_ }
/* Puts following code inside the Google namespace */
#define _START_GOOGLE_NAMESPACE_ namespace google {
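
With these defines, HASH_FUN_H resolves to <functional> and SPARSEHASH_HASH to std::hash, so the containers hash with std::hash<Key> by default. A sketch of overriding that default with a custom functor (IdentityHash and the include path are illustrative), exactly as one would with std::unordered_map:

#include <sparsehash/dense_hash_map>   // assumed public header path
#include <cstddef>

// Illustrative functor; any type with size_t operator()(const Key&) const works.
struct IdentityHash {
    std::size_t operator()(int k) const { return static_cast<std::size_t>(k); }
};

int main() {
    // The third template parameter replaces the SPARSEHASH_HASH (= std::hash) default.
    google::dense_hash_map<int, int, IdentityHash> m;
    m.set_empty_key(-1);
    m[42] = 7;
    return m[42] == 7 ? 0 : 1;
}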

File diff suppressed because it is too large.


@@ -0,0 +1,363 @@
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
//
// This is just a very thin wrapper over sparsehashtable.h, just
// like sgi stl's stl_hash_map is a very thin wrapper over
// stl_hashtable. The major thing we define is operator[], because
// we have a concept of a data_type which stl_hashtable doesn't
// (it only has a key and a value).
//
// We adhere mostly to the STL semantics for hash-map. One important
// exception is that insert() may invalidate iterators entirely -- STL
// semantics are that insert() may reorder iterators, but they all
// still refer to something valid in the hashtable. Not so for us.
// Likewise, insert() may invalidate pointers into the hashtable.
// (Whether insert invalidates iterators and pointers depends on
// whether it results in a hashtable resize). On the plus side,
// delete() doesn't invalidate iterators or pointers at all, or even
// change the ordering of elements.
//
// Here are a few "power user" tips:
//
// 1) set_deleted_key():
// Unlike STL's hash_map, if you want to use erase() you
// *must* call set_deleted_key() after construction.
//
// 2) resize(0):
// When an item is deleted, its memory isn't freed right
// away. This is what allows you to iterate over a hashtable
// and call erase() without invalidating the iterator.
// To force the memory to be freed, call resize(0).
// For tr1 compatibility, this can also be called as rehash(0).
//
// 3) min_load_factor(0.0)
// Setting the minimum load factor to 0.0 guarantees that
// the hash table will never shrink.
//
// Roughly speaking:
// (1) dense_hash_map: fastest, uses the most memory unless entries are small
// (2) sparse_hash_map: slowest, uses the least memory
// (3) hash_map / unordered_map (STL): in the middle
//
// Typically I use sparse_hash_map when I care about space and/or when
// I need to save the hashtable on disk. I use hash_map otherwise. I
// don't personally use dense_hash_map ever; some people use it for
// small maps with lots of lookups.
//
// - dense_hash_map has, typically, about 78% memory overhead (if your
// data takes up X bytes, the hash_map uses .78X more bytes in overhead).
// - sparse_hash_map has about 4 bits overhead per entry.
// - sparse_hash_map can be 3-7 times slower than the others for lookup and,
// especially, inserts. See time_hash_map.cc for details.
//
// See /usr/(local/)?doc/sparsehash-*/sparse_hash_map.html
// for information about how to use this class.
#ifndef _SPARSE_HASH_MAP_H_
#define _SPARSE_HASH_MAP_H_
#include <sparsehash/internal/sparseconfig.h>
#include <algorithm> // needed by stl_alloc
#include <functional> // for equal_to<>, select1st<>, etc
#include <memory> // for alloc
#include <utility> // for pair<>
#include <sparsehash/internal/libc_allocator_with_realloc.h>
#include <sparsehash/internal/sparsehashtable.h> // IWYU pragma: export
#include HASH_FUN_H // for hash<>
_START_GOOGLE_NAMESPACE_
template <class Key, class T,
class HashFcn = SPARSEHASH_HASH<Key>, // defined in sparseconfig.h
class EqualKey = std::equal_to<Key>,
class Alloc = libc_allocator_with_realloc<std::pair<const Key, T> > >
class sparse_hash_map {
private:
// Apparently select1st is not stl-standard, so we define our own
struct SelectKey {
typedef const Key& result_type;
const Key& operator()(const std::pair<const Key, T>& p) const {
return p.first;
}
};
struct SetKey {
void operator()(std::pair<const Key, T>* value, const Key& new_key) const {
*const_cast<Key*>(&value->first) = new_key;
// It would be nice to clear the rest of value here as well, in
// case it's taking up a lot of memory. We do this by clearing
// the value. This assumes T has a zero-arg constructor!
value->second = T();
}
};
// For operator[].
struct DefaultValue {
std::pair<const Key, T> operator()(const Key& key) {
return std::make_pair(key, T());
}
};
// The actual data
typedef sparse_hashtable<std::pair<const Key, T>, Key, HashFcn, SelectKey,
SetKey, EqualKey, Alloc> ht;
ht rep;
public:
typedef typename ht::key_type key_type;
typedef T data_type;
typedef T mapped_type;
typedef typename ht::value_type value_type;
typedef typename ht::hasher hasher;
typedef typename ht::key_equal key_equal;
typedef Alloc allocator_type;
typedef typename ht::size_type size_type;
typedef typename ht::difference_type difference_type;
typedef typename ht::pointer pointer;
typedef typename ht::const_pointer const_pointer;
typedef typename ht::reference reference;
typedef typename ht::const_reference const_reference;
typedef typename ht::iterator iterator;
typedef typename ht::const_iterator const_iterator;
typedef typename ht::local_iterator local_iterator;
typedef typename ht::const_local_iterator const_local_iterator;
// Iterator functions
iterator begin() { return rep.begin(); }
iterator end() { return rep.end(); }
const_iterator begin() const { return rep.begin(); }
const_iterator end() const { return rep.end(); }
// These come from tr1's unordered_map. For us, a bucket has 0 or 1 elements.
local_iterator begin(size_type i) { return rep.begin(i); }
local_iterator end(size_type i) { return rep.end(i); }
const_local_iterator begin(size_type i) const { return rep.begin(i); }
const_local_iterator end(size_type i) const { return rep.end(i); }
// Accessor functions
allocator_type get_allocator() const { return rep.get_allocator(); }
hasher hash_funct() const { return rep.hash_funct(); }
hasher hash_function() const { return hash_funct(); }
key_equal key_eq() const { return rep.key_eq(); }
// Constructors
explicit sparse_hash_map(size_type expected_max_items_in_table = 0,
const hasher& hf = hasher(),
const key_equal& eql = key_equal(),
const allocator_type& alloc = allocator_type())
: rep(expected_max_items_in_table, hf, eql, SelectKey(), SetKey(), alloc) {
}
template <class InputIterator>
sparse_hash_map(InputIterator f, InputIterator l,
size_type expected_max_items_in_table = 0,
const hasher& hf = hasher(),
const key_equal& eql = key_equal(),
const allocator_type& alloc = allocator_type())
: rep(expected_max_items_in_table, hf, eql, SelectKey(), SetKey(), alloc) {
rep.insert(f, l);
}
// We use the default copy constructor
// We use the default operator=()
// We use the default destructor
void clear() { rep.clear(); }
void swap(sparse_hash_map& hs) { rep.swap(hs.rep); }
// Functions concerning size
size_type size() const { return rep.size(); }
size_type max_size() const { return rep.max_size(); }
bool empty() const { return rep.empty(); }
size_type bucket_count() const { return rep.bucket_count(); }
size_type max_bucket_count() const { return rep.max_bucket_count(); }
// These are tr1 methods. bucket() is the bucket the key is or would be in.
size_type bucket_size(size_type i) const { return rep.bucket_size(i); }
size_type bucket(const key_type& key) const { return rep.bucket(key); }
float load_factor() const {
return size() * 1.0f / bucket_count();
}
float max_load_factor() const {
float shrink, grow;
rep.get_resizing_parameters(&shrink, &grow);
return grow;
}
void max_load_factor(float new_grow) {
float shrink, grow;
rep.get_resizing_parameters(&shrink, &grow);
rep.set_resizing_parameters(shrink, new_grow);
}
// These aren't tr1 methods but perhaps ought to be.
float min_load_factor() const {
float shrink, grow;
rep.get_resizing_parameters(&shrink, &grow);
return shrink;
}
void min_load_factor(float new_shrink) {
float shrink, grow;
rep.get_resizing_parameters(&shrink, &grow);
rep.set_resizing_parameters(new_shrink, grow);
}
// Deprecated; use min_load_factor() or max_load_factor() instead.
void set_resizing_parameters(float shrink, float grow) {
rep.set_resizing_parameters(shrink, grow);
}
void resize(size_type hint) { rep.resize(hint); }
void rehash(size_type hint) { resize(hint); } // the tr1 name
// Lookup routines
iterator find(const key_type& key) { return rep.find(key); }
const_iterator find(const key_type& key) const { return rep.find(key); }
data_type& operator[](const key_type& key) { // This is our value-add!
// If key is in the hashtable, returns find(key)->second,
// otherwise returns insert(value_type(key, T())).first->second.
// Note it does not create an empty T unless the find fails.
return rep.template find_or_insert<DefaultValue>(key).second;
}
size_type count(const key_type& key) const { return rep.count(key); }
std::pair<iterator, iterator> equal_range(const key_type& key) {
return rep.equal_range(key);
}
std::pair<const_iterator, const_iterator> equal_range(const key_type& key)
const {
return rep.equal_range(key);
}
// Insertion routines
std::pair<iterator, bool> insert(const value_type& obj) {
return rep.insert(obj);
}
template <class InputIterator> void insert(InputIterator f, InputIterator l) {
rep.insert(f, l);
}
void insert(const_iterator f, const_iterator l) {
rep.insert(f, l);
}
// Required for std::insert_iterator; the passed-in iterator is ignored.
iterator insert(iterator, const value_type& obj) {
return insert(obj).first;
}
// Deletion routines
// THESE ARE NON-STANDARD! I make you specify an "impossible" key
// value to identify deleted buckets. You can change the key as
// time goes on, or get rid of it entirely to be insert-only.
void set_deleted_key(const key_type& key) {
rep.set_deleted_key(key);
}
void clear_deleted_key() { rep.clear_deleted_key(); }
key_type deleted_key() const { return rep.deleted_key(); }
// These are standard
size_type erase(const key_type& key) { return rep.erase(key); }
void erase(iterator it) { rep.erase(it); }
void erase(iterator f, iterator l) { rep.erase(f, l); }
// Comparison
bool operator==(const sparse_hash_map& hs) const { return rep == hs.rep; }
bool operator!=(const sparse_hash_map& hs) const { return rep != hs.rep; }
// I/O -- this is an add-on for writing metainformation to disk
//
// For maximum flexibility, this does not assume a particular
// file type (though it will probably be a FILE *). We just pass
// the fp through to rep.
// If your keys and values are simple enough, you can pass this
// serializer to serialize()/unserialize(). "Simple enough" means
// value_type is a POD type that contains no pointers. Note,
// however, we don't try to normalize endianness.
typedef typename ht::NopointerSerializer NopointerSerializer;
// serializer: a class providing operator()(OUTPUT*, const value_type&)
// (writing value_type to OUTPUT). You can specify a
// NopointerSerializer object if appropriate (see above).
// fp: either a FILE*, OR an ostream*/subclass_of_ostream*, OR a
// pointer to a class providing size_t Write(const void*, size_t),
// which writes a buffer into a stream (which fp presumably
// owns) and returns the number of bytes successfully written.
// Note basic_ostream<not_char> is not currently supported.
template <typename ValueSerializer, typename OUTPUT>
bool serialize(ValueSerializer serializer, OUTPUT* fp) {
return rep.serialize(serializer, fp);
}
// serializer: a functor providing operator()(INPUT*, value_type*)
// (reading from INPUT and into value_type). You can specify a
// NopointerSerializer object if appropriate (see above).
// fp: either a FILE*, OR an istream*/subclass_of_istream*, OR a
// pointer to a class providing size_t Read(void*, size_t),
// which reads into a buffer from a stream (which fp presumably
// owns) and returns the number of bytes successfully read.
// Note basic_istream<not_char> is not currently supported.
// NOTE: Since value_type is std::pair<const Key, T>, ValueSerializer
// may need to do a const cast in order to fill in the key.
// NOTE: if Key or T are not POD types, the serializer MUST use
// placement-new to initialize their values, rather than a normal
// equals-assignment or similar. (The value_type* passed into the
// serializer points to garbage memory.)
template <typename ValueSerializer, typename INPUT>
bool unserialize(ValueSerializer serializer, INPUT* fp) {
return rep.unserialize(serializer, fp);
}
// The four methods below are DEPRECATED.
// Use serialize() and unserialize() for new code.
template <typename OUTPUT>
bool write_metadata(OUTPUT *fp) { return rep.write_metadata(fp); }
template <typename INPUT>
bool read_metadata(INPUT *fp) { return rep.read_metadata(fp); }
template <typename OUTPUT>
bool write_nopointer_data(OUTPUT *fp) { return rep.write_nopointer_data(fp); }
template <typename INPUT>
bool read_nopointer_data(INPUT *fp) { return rep.read_nopointer_data(fp); }
};
// We need a global swap as well
template <class Key, class T, class HashFcn, class EqualKey, class Alloc>
inline void swap(sparse_hash_map<Key, T, HashFcn, EqualKey, Alloc>& hm1,
sparse_hash_map<Key, T, HashFcn, EqualKey, Alloc>& hm2) {
hm1.swap(hm2);
}
_END_GOOGLE_NAMESPACE_
#endif /* _SPARSE_HASH_MAP_H_ */
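A hedged usage sketch of the behaviour documented above: set_deleted_key() before erase(), operator[] insertion, resize(0) to reclaim memory, and FILE*-based (un)serialization with NopointerSerializer. The include path and the file name are assumptions, and error handling is omitted.
#include <sparsehash/sparse_hash_map>
#include <cstdio>
int sparse_hash_map_demo() {
    google::sparse_hash_map<int, int> m;
    m.set_deleted_key(-1);     // must be called before erase(); -1 may never be used as a real key
    m[1] = 10;                 // operator[] inserts a default-constructed T, then assigns
    m[2] = 20;
    m.erase(1);                // bucket is marked deleted, memory not freed yet
    m.resize(0);               // frees memory of deleted buckets (tr1 name: rehash(0))
    // int/int pairs are PODs without pointers, so NopointerSerializer is enough.
    typedef google::sparse_hash_map<int, int>::NopointerSerializer Ser;
    FILE* fp = std::fopen("map.bin", "wb");
    m.serialize(Ser(), fp);
    std::fclose(fp);
    google::sparse_hash_map<int, int> copy;
    fp = std::fopen("map.bin", "rb");
    copy.unserialize(Ser(), fp);
    std::fclose(fp);
    return static_cast<int>(copy.size());  // 1: only key 2 survived the erase
}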

View File

@ -0,0 +1,338 @@
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
//
// This is just a very thin wrapper over sparsehashtable.h, just
// like sgi stl's stl_hash_set is a very thin wrapper over
// stl_hashtable. Unlike sparse_hash_map, there is no operator[] or
// data_type here: a set stores only keys, not key/value pairs.
//
// This is more different from sparse_hash_map than you might think,
// because all iterators for sets are const (you obviously can't
// change the key, and for sets there is no value).
//
// We adhere mostly to the STL semantics for hash-map. One important
// exception is that insert() may invalidate iterators entirely -- STL
// semantics are that insert() may reorder iterators, but they all
// still refer to something valid in the hashtable. Not so for us.
// Likewise, insert() may invalidate pointers into the hashtable.
// (Whether insert invalidates iterators and pointers depends on
// whether it results in a hashtable resize). On the plus side,
// delete() doesn't invalidate iterators or pointers at all, or even
// change the ordering of elements.
//
// Here are a few "power user" tips:
//
// 1) set_deleted_key():
// Unlike STL's hash_map, if you want to use erase() you
// *must* call set_deleted_key() after construction.
//
// 2) resize(0):
// When an item is deleted, its memory isn't freed right
// away. This allows you to iterate over a hashtable,
// and call erase(), without invalidating the iterator.
// To force the memory to be freed, call resize(0).
// For tr1 compatibility, this can also be called as rehash(0).
//
// 3) min_load_factor(0.0)
// Setting the minimum load factor to 0.0 guarantees that
// the hash table will never shrink.
//
// Roughly speaking:
// (1) dense_hash_set: fastest, uses the most memory unless entries are small
// (2) sparse_hash_set: slowest, uses the least memory
// (3) hash_set / unordered_set (STL): in the middle
//
// Typically I use sparse_hash_set when I care about space and/or when
// I need to save the hashtable on disk. I use hash_set otherwise. I
// don't personally use dense_hash_set ever; some people use it for
// small sets with lots of lookups.
//
// - dense_hash_set has, typically, about 78% memory overhead (if your
// data takes up X bytes, the hash_set uses .78X more bytes in overhead).
// - sparse_hash_set has about 4 bits overhead per entry.
// - sparse_hash_set can be 3-7 times slower than the others for lookup and,
// especially, inserts. See time_hash_map.cc for details.
//
// See /usr/(local/)?doc/sparsehash-*/sparse_hash_set.html
// for information about how to use this class.
#ifndef _SPARSE_HASH_SET_H_
#define _SPARSE_HASH_SET_H_
#include <sparsehash/internal/sparseconfig.h>
#include <algorithm> // needed by stl_alloc
#include <functional> // for equal_to<>
#include <memory> // for alloc (which we don't use)
#include <utility> // for pair<>
#include <sparsehash/internal/libc_allocator_with_realloc.h>
#include <sparsehash/internal/sparsehashtable.h> // IWYU pragma: export
#include HASH_FUN_H // for hash<>
_START_GOOGLE_NAMESPACE_
template <class Value,
class HashFcn = SPARSEHASH_HASH<Value>, // defined in sparseconfig.h
class EqualKey = std::equal_to<Value>,
class Alloc = libc_allocator_with_realloc<Value> >
class sparse_hash_set {
private:
// Apparently identity is not stl-standard, so we define our own
struct Identity {
typedef const Value& result_type;
const Value& operator()(const Value& v) const { return v; }
};
struct SetKey {
void operator()(Value* value, const Value& new_key) const {
*value = new_key;
}
};
typedef sparse_hashtable<Value, Value, HashFcn, Identity, SetKey,
EqualKey, Alloc> ht;
ht rep;
public:
typedef typename ht::key_type key_type;
typedef typename ht::value_type value_type;
typedef typename ht::hasher hasher;
typedef typename ht::key_equal key_equal;
typedef Alloc allocator_type;
typedef typename ht::size_type size_type;
typedef typename ht::difference_type difference_type;
typedef typename ht::const_pointer pointer;
typedef typename ht::const_pointer const_pointer;
typedef typename ht::const_reference reference;
typedef typename ht::const_reference const_reference;
typedef typename ht::const_iterator iterator;
typedef typename ht::const_iterator const_iterator;
typedef typename ht::const_local_iterator local_iterator;
typedef typename ht::const_local_iterator const_local_iterator;
// Iterator functions -- recall all iterators are const
iterator begin() const { return rep.begin(); }
iterator end() const { return rep.end(); }
// These come from tr1's unordered_set. For us, a bucket has 0 or 1 elements.
local_iterator begin(size_type i) const { return rep.begin(i); }
local_iterator end(size_type i) const { return rep.end(i); }
// Accessor functions
allocator_type get_allocator() const { return rep.get_allocator(); }
hasher hash_funct() const { return rep.hash_funct(); }
hasher hash_function() const { return hash_funct(); } // tr1 name
key_equal key_eq() const { return rep.key_eq(); }
// Constructors
explicit sparse_hash_set(size_type expected_max_items_in_table = 0,
const hasher& hf = hasher(),
const key_equal& eql = key_equal(),
const allocator_type& alloc = allocator_type())
: rep(expected_max_items_in_table, hf, eql, Identity(), SetKey(), alloc) {
}
template <class InputIterator>
sparse_hash_set(InputIterator f, InputIterator l,
size_type expected_max_items_in_table = 0,
const hasher& hf = hasher(),
const key_equal& eql = key_equal(),
const allocator_type& alloc = allocator_type())
: rep(expected_max_items_in_table, hf, eql, Identity(), SetKey(), alloc) {
rep.insert(f, l);
}
// We use the default copy constructor
// We use the default operator=()
// We use the default destructor
void clear() { rep.clear(); }
void swap(sparse_hash_set& hs) { rep.swap(hs.rep); }
// Functions concerning size
size_type size() const { return rep.size(); }
size_type max_size() const { return rep.max_size(); }
bool empty() const { return rep.empty(); }
size_type bucket_count() const { return rep.bucket_count(); }
size_type max_bucket_count() const { return rep.max_bucket_count(); }
// These are tr1 methods. bucket() is the bucket the key is or would be in.
size_type bucket_size(size_type i) const { return rep.bucket_size(i); }
size_type bucket(const key_type& key) const { return rep.bucket(key); }
float load_factor() const {
return size() * 1.0f / bucket_count();
}
float max_load_factor() const {
float shrink, grow;
rep.get_resizing_parameters(&shrink, &grow);
return grow;
}
void max_load_factor(float new_grow) {
float shrink, grow;
rep.get_resizing_parameters(&shrink, &grow);
rep.set_resizing_parameters(shrink, new_grow);
}
// These aren't tr1 methods but perhaps ought to be.
float min_load_factor() const {
float shrink, grow;
rep.get_resizing_parameters(&shrink, &grow);
return shrink;
}
void min_load_factor(float new_shrink) {
float shrink, grow;
rep.get_resizing_parameters(&shrink, &grow);
rep.set_resizing_parameters(new_shrink, grow);
}
// Deprecated; use min_load_factor() or max_load_factor() instead.
void set_resizing_parameters(float shrink, float grow) {
rep.set_resizing_parameters(shrink, grow);
}
void resize(size_type hint) { rep.resize(hint); }
void rehash(size_type hint) { resize(hint); } // the tr1 name
// Lookup routines
iterator find(const key_type& key) const { return rep.find(key); }
size_type count(const key_type& key) const { return rep.count(key); }
std::pair<iterator, iterator> equal_range(const key_type& key) const {
return rep.equal_range(key);
}
// Insertion routines
std::pair<iterator, bool> insert(const value_type& obj) {
std::pair<typename ht::iterator, bool> p = rep.insert(obj);
return std::pair<iterator, bool>(p.first, p.second); // const to non-const
}
template <class InputIterator> void insert(InputIterator f, InputIterator l) {
rep.insert(f, l);
}
void insert(const_iterator f, const_iterator l) {
rep.insert(f, l);
}
// Required for std::insert_iterator; the passed-in iterator is ignored.
iterator insert(iterator, const value_type& obj) {
return insert(obj).first;
}
// Deletion routines
// THESE ARE NON-STANDARD! I make you specify an "impossible" key
// value to identify deleted buckets. You can change the key as
// time goes on, or get rid of it entirely to be insert-only.
void set_deleted_key(const key_type& key) { rep.set_deleted_key(key); }
void clear_deleted_key() { rep.clear_deleted_key(); }
key_type deleted_key() const { return rep.deleted_key(); }
// These are standard
size_type erase(const key_type& key) { return rep.erase(key); }
void erase(iterator it) { rep.erase(it); }
void erase(iterator f, iterator l) { rep.erase(f, l); }
// Comparison
bool operator==(const sparse_hash_set& hs) const { return rep == hs.rep; }
bool operator!=(const sparse_hash_set& hs) const { return rep != hs.rep; }
// I/O -- this is an add-on for writing metainformation to disk
//
// For maximum flexibility, this does not assume a particular
// file type (though it will probably be a FILE *). We just pass
// the fp through to rep.
// If your keys and values are simple enough, you can pass this
// serializer to serialize()/unserialize(). "Simple enough" means
// value_type is a POD type that contains no pointers. Note,
// however, we don't try to normalize endianness.
typedef typename ht::NopointerSerializer NopointerSerializer;
// serializer: a class providing operator()(OUTPUT*, const value_type&)
// (writing value_type to OUTPUT). You can specify a
// NopointerSerializer object if appropriate (see above).
// fp: either a FILE*, OR an ostream*/subclass_of_ostream*, OR a
// pointer to a class providing size_t Write(const void*, size_t),
// which writes a buffer into a stream (which fp presumably
// owns) and returns the number of bytes successfully written.
// Note basic_ostream<not_char> is not currently supported.
template <typename ValueSerializer, typename OUTPUT>
bool serialize(ValueSerializer serializer, OUTPUT* fp) {
return rep.serialize(serializer, fp);
}
// serializer: a functor providing operator()(INPUT*, value_type*)
// (reading from INPUT and into value_type). You can specify a
// NopointerSerializer object if appropriate (see above).
// fp: either a FILE*, OR an istream*/subclass_of_istream*, OR a
// pointer to a class providing size_t Read(void*, size_t),
// which reads into a buffer from a stream (which fp presumably
// owns) and returns the number of bytes successfully read.
// Note basic_istream<not_char> is not currently supported.
// NOTE: Since value_type is const Key, ValueSerializer
// may need to do a const cast in order to fill in the key.
// NOTE: if Key is not a POD type, the serializer MUST use
// placement-new to initialize its value, rather than a normal
// equals-assignment or similar. (The value_type* passed into
// the serializer points to garbage memory.)
template <typename ValueSerializer, typename INPUT>
bool unserialize(ValueSerializer serializer, INPUT* fp) {
return rep.unserialize(serializer, fp);
}
// The four methods below are DEPRECATED.
// Use serialize() and unserialize() for new code.
template <typename OUTPUT>
bool write_metadata(OUTPUT *fp) { return rep.write_metadata(fp); }
template <typename INPUT>
bool read_metadata(INPUT *fp) { return rep.read_metadata(fp); }
template <typename OUTPUT>
bool write_nopointer_data(OUTPUT *fp) { return rep.write_nopointer_data(fp); }
template <typename INPUT>
bool read_nopointer_data(INPUT *fp) { return rep.read_nopointer_data(fp); }
};
template <class Val, class HashFcn, class EqualKey, class Alloc>
inline void swap(sparse_hash_set<Val, HashFcn, EqualKey, Alloc>& hs1,
sparse_hash_set<Val, HashFcn, EqualKey, Alloc>& hs2) {
hs1.swap(hs2);
}
_END_GOOGLE_NAMESPACE_
#endif /* _SPARSE_HASH_SET_H_ */
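The same usage pattern for the set variant, again as an illustrative sketch (include path assumed); note that all iterators are const, so elements can only be inserted, looked up, and erased.
#include <sparsehash/sparse_hash_set>
#include <cstddef>
#include <string>
std::size_t sparse_hash_set_demo() {
    google::sparse_hash_set<std::string> s;
    s.set_deleted_key("");       // required before erase(); "" may never be inserted as a real element
    s.insert("red");
    s.insert("green");
    s.insert("red");             // duplicate, silently ignored
    if (s.find("green") != s.end() && s.count("red") == 1) {
        s.erase("red");
    }
    s.resize(0);                 // reclaim the memory of deleted buckets
    return s.size();             // 1
}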

File diff suppressed because it is too large

View File

@ -0,0 +1,134 @@
// Copyright 2005 Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ----
//
// Template metaprogramming utility functions.
//
// This code is compiled directly on many platforms, including client
// platforms like Windows, Mac, and embedded systems. Before making
// any changes here, make sure that you're not breaking any platforms.
//
//
// The names chosen here reflect those used in tr1 and the boost::mpl
// library; there are similar operations used in the Loki library as
// well. I prefer the boost names for 2 reasons:
// 1. I think that portions of the Boost libraries are more likely to
// be included in the c++ standard.
// 2. It is not impossible that some of the boost libraries will be
// included in our own build in the future.
// Both of these outcomes means that we may be able to directly replace
// some of these with boost equivalents.
//
#ifndef BASE_TEMPLATE_UTIL_H_
#define BASE_TEMPLATE_UTIL_H_
#include <sparsehash/internal/sparseconfig.h>
_START_GOOGLE_NAMESPACE_
// Types small_ and big_ are guaranteed such that sizeof(small_) <
// sizeof(big_)
typedef char small_;
struct big_ {
char dummy[2];
};
// Identity metafunction.
template <class T>
struct identity_ {
typedef T type;
};
// integral_constant, defined in tr1, is a wrapper for an integer
// value. We don't really need this generality; we could get away
// with hardcoding the integer type to bool. We use the fully
// general integer_constant for compatibility with tr1.
template<class T, T v>
struct integral_constant {
static const T value = v;
typedef T value_type;
typedef integral_constant<T, v> type;
};
template <class T, T v> const T integral_constant<T, v>::value;
// Abbreviations: true_type and false_type are structs that represent boolean
// true and false values. Also define the boost::mpl versions of those names,
// true_ and false_.
typedef integral_constant<bool, true> true_type;
typedef integral_constant<bool, false> false_type;
typedef true_type true_;
typedef false_type false_;
// if_ is a templatized conditional statement.
// if_<cond, A, B> is a compile time evaluation of cond.
// if_<>::type contains A if cond is true, B otherwise.
template<bool cond, typename A, typename B>
struct if_{
typedef A type;
};
template<typename A, typename B>
struct if_<false, A, B> {
typedef B type;
};
// type_equals_ is a template type comparator, similar to Loki IsSameType.
// type_equals_<A, B>::value is true iff "A" is the same type as "B".
//
// New code should prefer base::is_same, defined in base/type_traits.h.
// It is functionally identical, but is_same is the standard spelling.
template<typename A, typename B>
struct type_equals_ : public false_ {
};
template<typename A>
struct type_equals_<A, A> : public true_ {
};
// and_ is a template && operator.
// and_<A, B>::value evaluates "A::value && B::value".
template<typename A, typename B>
struct and_ : public integral_constant<bool, (A::value && B::value)> {
};
// or_ is a template || operator.
// or_<A, B>::value evaluates "A::value || B::value".
template<typename A, typename B>
struct or_ : public integral_constant<bool, (A::value || B::value)> {
};
_END_GOOGLE_NAMESPACE_
#endif // BASE_TEMPLATE_UTIL_H_
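A compile-time sketch of the metafunctions above; static_assert is used here only for checking and assumes a C++11 compiler, and the include path is an assumption.
#include <sparsehash/template_util.h>
typedef google::if_<true,  int, double>::type picks_int;     // if_ selects A when cond is true
typedef google::if_<false, int, double>::type picks_double;  // ... and B when cond is false
static_assert(google::type_equals_<picks_int, int>::value,       "if_<true, A, B>::type is A");
static_assert(google::type_equals_<picks_double, double>::value, "if_<false, A, B>::type is B");
static_assert(google::and_<google::true_, google::true_>::value,   "and_ evaluates A::value && B::value");
static_assert(!google::or_<google::false_, google::false_>::value, "or_ evaluates A::value || B::value");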

View File

@ -0,0 +1,342 @@
// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ----
//
// This code is compiled directly on many platforms, including client
// platforms like Windows, Mac, and embedded systems. Before making
// any changes here, make sure that you're not breaking any platforms.
//
// Define a small subset of tr1 type traits. The traits we define are:
// is_integral
// is_floating_point
// is_pointer
// is_enum
// is_reference
// is_pod
// has_trivial_constructor
// has_trivial_copy
// has_trivial_assign
// has_trivial_destructor
// remove_const
// remove_volatile
// remove_cv
// remove_reference
// add_reference
// remove_pointer
// is_same
// is_convertible
// We can add more type traits as required.
#ifndef BASE_TYPE_TRAITS_H_
#define BASE_TYPE_TRAITS_H_
#include <sparsehash/internal/sparseconfig.h>
#include <utility> // For pair
#include <sparsehash/template_util.h> // For true_type and false_type
_START_GOOGLE_NAMESPACE_
template <class T> struct is_integral;
template <class T> struct is_floating_point;
template <class T> struct is_pointer;
// MSVC can't compile this correctly, and neither can gcc 3.3.5 (at least)
#if !defined(_MSC_VER) && !(defined(__GNUC__) && __GNUC__ <= 3)
// is_enum uses is_convertible, which is not available on MSVC.
template <class T> struct is_enum;
#endif
template <class T> struct is_reference;
template <class T> struct is_pod;
template <class T> struct has_trivial_constructor;
template <class T> struct has_trivial_copy;
template <class T> struct has_trivial_assign;
template <class T> struct has_trivial_destructor;
template <class T> struct remove_const;
template <class T> struct remove_volatile;
template <class T> struct remove_cv;
template <class T> struct remove_reference;
template <class T> struct add_reference;
template <class T> struct remove_pointer;
template <class T, class U> struct is_same;
#if !defined(_MSC_VER) && !(defined(__GNUC__) && __GNUC__ <= 3)
template <class From, class To> struct is_convertible;
#endif
// is_integral is false except for the built-in integer types. A
// cv-qualified type is integral if and only if the underlying type is.
template <class T> struct is_integral : false_type { };
template<> struct is_integral<bool> : true_type { };
template<> struct is_integral<char> : true_type { };
template<> struct is_integral<unsigned char> : true_type { };
template<> struct is_integral<signed char> : true_type { };
#if defined(_MSC_VER)
// wchar_t is not by default a distinct type from unsigned short in
// Microsoft C.
// See http://msdn2.microsoft.com/en-us/library/dh8che7s(VS.80).aspx
template<> struct is_integral<__wchar_t> : true_type { };
#else
template<> struct is_integral<wchar_t> : true_type { };
#endif
template<> struct is_integral<short> : true_type { };
template<> struct is_integral<unsigned short> : true_type { };
template<> struct is_integral<int> : true_type { };
template<> struct is_integral<unsigned int> : true_type { };
template<> struct is_integral<long> : true_type { };
template<> struct is_integral<unsigned long> : true_type { };
#ifdef HAVE_LONG_LONG
template<> struct is_integral<long long> : true_type { };
template<> struct is_integral<unsigned long long> : true_type { };
#endif
template <class T> struct is_integral<const T> : is_integral<T> { };
template <class T> struct is_integral<volatile T> : is_integral<T> { };
template <class T> struct is_integral<const volatile T> : is_integral<T> { };
// is_floating_point is false except for the built-in floating-point types.
// A cv-qualified type is floating-point if and only if the underlying type is.
template <class T> struct is_floating_point : false_type { };
template<> struct is_floating_point<float> : true_type { };
template<> struct is_floating_point<double> : true_type { };
template<> struct is_floating_point<long double> : true_type { };
template <class T> struct is_floating_point<const T>
: is_floating_point<T> { };
template <class T> struct is_floating_point<volatile T>
: is_floating_point<T> { };
template <class T> struct is_floating_point<const volatile T>
: is_floating_point<T> { };
// is_pointer is false except for pointer types. A cv-qualified type (e.g.
// "int* const", as opposed to "int const*") is cv-qualified if and only if
// the underlying type is.
template <class T> struct is_pointer : false_type { };
template <class T> struct is_pointer<T*> : true_type { };
template <class T> struct is_pointer<const T> : is_pointer<T> { };
template <class T> struct is_pointer<volatile T> : is_pointer<T> { };
template <class T> struct is_pointer<const volatile T> : is_pointer<T> { };
#if !defined(_MSC_VER) && !(defined(__GNUC__) && __GNUC__ <= 3)
namespace internal {
template <class T> struct is_class_or_union {
template <class U> static small_ tester(void (U::*)());
template <class U> static big_ tester(...);
static const bool value = sizeof(tester<T>(0)) == sizeof(small_);
};
// is_convertible chokes if the first argument is an array. That's why
// we use add_reference here.
template <bool NotUnum, class T> struct is_enum_impl
: is_convertible<typename add_reference<T>::type, int> { };
template <class T> struct is_enum_impl<true, T> : false_type { };
} // namespace internal
// Specified by TR1 [4.5.1] primary type categories.
// Implementation note:
//
// Each type is either void, integral, floating point, array, pointer,
// reference, member object pointer, member function pointer, enum,
// union or class. Out of these, only integral, floating point, reference,
// class and enum types are potentially convertible to int. Therefore,
// if a type is not a reference, integral, floating point or class and
// is convertible to int, it's an enum. Adding cv-qualification to a type
// does not change whether it's an enum.
//
// Is-convertible-to-int check is done only if all other checks pass,
// because it can't be used with some types (e.g. void or classes with
// inaccessible conversion operators).
template <class T> struct is_enum
: internal::is_enum_impl<
is_same<T, void>::value ||
is_integral<T>::value ||
is_floating_point<T>::value ||
is_reference<T>::value ||
internal::is_class_or_union<T>::value,
T> { };
template <class T> struct is_enum<const T> : is_enum<T> { };
template <class T> struct is_enum<volatile T> : is_enum<T> { };
template <class T> struct is_enum<const volatile T> : is_enum<T> { };
#endif
// is_reference is false except for reference types.
template<typename T> struct is_reference : false_type {};
template<typename T> struct is_reference<T&> : true_type {};
// We can't get is_pod right without compiler help, so fail conservatively.
// We will assume it's false except for arithmetic types, enumerations,
// pointers and cv-qualified versions thereof. Note that std::pair<T,U>
// is not a POD even if T and U are PODs.
template <class T> struct is_pod
: integral_constant<bool, (is_integral<T>::value ||
is_floating_point<T>::value ||
#if !defined(_MSC_VER) && !(defined(__GNUC__) && __GNUC__ <= 3)
// is_enum is not available on MSVC.
is_enum<T>::value ||
#endif
is_pointer<T>::value)> { };
template <class T> struct is_pod<const T> : is_pod<T> { };
template <class T> struct is_pod<volatile T> : is_pod<T> { };
template <class T> struct is_pod<const volatile T> : is_pod<T> { };
// We can't get has_trivial_constructor right without compiler help, so
// fail conservatively. We will assume it's false except for: (1) types
// for which is_pod is true. (2) std::pair of types with trivial
// constructors. (3) array of a type with a trivial constructor.
// (4) const versions thereof.
template <class T> struct has_trivial_constructor : is_pod<T> { };
template <class T, class U> struct has_trivial_constructor<std::pair<T, U> >
: integral_constant<bool,
(has_trivial_constructor<T>::value &&
has_trivial_constructor<U>::value)> { };
template <class A, int N> struct has_trivial_constructor<A[N]>
: has_trivial_constructor<A> { };
template <class T> struct has_trivial_constructor<const T>
: has_trivial_constructor<T> { };
// We can't get has_trivial_copy right without compiler help, so fail
// conservatively. We will assume it's false except for: (1) types
// for which is_pod is true. (2) std::pair of types with trivial copy
// constructors. (3) array of a type with a trivial copy constructor.
// (4) const versions thereof.
template <class T> struct has_trivial_copy : is_pod<T> { };
template <class T, class U> struct has_trivial_copy<std::pair<T, U> >
: integral_constant<bool,
(has_trivial_copy<T>::value &&
has_trivial_copy<U>::value)> { };
template <class A, int N> struct has_trivial_copy<A[N]>
: has_trivial_copy<A> { };
template <class T> struct has_trivial_copy<const T> : has_trivial_copy<T> { };
// We can't get has_trivial_assign right without compiler help, so fail
// conservatively. We will assume it's false except for: (1) types
// for which is_pod is true. (2) std::pair of types with trivial copy
// constructors. (3) array of a type with a trivial assign constructor.
template <class T> struct has_trivial_assign : is_pod<T> { };
template <class T, class U> struct has_trivial_assign<std::pair<T, U> >
: integral_constant<bool,
(has_trivial_assign<T>::value &&
has_trivial_assign<U>::value)> { };
template <class A, int N> struct has_trivial_assign<A[N]>
: has_trivial_assign<A> { };
// We can't get has_trivial_destructor right without compiler help, so
// fail conservatively. We will assume it's false except for: (1) types
// for which is_pod is true. (2) std::pair of types with trivial
// destructors. (3) array of a type with a trivial destructor.
// (4) const versions thereof.
template <class T> struct has_trivial_destructor : is_pod<T> { };
template <class T, class U> struct has_trivial_destructor<std::pair<T, U> >
: integral_constant<bool,
(has_trivial_destructor<T>::value &&
has_trivial_destructor<U>::value)> { };
template <class A, int N> struct has_trivial_destructor<A[N]>
: has_trivial_destructor<A> { };
template <class T> struct has_trivial_destructor<const T>
: has_trivial_destructor<T> { };
// Specified by TR1 [4.7.1]
template<typename T> struct remove_const { typedef T type; };
template<typename T> struct remove_const<T const> { typedef T type; };
template<typename T> struct remove_volatile { typedef T type; };
template<typename T> struct remove_volatile<T volatile> { typedef T type; };
template<typename T> struct remove_cv {
typedef typename remove_const<typename remove_volatile<T>::type>::type type;
};
// Specified by TR1 [4.7.2] Reference modifications.
template<typename T> struct remove_reference { typedef T type; };
template<typename T> struct remove_reference<T&> { typedef T type; };
template <typename T> struct add_reference { typedef T& type; };
template <typename T> struct add_reference<T&> { typedef T& type; };
// Specified by TR1 [4.7.4] Pointer modifications.
template<typename T> struct remove_pointer { typedef T type; };
template<typename T> struct remove_pointer<T*> { typedef T type; };
template<typename T> struct remove_pointer<T* const> { typedef T type; };
template<typename T> struct remove_pointer<T* volatile> { typedef T type; };
template<typename T> struct remove_pointer<T* const volatile> {
typedef T type; };
// Specified by TR1 [4.6] Relationships between types
template<typename T, typename U> struct is_same : public false_type { };
template<typename T> struct is_same<T, T> : public true_type { };
// Specified by TR1 [4.6] Relationships between types
#if !defined(_MSC_VER) && !(defined(__GNUC__) && __GNUC__ <= 3)
namespace internal {
// This class is an implementation detail for is_convertible, and you
// don't need to know how it works to use is_convertible. For those
// who care: we declare two different functions, one whose argument is
// of type To and one with a variadic argument list. We give them
// return types of different size, so we can use sizeof to trick the
// compiler into telling us which function it would have chosen if we
// had called it with an argument of type From. See Alexandrescu's
// _Modern C++ Design_ for more details on this sort of trick.
template <typename From, typename To>
struct ConvertHelper {
static small_ Test(To);
static big_ Test(...);
static From Create();
};
} // namespace internal
// Inherits from true_type if From is convertible to To, false_type otherwise.
template <typename From, typename To>
struct is_convertible
: integral_constant<bool,
sizeof(internal::ConvertHelper<From, To>::Test(
internal::ConvertHelper<From, To>::Create()))
== sizeof(small_)> {
};
#endif
_END_GOOGLE_NAMESPACE_
// Right now these macros are no-ops, and mostly just document the fact
// these types are PODs, for human use. They may be made more contentful
// later. The typedef is just to make it legal to put a semicolon after
// these macros.
#define DECLARE_POD(TypeName) typedef int Dummy_Type_For_DECLARE_POD
#define DECLARE_NESTED_POD(TypeName) DECLARE_POD(TypeName)
#define PROPAGATE_POD_FROM_TEMPLATE_ARGUMENT(TemplateName) \
typedef int Dummy_Type_For_PROPAGATE_POD_FROM_TEMPLATE_ARGUMENT
#define ENFORCE_POD(TypeName) typedef int Dummy_Type_For_ENFORCE_POD
#endif // BASE_TYPE_TRAITS_H_
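A compile-time sketch of how these traits behave, matching the conservative rules described in the comments above (static_assert assumes C++11; the include path is an assumption).
#include <sparsehash/type_traits.h>
#include <utility>
static_assert(google::is_integral<const unsigned long>::value,
              "cv-qualified integer types are still integral");
static_assert(!google::is_pod<std::pair<int, int> >::value,
              "std::pair is conservatively not a POD here");
static_assert(google::has_trivial_copy<std::pair<int, int> >::value,
              "but a pair of PODs does get a trivial copy");
static_assert(google::is_same<google::remove_pointer<int* const>::type, int>::value,
              "remove_pointer strips the pointer and its cv-qualification");
static_assert(google::is_convertible<float, int>::value,
              "float is convertible to int");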

View File

@ -1,12 +1,8 @@
#ifndef ATESTS_H
#define ATESTS_H
#include <string>
#include "./tests.h"
#include <ostream>
#include <iostream>
#include <thread>
#include <fstream>
#include <fstream>
#include "./tests.h"
vector<int> sizes = {
    50000, 100000, 150000, 200000, 250000, 300000, 350000, 400000, 500000,

View File

@ -7,8 +7,8 @@
// hashmaps and hash
#include <unordered_map>
-#include <sparsehash/sparse_hash_map>
+#include "3thparty/sparsehash/sparse_hash_map"
-#include <sparsehash/dense_hash_map>
+#include "3thparty/sparsehash/dense_hash_map"
#include "./3thparty/abseil-cpp/absl/container/node_hash_map.h"
#include "./3thparty/abseil-cpp/absl/container/flat_hash_map.h"
#include "./3thparty/abseil-cpp/absl/hash/hash.h"
@ -24,29 +24,22 @@
#include "./3thparty/parallel_hashmap/phmap.h" #include "./3thparty/parallel_hashmap/phmap.h"
#include "./3thparty/emilib/hash_map.hpp" #include "./3thparty/emilib/hash_map.hpp"
#include "3thparty/robinhood/robin_hood.h" #include "3thparty/robinhood/robin_hood.h"
#include <type_traits>
using std::string; using std::string;
using absl::Hash; using absl::Hash;
template<class T>
// since my testing is based on this hashmap, this one doesn't need no prep void prepare(T& map, int size){
void prepare(std::unordered_map<int, int>& map,int size){
map.reserve(size);
}
void prepare(std::unordered_map<string, string>& map,int size){
map.reserve(size); map.reserve(size);
} }
// goooooogle
void prepare(google::sparse_hash_map<int, int>& map, int size){ void prepare(google::sparse_hash_map<int, int>& map, int size){
map.set_deleted_key(0); map.set_deleted_key(-1);
} }
void prepare(google::sparse_hash_map<string, string>& map, int size){ void prepare(google::sparse_hash_map<string, string>& map, int size){
map.set_deleted_key(""); map.set_deleted_key("a");
} }
void prepare(google::dense_hash_map<int, int>& map, int size){ void prepare(google::dense_hash_map<int, int>& map, int size){
map.set_empty_key(0); map.set_empty_key(0);
map.set_deleted_key(-1); map.set_deleted_key(-1);
@ -56,124 +49,4 @@ void prepare(google::dense_hash_map<string, string>& map, int size){
map.set_empty_key(""); map.set_empty_key("");
} }
//absl
void prepare(absl::node_hash_map<int, int>& map, int size){
map.reserve(size);
}
void prepare(absl::node_hash_map<string, string>& map, int size){
map.reserve(size);
}
void prepare(absl::flat_hash_map<int, int>& map, int size){
map.reserve(size);
}
void prepare(absl::flat_hash_map<string, string>& map, int size){
map.reserve(size);
}
// tessil
void prepare(tsl::sparse_map<int, int>& map, int size){
map.reserve(size);
}
void prepare(tsl::sparse_map<string, string>& map, int size){
map.reserve(size);
}
void prepare(tsl::array_map<int, int>& map, int size){
// map.reserve(size);
}
void prepare(tsl::array_map<string, string>& map, int size){
// map.reserve(size);
}
void prepare(tsl::ordered_map<int, int>& map, int size){
map.reserve(size);
}
void prepare(tsl::ordered_map<string, string>& map, int size){
map.reserve(size);
}
void prepare(tsl::robin_map<int, int>& map, int size){
map.reserve(size);
}
void prepare(tsl::robin_map<string, string>& map, int size){
map.reserve(size);
}
void prepare(tsl::hopscotch_map<int, int>& map, int size){
map.reserve(size);
}
void prepare(tsl::hopscotch_map<string, string>& map, int size){
map.reserve(size);
}
// booooooost
void prepare(boost::unordered_map<int, int>& map, int size){
map.reserve(size);
}
void prepare(boost::unordered_map<string, string>& map, int size){
map.reserve(size);
}
// skarupke's maps
void prepare(ska::bytell_hash_map<int, int>& map, int size){
map.reserve(size);
}
void prepare(ska::bytell_hash_map<string, string>& map, int size){
map.reserve(size);
}
void prepare(ska::flat_hash_map<int, int>& map, int size){
map.reserve(size);
}
void prepare(ska::flat_hash_map<string, string>& map, int size){
map.reserve(size);
}
void prepare(ska::unordered_map<int, int>& map, int size){
map.reserve(size);
}
void prepare(ska::unordered_map<string, string>& map, int size){
map.reserve(size);
}
// Gregory Popovitch' paralel hashmaps
void prepare(phmap::parallel_flat_hash_map<int, int>& map, int size){
map.reserve(size);
}
void prepare(phmap::parallel_flat_hash_map<string, string>& map, int size){
map.reserve(size);
}
void prepare(phmap::parallel_node_hash_map<int, int>& map, int size){
map.reserve(size);
}
void prepare(phmap::parallel_node_hash_map<string, string>& map, int size){
map.reserve(size);
}
// emilib (Emil Ernerfeld library)
void prepare(emilib::HashMap<int, int>& map, int size){
map.reserve(size);
}
void prepare(emilib::HashMap<string, string>& map, int size){
map.reserve(size);
}
// martin robinhood
void prepare(robin_hood::unordered_flat_map<int, int>& map, int size){
map.reserve(size);
}
void prepare(robin_hood::unordered_flat_map<string, string>& map, int size){
map.reserve(size);
}
void prepare(robin_hood::unordered_node_map<int, int>& map, int size){
map.reserve(size);
}
void prepare(robin_hood::unordered_node_map<string, string>& map, int size){
map.reserve(size);
}
#endif /* TESTS_H */
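The hunks above replace the long list of per-map reserve() overloads with one generic template, keeping hand-written overloads only where sparsehash needs empty/deleted keys. A small illustrative sketch of that dispatch follows; fake_sparse_map is a stand-in type, not from the repo.
#include <unordered_map>
template<class T>
void prepare(T& map, int size) {        // generic: any map with reserve()
    map.reserve(size);
}
struct fake_sparse_map {                // stand-in for google::sparse_hash_map<int, int>
    void set_deleted_key(int) {}
};
void prepare(fake_sparse_map& map, int /*size*/) {  // exact-match overload is preferred
    map.set_deleted_key(-1);
}
void prepare_demo() {
    std::unordered_map<int, int> a;
    fake_sparse_map b;
    prepare(a, 50000);                  // picks the template -> reserve()
    prepare(b, 50000);                  // picks the overload -> set_deleted_key(-1)
}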

View File

@ -2,10 +2,12 @@
#ifndef TESTS_H
#define TESTS_H
+#include <iostream>
#include <vector>
#include <algorithm>
#include <iterator>
#include <chrono>
+#include <string>
// own
#include "./generator.h"
@ -17,8 +19,6 @@ using std::vector;
using std::cout;
template <class T>
vector<int> int_test(T map, int size){
    vector<int> results; // insert, lookup, unsuccesful lookup, delete times
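The body of int_test is not shown in this diff; the following is only a hedged sketch of a timing loop with the same shape (insert, successful lookup, unsuccessful lookup, delete), with key ranges and units chosen as assumptions.
#include <chrono>
#include <vector>
template <class T>
std::vector<int> int_test_sketch(T map, int size) {
    using namespace std::chrono;
    std::vector<int> results;  // insert, lookup, unsuccessful lookup, delete times
    steady_clock::time_point t0 = steady_clock::now();
    for (int i = 1; i <= size; ++i) map[i] = i;                       // insert
    steady_clock::time_point t1 = steady_clock::now();
    long long hits = 0;
    for (int i = 1; i <= size; ++i) hits += map.count(i);             // successful lookups
    steady_clock::time_point t2 = steady_clock::now();
    for (int i = size + 1; i <= 2 * size; ++i) hits += map.count(i);  // unsuccessful lookups
    steady_clock::time_point t3 = steady_clock::now();
    for (int i = 1; i <= size; ++i) map.erase(i);                     // delete
    steady_clock::time_point t4 = steady_clock::now();
    results.push_back(duration_cast<milliseconds>(t1 - t0).count());
    results.push_back(duration_cast<milliseconds>(t2 - t1).count());
    results.push_back(duration_cast<milliseconds>(t3 - t2).count());
    results.push_back(duration_cast<milliseconds>(t4 - t3).count());
    (void)hits;  // keep the lookups from being optimised away
    return results;
}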

View File

@ -1,14 +1,11 @@
-#include <iostream>
-#include <functional>
-#include "./src/includes/aggregate_tests.h"
-// // we can use to switch the map implementations to that
+#include "./includes/aggregate_tests.h"
+// // we can use typedefs and switch statements for the map implementations
// // we can add some cli handling so we can specify which maps to tests (or all)
-// typedef std::unordered_map<int, int> intmap;
-// typedef std::unordered_map<string, string> stringmap;
-//
+typedef std::unordered_map<int, int> intmap;
+typedef std::unordered_map<string, string> stringmap;
// // google sparse
// typedef google::sparse_hash_map<int, int> intmap;
// typedef google::sparse_hash_map<string,string> stringmap;
@ -18,8 +15,8 @@
// typedef google::dense_hash_map<string,string> stringmap;
//
// // abseil nodehashmap
-typedef absl::node_hash_map<int, int> intmap;
-typedef absl::node_hash_map<string,string> stringmap;
+// typedef absl::node_hash_map<int, int> intmap;
+// typedef absl::node_hash_map<string,string> stringmap;
//
// // flat hashmap
// typedef absl::flat_hash_map<int, int> intmap;
@ -74,43 +71,44 @@ typedef absl::node_hash_map<string,string> stringmap;
// // martin flat map
// typedef robin_hood::unordered_flat_map<int, int> intmap;
// typedef robin_hood::unordered_flat_map<string,string> stringmap;
-// // martin flat map
+// martin flat map
// typedef robin_hood::unordered_node_map<int, int> intmap;
// typedef robin_hood::unordered_node_map<string,string> stringmap;
/* test takes 2hrs for 30 runs for unordered_map hashmap
// if the other maps have about the same operation times, it'll
// take around ~40hrs total ************
// priorities are that the interface must be the same/similar to unordered_map
// and that we don't have to jump through hoops to get it to work
1. Google dense_hash_map [y] https://github.com/sparsehash/sparsehash
2. Google sparse_hash_map [y]
3. abseil node_hash_map [y] https://abseil.io/docs/cpp/tools/cmake-installs
4. abseil flat_hash_map [y]
5. Tessil/sparse-map/ [y] header only implementation for all tessil
6. Tessil/hopscotch-map[y]
7. tessil/robin-map[y] [y]
8. Boost unordered_map [y] just install boost with your package manager
9. skarupke/flat_hash_map [y] header only implementation
10. skarupke /bytell_hash_map [y]
11. skarupke/unordered_map [y]
12. greg7mdp/parallel-hashmap/paralel_flat [y] header only
13. greg7mdp/parallel-hashmap/paralel_node [y]
17. emilk/emilib emilib::hashmap [y] header only
18. martinus robin_hood::unordered_node_map [y]
19. martinus/robin_hood/ flatmap [y]
3. folly F14ValueMap // lots of dependencies, even tho it really seems cool
4. folly F14NodeMap
5. Tessil/ordered-map [n] something is wrong with this one, verrrrry slow
6. Tessil/array-hash[n] (not with a small modification of the insert function)
*/
int main() {
    time_point<steady_clock> start_test = steady_clock::now();
// string_test(stringmap{}, 1); // process gets killed for sizes >35000
    int_test_aggregate(intmap{}, 2);
    string_test_aggregate(stringmap{}, 2);
    time_point<steady_clock> end_test = steady_clock::now();
    std::cout << "\n\n 30 runs for all tests for 1 map: " << duration_cast<seconds>(end_test-start_test).count() << " seconds\n\n";
// test takes 2hrs for 30 runs for one hashmap
/* if the other maps have about the same operation times ************
// possible maps to bench. priorities are that the interface must be the same/similar to unordered_map
// and that we don't have to jump through hoops to get it to work
1. Google dense_hash_map [y] https://github.com/sparsehash/sparsehash
2. Google sparse_hash_map [y]
3. abseil node_hash_map [y] https://abseil.io/docs/cpp/tools/cmake-installs
4. abseil flat_hash_map [y]
5. Tessil/sparse-map/ [y] header only implementation for all tessil
6. Tessil/hopscotch-map[y]
7. tessil/robin-map[y] [y]
8. Boost unordered_map [y] just install boost with your package manager
9. skarupke/flat_hash_map [y] header only implementation
10. skarupke /bytell_hash_map [y]
11. skarupke/unordered_map [y]
12. greg7mdp/parallel-hashmap/paralel_flat [y] header only
13. greg7mdp/parallel-hashmap/paralel_node [y]
17. emilk/emilib emilib::hashmap [y] header only
18. martinus robin_hood::unordered_node_map [y]
19. martinus/robin_hood/ flatmap [y]
3. folly F14ValueMap
4. folly F14NodeMap
5. Tessil/ordered-map [n] something is wrong with this one, verrrrry slow
6. Tessil/array-hash[n] (not with a small modification of the insert function)
*/
}
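The comment block above mentions adding CLI handling to pick which maps to test instead of editing typedefs; a hedged sketch of what that could look like (names such as run_suite and cli_main are hypothetical, and the real int_test_aggregate / string_test_aggregate calls are only indicated):
#include <cstring>
#include <iostream>
#include <string>
#include <unordered_map>
template <class IntMap, class StringMap>
void run_suite(int runs) {
    IntMap im;
    StringMap sm;
    im.reserve(1000);   // placeholder work; the real driver would call
    sm.reserve(1000);   // int_test_aggregate(im, runs) / string_test_aggregate(sm, runs)
    (void)runs;
}
int cli_main(int argc, char** argv) {
    const char* which = (argc > 1) ? argv[1] : "unordered";
    if (std::strcmp(which, "unordered") == 0) {
        run_suite<std::unordered_map<int, int>,
                  std::unordered_map<std::string, std::string> >(2);
        return 0;
    }
    // further branches ("sparse", "dense", "absl_flat", ...) would instantiate
    // run_suite with the other typedefs listed above
    std::cout << "unknown map: " << which << "\n";
    return 1;
}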