diff --git a/.vscode/settings.json b/.vscode/settings.json
index 66c11a2..1e379f2 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -58,6 +58,7 @@
         "thread": "cpp",
         "cinttypes": "cpp",
         "typeinfo": "cpp",
-        "variant": "cpp"
+        "variant": "cpp",
+        "sparse_hash_map": "cpp"
     }
 }
\ No newline at end of file
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 3bdca81..e84f5f5 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -3,12 +3,29 @@ project(another_studproject)
 set(CMAKE_CXX_STANDARD 17)
 set(CMAKE_CXX_FLAGS "-O3 -flto=thin -march=native")
-add_executable(another_studproject
+
+
+# Process Abseil's CMake build system
+add_subdirectory(./src/includes/3thparty/abseil-cpp
+                 ./src/includes/3thparty/tsl
+                )
+
+
+add_executable(studproject
    ./src/includes/generator.h
    ./src/generator.cpp
    ./src/includes/tests.h
    ./src/includes/aggregate_tests.h
+   ./src/includes/3thparty/emilib/loguru.cpp
    main.cpp
+)
+
+target_link_libraries(studproject
+    absl::hash
+    absl::node_hash_map
+    absl::flat_hash_map
+    pthread
+    dl
+)
-)
\ No newline at end of file
diff --git a/main.cpp b/main.cpp
index 362ecb8..40bbd52 100644
--- a/main.cpp
+++ b/main.cpp
@@ -1,53 +1,116 @@
 #include <iostream>
-#include
 #include <chrono>
-#include
-// #include
 #include "./src/includes/aggregate_tests.h"
+// // we can use the typedefs below to switch the map implementation under test
+// // we can add some cli handling so we can specify which maps to test (or all); see the sketch after the typedef list
-
-
+// typedef std::unordered_map<int, int> intmap;
+// typedef std::unordered_map<std::string, std::string> stringmap;
+//
+// // google sparse
 // typedef google::sparse_hash_map<int, int> intmap;
-// typedef google::sparse_hash_map<std::string, std::string> stringmap
-typedef std::unordered_map<int, int> intmap;
-typedef std::unordered_map<std::string, std::string> stringmap;
-// we can use ^ to switch the map implementations to that
-// we can add some cli handling so we can specify which maps to test (or all)
+// typedef google::sparse_hash_map<std::string, std::string> stringmap;
+//
+// // google dense
+// typedef google::dense_hash_map<int, int> intmap;
+// typedef google::dense_hash_map<std::string, std::string> stringmap;
+//
+// // abseil nodehashmap
+typedef absl::node_hash_map<int, int> intmap;
+typedef absl::node_hash_map<std::string, std::string> stringmap;
+//
+// // abseil flat hashmap
+// typedef absl::flat_hash_map<int, int> intmap;
+// typedef absl::flat_hash_map<std::string, std::string> stringmap;
+//
+// // tessil sparse hashmap
+// typedef tsl::sparse_map<int, int> intmap;
+// typedef tsl::sparse_map<std::string, std::string> stringmap;
+//
+// // Tessil tsl::array_map
+// typedef tsl::array_map intmap;
+// typedef tsl::array_map stringmap;
+//
+// // Tessil tsl::ordered_map
+// typedef tsl::ordered_map<int, int> intmap;
+// typedef tsl::ordered_map<std::string, std::string> stringmap;
+//
+// // Tessil tsl::robin_map
+// typedef tsl::robin_map<int, int> intmap;
+// typedef tsl::robin_map<std::string, std::string> stringmap;
+//
+// // Tessil hopscotch_map
+// typedef tsl::hopscotch_map<int, int> intmap;
+// typedef tsl::hopscotch_map<std::string, std::string> stringmap;
+//
+// // Boost::unordered_map
+// typedef boost::unordered_map<int, int> intmap;
+// typedef boost::unordered_map<std::string, std::string> stringmap;
+//
+// // skarupke's unordered map
+// typedef ska::unordered_map<int, int> intmap;
+// typedef ska::unordered_map<std::string, std::string> stringmap;
+//
+// // skarupke's bytell hash map
+// typedef ska::bytell_hash_map<int, int> intmap;
+// typedef ska::bytell_hash_map<std::string, std::string> stringmap;
+//
+// // skarupke's flat hash map
+// typedef ska::flat_hash_map<int, int> intmap;
+// typedef ska::flat_hash_map<std::string, std::string> stringmap;
+//
+// // greg7mdp's flat hash map
+// typedef phmap::parallel_flat_hash_map<int, int> intmap;
+// typedef phmap::parallel_flat_hash_map<std::string, std::string> stringmap;
+//
+// // greg7mdp's hash map
+// typedef phmap::parallel_node_hash_map<int, int> intmap;
+// typedef phmap::parallel_node_hash_map<std::string, std::string> stringmap;
+// // emilib's hash map
+// typedef emilib::HashMap<int, int> intmap;
+// typedef emilib::HashMap<std::string, std::string> stringmap;
+// // martin flat map
+// typedef robin_hood::unordered_flat_map<int, int> intmap;
+// typedef robin_hood::unordered_flat_map<std::string, std::string> stringmap;
+// // martin node map
+// typedef robin_hood::unordered_node_map<int, int> intmap;
+// typedef robin_hood::unordered_node_map<std::string, std::string> stringmap;
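The comment at the top of the typedef block mentions cli handling. A minimal sketch of that idea, assuming only that int_test_aggregate and string_test_aggregate accept any unordered_map-like map plus a run count (as main() below uses them); the map names and argument parsing here are illustrative, not part of this patch:

    // Sketch: choose the map family at runtime instead of editing typedefs.
    #include <cstdlib>
    #include <iostream>
    #include <string>
    #include <unordered_map>
    #include "./src/includes/aggregate_tests.h"  // assumed to pull in the map headers

    int main(int argc, char** argv) {
        const std::string which = argc > 1 ? argv[1] : "absl_node";  // which map to test
        const int runs = argc > 2 ? std::atoi(argv[2]) : 2;          // runs per test

        if (which == "std") {
            int_test_aggregate(std::unordered_map<int, int>{}, runs);
            string_test_aggregate(std::unordered_map<std::string, std::string>{}, runs);
        } else if (which == "absl_node") {
            int_test_aggregate(absl::node_hash_map<int, int>{}, runs);
            string_test_aggregate(absl::node_hash_map<std::string, std::string>{}, runs);
        } else {                                   // extend with the other families as needed
            std::cerr << "unknown map: " << which << "\n";
            return 1;
        }
        return 0;
    }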
+
+
 int main() {
     time_point<steady_clock> start_test = steady_clock::now();
 
     // string_test(stringmap{}, 1); // process gets killed for sizes >35000
-    int_test_aggregate(intmap{}, 30);
-    string_test_aggregate(stringmap{}, 30);
+    int_test_aggregate(intmap{}, 2);
+    string_test_aggregate(stringmap{}, 2);
 
     time_point<steady_clock> end_test = steady_clock::now();
     std::cout << "\n\n 30 runs for all tests for 1 map: "
               << duration_cast<seconds>(end_test - start_test).count() << " seconds\n\n";
-    // test takes 52 mins for 10 runs for one hashmap
-    // so it'll take ~3 hours per map if we want 30 runs per test
+    // test takes 2hrs for 30 runs for one hashmap
 
     /* if the other maps have about the same operation times  ************
 
-    // maps to benchmark:
-    1. Google dense_hash_map
-    2. Google sparse_hash_map
+    // possible maps to bench. priorities are that the interface must be the same as/similar to unordered_map's
+    // and that we don't have to jump through hoops to get it to work
+    1. Google dense_hash_map [y] https://github.com/sparsehash/sparsehash
+    2. Google sparse_hash_map [y]
+    3. abseil node_hash_map [y] https://abseil.io/docs/cpp/tools/cmake-installs
+    4. abseil flat_hash_map [y]
+    5. Tessil/sparse-map/ [y] header only implementation for all tessil
+    6. Tessil/hopscotch-map [y]
+    7. tessil/robin-map [y]
+    8. Boost unordered_map [y] just install boost with your package manager
+    9. skarupke/flat_hash_map [y] header only implementation
+    10. skarupke/bytell_hash_map [y]
+    11. skarupke/unordered_map [y]
+    12. greg7mdp/parallel-hashmap/parallel_flat [y] header only
+    13. greg7mdp/parallel-hashmap/parallel_node [y]
+    17. emilk/emilib emilib::hashmap [y] header only
+    18. martinus robin_hood::unordered_node_map [y]
+    19. martinus/robin_hood/ flatmap [y]
     3. folly F14ValueMap
     4. folly F14NodeMap
-    5. Tessil/ordered-map
-    6. Tessil/array-hash
-    7. Tessil/hopscotch-map
-    8. Tessil/sparse-map/
-    9. abseil node_hash_map
-    10. abseil flat_hash_map
-    11. Glib GHashTable
-    12. Boost unordered_map
-    13. Qt QHash
-    14. skarupke/flat_hash_map
-    15. greg7mdp/sparsepp
-    16. greg7mdp /parallel-hashmap (phmap::flat_hash_map and phmap::node_hash_map)
-    17. emilk/emilib emilib::hashmap
-    18. martinus robin_hood::unordered_node_map
-    19. martinus/robin-hood-hashing/
-    20. skarupke /flat_hash_map
+    5. Tessil/ordered-map [n] something is wrong with this one, very slow
+    6. Tessil/array-hash [n] (not without a small modification of the insert function)
 */
 }
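results.csv (added below) holds one row per measured operation: the operation label (int_insert, int_succ_lookup, int_nosucc_lookup, int_delete), the map's type name in single quotes (the name itself can contain commas), then the timings for the successively larger test sizes. The column layout and units are inferred from the rows, not documented by the harness; under that assumption, a standalone reader that averages one operation per row could look like:

    // Sketch: parse results.csv rows of the assumed shape
    //   <operation>, '<map type>', t1, t2, ..., tN
    // and print the mean of the trailing timing columns for int_insert rows.
    #include <fstream>
    #include <iostream>
    #include <sstream>
    #include <string>

    int main() {
        std::ifstream in("results.csv");
        for (std::string line; std::getline(in, line); ) {
            const auto open  = line.find('\'');   // the quoted map name may contain
            const auto close = line.rfind('\'');  // commas, so split on the quotes
            if (open == std::string::npos || close == open) continue;

            const std::string op   = line.substr(0, line.find(','));
            const std::string name = line.substr(open + 1, close - open - 1);
            if (op != "int_insert") continue;

            double sum = 0; int n = 0;
            std::istringstream cells(line.substr(close + 2));
            for (std::string cell; std::getline(cells, cell, ','); ++n)
                sum += std::stod(cell);           // no error handling: sketch only
            if (n) std::cout << name << ": " << sum / n << '\n';
        }
    }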
diff --git a/results.csv b/results.csv
index e69de29..72e08bd 100644
--- a/results.csv
+++ b/results.csv
@@ -0,0 +1,305 @@
+
+int_insert, 'google::dense_hash_map', 26, 34, 64, 40, 40, 50, 40, 44, 51, 45, 77, 47, 51, 57
+int_succ_lookup, 'google::dense_hash_map', 10, 18, 30, 20, 18, 22, 21, 23, 24, 23, 27, 24, 25, 26
+int_nosucc_lookup, 'google::dense_hash_map', 31, 33, 62, 54, 43, 40, 43, 49, 56, 42, 53, 52, 55, 59
+int_delete, 'google::dense_hash_map', 13, 11, 26, 38, 16, 13, 15, 17, 22, 15, 32, 19, 23, 24
+int_insert, 'google::dense_hash_map', 19, 53, 38, 36, 40, 36, 40, 43, 51, 41, 44, 48, 52, 59
+int_succ_lookup, 'google::dense_hash_map', 7, 19, 20, 17, 18, 20, 21, 22, 24, 23, 24, 24, 27, 26
+int_nosucc_lookup, 'google::dense_hash_map', 23, 72, 39, 36, 43, 40, 43, 50, 56, 42, 50, 49, 57, 59
+int_delete, 'google::dense_hash_map', 9, 35, 17, 13, 17, 16, 15, 21, 22, 17, 22, 18, 24, 24
+int_insert, 'google::dense_hash_map, std::__cxx11::basic_string >', 146, 155, 159, 175, 178, 161, 167, 179, 195, 185, 191, 202, 205, 231
+int_succ_lookup, 'google::dense_hash_map, std::__cxx11::basic_string >', 87, 98, 110, 106, 110, 117, 113, 112, 121, 117, 119, 119, 129, 128
+int_nosucc_lookup, 'google::dense_hash_map, std::__cxx11::basic_string >', 189, 160, 184, 170, 182, 200, 189, 184, 192, 170, 188, 185, 199, 193
+int_delete, 'google::dense_hash_map, std::__cxx11::basic_string >', 91, 91, 113, 119, 108, 134, 103, 104, 110, 105, 108, 109, 118, 131
+int_insert, 'google::dense_hash_map, std::__cxx11::basic_string >', 213, 154, 148, 169, 185, 162, 170, 175, 192, 180, 186, 197, 206, 214
+int_succ_lookup, 'google::dense_hash_map, std::__cxx11::basic_string >', 122, 91, 98, 103, 109, 101, 107, 107, 114, 114, 120, 123, 127, 141
+int_nosucc_lookup, 'google::dense_hash_map, std::__cxx11::basic_string >', 234, 166, 159, 174, 183, 160, 173, 181, 184, 169, 183, 194, 200, 194
+int_delete, 'google::dense_hash_map, std::__cxx11::basic_string >', 126, 90, 89, 111, 105, 94, 96, 102, 112, 111, 110, 111, 123, 124
+int_insert, 'absl::node_hash_map', 101, 116, 91, 90, 101, 99, 107, 106, 114, 114, 123, 161, 138, 130
+int_succ_lookup, 'absl::node_hash_map', 24, 87, 40, 40, 52, 48, 49, 49, 58, 59, 61, 68, 61, 73
+int_nosucc_lookup, 'absl::node_hash_map', 464, 141, 90, 95, 99, 98, 100, 105, 112, 114, 118, 134, 144, 132
+int_delete, 'absl::node_hash_map', 140, 257, 139, 126, 138, 145, 146, 145, 189, 182, 179, 222, 201, 270
+int_insert, 'absl::node_hash_map', 73, 110, 83, 90, 103, 99, 101, 105, 116, 118, 120, 130, 142, 134
+int_succ_lookup, 'absl::node_hash_map', 19, 64, 38, 40, 52, 47, 50, 51, 62, 61, 61, 61, 61, 70
+int_nosucc_lookup, 'absl::node_hash_map', 502, 192, 90, 90, 101, 99, 111, 105, 125, 117, 122, 127, 141, 126
+int_delete, 'absl::node_hash_map', 95, 269, 110, 121, 144, 139, 175, 146, 190, 187, 187, 187, 194, 222
+int_insert, 'absl::node_hash_map, std::__cxx11::basic_string >', 127, 163, 171, 180, 190, 187, 193, 226, 208, 235, 243, 286, 287, 228
+int_succ_lookup, 'absl::node_hash_map, std::__cxx11::basic_string >', 185, 164, 173, 178, 204, 197, 219, 206, 231, 236, 240, 290, 240, 278
+int_nosucc_lookup, 'absl::node_hash_map, std::__cxx11::basic_string >', 938, 139, 133, 145, 145, 152, 333, 205, 167, 189, 187, 268, 221, 183
+int_delete, 'absl::node_hash_map, std::__cxx11::basic_string >', 244, 223, 191, 190, 208, 211, 326, 303, 250, 253, 252, 292, 259, 289
+int_insert, 'absl::node_hash_map, std::__cxx11::basic_string >', 130, 148, 169,
174, 187, 197, 191, 203, 220, 211, 218, 237, 259, 232 +int_succ_lookup, 'absl::node_hash_map, std::__cxx11::basic_string >', 106, 142, 169, 178, 194, 199, 199, 199, 248, 236, 239, 247, 243, 286 +int_nosucc_lookup, 'absl::node_hash_map, std::__cxx11::basic_string >', 844, 133, 129, 140, 144, 146, 147, 158, 164, 164, 168, 178, 200, 188 +int_delete, 'absl::node_hash_map, std::__cxx11::basic_string >', 223, 163, 179, 187, 213, 210, 233, 225, 244, 249, 275, 260, 335, 296 +int_insert, 'absl::flat_hash_map', 42, 53, 45, 44, 55, 45, 41, 41, 55, 55, 52, 58, 69, 71 +int_succ_lookup, 'absl::flat_hash_map', 25, 58, 53, 31, 68, 29, 25, 24, 35, 34, 35, 35, 34, 52 +int_nosucc_lookup, 'absl::flat_hash_map', 334, 76, 54, 59, 73, 58, 50, 56, 67, 64, 63, 69, 79, 83 +int_delete, 'absl::flat_hash_map', 101, 61, 58, 51, 60, 48, 33, 35, 51, 46, 47, 48, 47, 69 +int_insert, 'absl::flat_hash_map', 43, 29, 76, 40, 51, 38, 38, 45, 86, 52, 52, 57, 69, 70 +int_succ_lookup, 'absl::flat_hash_map', 16, 10, 52, 26, 40, 25, 24, 26, 56, 33, 38, 34, 35, 44 +int_nosucc_lookup, 'absl::flat_hash_map', 188, 36, 74, 63, 63, 50, 49, 55, 65, 61, 65, 68, 79, 80 +int_delete, 'absl::flat_hash_map', 29, 26, 157, 59, 57, 32, 32, 36, 48, 45, 49, 49, 46, 66 +int_insert, 'absl::flat_hash_map, std::__cxx11::basic_string >', 129, 155, 198, 174, 188, 187, 188, 194, 212, 213, 216, 224, 239, 240 +int_succ_lookup, 'absl::flat_hash_map, std::__cxx11::basic_string >', 88, 148, 140, 115, 139, 133, 134, 135, 164, 165, 164, 164, 164, 193 +int_nosucc_lookup, 'absl::flat_hash_map, std::__cxx11::basic_string >', 978, 157, 178, 135, 146, 146, 149, 155, 171, 170, 168, 177, 195, 199 +int_delete, 'absl::flat_hash_map, std::__cxx11::basic_string >', 193, 141, 139, 100, 130, 132, 130, 134, 162, 158, 159, 161, 162, 196 +int_insert, 'absl::flat_hash_map, std::__cxx11::basic_string >', 263, 181, 166, 169, 184, 187, 191, 194, 210, 209, 213, 220, 236, 243 +int_succ_lookup, 'absl::flat_hash_map, std::__cxx11::basic_string >', 173, 136, 115, 115, 135, 135, 138, 136, 164, 164, 165, 169, 165, 193 +int_nosucc_lookup, 'absl::flat_hash_map, std::__cxx11::basic_string >', 576, 223, 129, 142, 145, 143, 211, 297, 168, 163, 174, 175, 193, 193 +int_delete, 'absl::flat_hash_map, std::__cxx11::basic_string >', 123, 132, 101, 104, 134, 129, 180, 137, 177, 160, 159, 159, 157, 195 +int_insert, 'absl::flat_hash_map', 42, 32, 92, 48, 35, 36, 40, 41, 50, 51, 54, 56, 71, 71 +int_succ_lookup, 'absl::flat_hash_map', 19, 20, 68, 26, 24, 24, 26, 25, 34, 35, 38, 35, 36, 46 +int_nosucc_lookup, 'absl::flat_hash_map', 312, 40, 58, 57, 52, 48, 52, 50, 63, 62, 65, 71, 77, 78 +int_delete, 'absl::flat_hash_map', 55, 28, 177, 37, 30, 29, 35, 30, 46, 52, 45, 54, 45, 71 +int_insert, 'absl::flat_hash_map', 27, 31, 30, 37, 36, 39, 37, 41, 50, 52, 54, 58, 71, 71 +int_succ_lookup, 'absl::flat_hash_map', 8, 12, 16, 23, 25, 26, 29, 25, 34, 35, 41, 36, 36, 52 +int_nosucc_lookup, 'absl::flat_hash_map', 173, 39, 38, 51, 49, 51, 51, 51, 62, 65, 66, 67, 82, 80 +int_delete, 'absl::flat_hash_map', 19, 25, 22, 62, 32, 29, 30, 31, 44, 45, 57, 45, 52, 73 +int_insert, 'absl::flat_hash_map, std::__cxx11::basic_string >', 183, 178, 155, 154, 193, 187, 189, 193, 208, 223, 217, 222, 238, 243 +int_succ_lookup, 'absl::flat_hash_map, std::__cxx11::basic_string >', 104, 171, 111, 114, 138, 136, 153, 141, 166, 164, 165, 165, 168, 192 +int_nosucc_lookup, 'absl::flat_hash_map, std::__cxx11::basic_string >', 347, 127, 126, 132, 144, 144, 145, 168, 164, 163, 169, 174, 188, 195 +int_delete, 'absl::flat_hash_map, 
std::__cxx11::basic_string >', 102, 101, 94, 96, 133, 134, 131, 137, 159, 155, 159, 161, 158, 188 +int_insert, 'absl::flat_hash_map, std::__cxx11::basic_string >', 186, 140, 154, 159, 185, 182, 187, 214, 207, 257, 214, 220, 292, 240 +int_succ_lookup, 'absl::flat_hash_map, std::__cxx11::basic_string >', 150, 101, 112, 114, 136, 137, 138, 138, 156, 219, 164, 165, 200, 195 +int_nosucc_lookup, 'absl::flat_hash_map, std::__cxx11::basic_string >', 370, 122, 127, 131, 138, 141, 150, 149, 163, 274, 169, 173, 269, 206 +int_delete, 'absl::flat_hash_map, std::__cxx11::basic_string >', 127, 83, 93, 100, 132, 133, 128, 125, 154, 221, 159, 158, 206, 212 +int_insert, 'tsl::sparse_map', 67, 88, 108, 81, 94, 97, 118, 118, 142, 142, 143, 151, 159, 171 +int_succ_lookup, 'tsl::sparse_map', 20, 35, 36, 32, 36, 42, 50, 50, 59, 68, 68, 72, 76, 80 +int_nosucc_lookup, 'tsl::sparse_map', 79, 130, 112, 90, 103, 111, 127, 133, 151, 161, 166, 178, 176, 202 +int_delete, 'tsl::sparse_map', 32, 42, 54, 41, 56, 51, 54, 65, 69, 72, 80, 82, 85, 101 +int_insert, 'tsl::sparse_map', 65, 103, 107, 98, 90, 155, 128, 137, 127, 155, 140, 175, 153, 159 +int_succ_lookup, 'tsl::sparse_map', 20, 29, 71, 33, 36, 120, 49, 53, 56, 70, 66, 73, 72, 76 +int_nosucc_lookup, 'tsl::sparse_map', 74, 85, 121, 88, 101, 147, 120, 130, 140, 151, 158, 173, 177, 184 +int_delete, 'tsl::sparse_map', 31, 36, 68, 41, 48, 53, 54, 59, 67, 72, 76, 82, 89, 90 +int_insert, 'tsl::sparse_map, std::__cxx11::basic_string >', 344, 626, 395, 487, 589, 462, 509, 545, 654, 467, 504, 556, 648, 679 +int_succ_lookup, 'tsl::sparse_map, std::__cxx11::basic_string >', 94, 151, 119, 125, 133, 135, 139, 145, 159, 154, 164, 167, 175, 178 +int_nosucc_lookup, 'tsl::sparse_map, std::__cxx11::basic_string >', 650, 576, 600, 494, 580, 461, 516, 552, 653, 472, 519, 549, 621, 676 +int_delete, 'tsl::sparse_map, std::__cxx11::basic_string >', 325, 380, 376, 313, 360, 285, 322, 344, 400, 310, 336, 365, 395, 426 +int_insert, 'tsl::sparse_map, std::__cxx11::basic_string >', 547, 407, 391, 469, 549, 432, 477, 569, 603, 474, 514, 559, 602, 649 +int_succ_lookup, 'tsl::sparse_map, std::__cxx11::basic_string >', 71, 103, 115, 122, 130, 136, 137, 162, 150, 158, 161, 166, 169, 178 +int_nosucc_lookup, 'tsl::sparse_map, std::__cxx11::basic_string >', 493, 443, 382, 478, 565, 428, 470, 709, 603, 468, 509, 544, 596, 641 +int_delete, 'tsl::sparse_map, std::__cxx11::basic_string >', 253, 265, 256, 301, 356, 287, 312, 387, 396, 315, 349, 366, 392, 427 +int_insert, 'tsl::sparse_map', 69, 234, 77, 145, 102, 108, 114, 117, 139, 143, 148, 157, 276, 192 +int_succ_lookup, 'tsl::sparse_map', 21, 52, 36, 79, 38, 47, 47, 50, 56, 67, 72, 77, 75, 87 +int_nosucc_lookup, 'tsl::sparse_map', 90, 135, 85, 127, 103, 113, 125, 135, 152, 154, 164, 178, 187, 183 +int_delete, 'tsl::sparse_map', 56, 49, 38, 44, 48, 50, 55, 60, 71, 71, 77, 96, 90, 95 +int_insert, 'tsl::sparse_map', 67, 87, 102, 153, 89, 119, 129, 135, 133, 137, 158, 152, 181, 159 +int_succ_lookup, 'tsl::sparse_map', 19, 28, 53, 23, 35, 47, 49, 89, 59, 66, 73, 102, 77, 80 +int_nosucc_lookup, 'tsl::sparse_map', 72, 89, 92, 85, 97, 115, 119, 184, 151, 148, 172, 180, 290, 178 +int_delete, 'tsl::sparse_map', 31, 66, 49, 40, 52, 53, 56, 63, 72, 72, 79, 85, 87, 93 +int_insert, 'tsl::sparse_map, std::__cxx11::basic_string >', 323, 451, 559, 481, 558, 464, 513, 561, 640, 486, 527, 576, 675, 708 +int_succ_lookup, 'tsl::sparse_map, std::__cxx11::basic_string >', 67, 159, 154, 120, 141, 136, 141, 153, 181, 167, 173, 177, 187, 191 +int_nosucc_lookup, 'tsl::sparse_map, 
std::__cxx11::basic_string >', 614, 628, 449, 501, 560, 479, 518, 568, 962, 478, 537, 577, 661, 714 +int_delete, 'tsl::sparse_map, std::__cxx11::basic_string >', 245, 309, 251, 312, 377, 296, 311, 391, 431, 326, 352, 383, 415, 449 +int_insert, 'tsl::sparse_map, std::__cxx11::basic_string >', 547, 418, 406, 495, 576, 454, 494, 554, 641, 485, 544, 588, 633, 668 +int_succ_lookup, 'tsl::sparse_map, std::__cxx11::basic_string >', 69, 101, 116, 128, 136, 139, 151, 150, 165, 167, 169, 176, 184, 190 +int_nosucc_lookup, 'tsl::sparse_map, std::__cxx11::basic_string >', 546, 446, 408, 511, 591, 447, 504, 550, 634, 488, 536, 583, 643, 662 +int_delete, 'tsl::sparse_map, std::__cxx11::basic_string >', 340, 279, 262, 321, 372, 301, 329, 361, 423, 328, 367, 386, 417, 441 +int_insert, 'tsl::robin_map', 18, 25, 37, 36, 35, 28, 31, 31, 41, 26, 30, 33, 39, 53 +int_succ_lookup, 'tsl::robin_map', 9, 25, 21, 20, 20, 23, 24, 24, 24, 20, 29, 24, 24, 26 +int_nosucc_lookup, 'tsl::robin_map', 23, 44, 28, 33, 39, 32, 30, 34, 42, 29, 32, 38, 38, 49 +int_delete, 'tsl::robin_map', 21, 51, 24, 24, 31, 22, 30, 27, 35, 23, 26, 29, 34, 68 +int_insert, 'tsl::robin_map', 22, 34, 31, 36, 61, 26, 32, 30, 39, 27, 32, 33, 37, 42 +int_succ_lookup, 'tsl::robin_map', 11, 24, 27, 22, 24, 24, 37, 24, 24, 24, 25, 25, 25, 28 +int_nosucc_lookup, 'tsl::robin_map', 26, 38, 30, 33, 36, 33, 31, 36, 45, 34, 33, 35, 40, 46 +int_delete, 'tsl::robin_map', 23, 41, 25, 33, 30, 21, 25, 28, 36, 25, 27, 31, 33, 38 +int_insert, 'tsl::robin_map, std::__cxx11::basic_string >', 199, 145, 149, 172, 182, 166, 174, 176, 238, 173, 218, 193, 206, 209 +int_succ_lookup, 'tsl::robin_map, std::__cxx11::basic_string >', 95, 97, 100, 108, 114, 110, 112, 109, 180, 113, 161, 127, 130, 133 +int_nosucc_lookup, 'tsl::robin_map, std::__cxx11::basic_string >', 174, 128, 119, 133, 149, 126, 136, 140, 250, 132, 211, 159, 165, 174 +int_delete, 'tsl::robin_map, std::__cxx11::basic_string >', 126, 112, 100, 119, 150, 104, 114, 167, 141, 108, 183, 135, 169, 157 +int_insert, 'tsl::robin_map, std::__cxx11::basic_string >', 140, 166, 149, 165, 177, 157, 180, 180, 195, 179, 188, 208, 213, 211 +int_succ_lookup, 'tsl::robin_map, std::__cxx11::basic_string >', 90, 133, 105, 104, 114, 109, 113, 115, 122, 119, 122, 150, 134, 139 +int_nosucc_lookup, 'tsl::robin_map, std::__cxx11::basic_string >', 125, 153, 116, 134, 149, 129, 137, 143, 165, 141, 145, 149, 161, 171 +int_delete, 'tsl::robin_map, std::__cxx11::basic_string >', 105, 116, 95, 117, 157, 109, 118, 127, 149, 117, 127, 165, 148, 159 +int_insert, 'tsl::hopscotch_map', 43, 47, 135, 85, 62, 72, 89, 103, 66, 78, 94, 115, 163, 114 +int_succ_lookup, 'tsl::hopscotch_map', 11, 12, 69, 24, 20, 23, 29, 30, 22, 25, 28, 33, 42, 40 +int_nosucc_lookup, 'tsl::hopscotch_map', 437, 56, 114, 87, 64, 76, 87, 111, 67, 80, 96, 123, 182, 110 +int_delete, 'tsl::hopscotch_map', 26, 14, 24, 24, 20, 24, 27, 34, 24, 26, 32, 37, 47, 32 +int_insert, 'tsl::hopscotch_map', 56, 76, 89, 77, 62, 168, 97, 178, 67, 80, 93, 114, 164, 70 +int_succ_lookup, 'tsl::hopscotch_map', 14, 23, 20, 23, 21, 62, 26, 30, 22, 25, 28, 35, 40, 24 +int_nosucc_lookup, 'tsl::hopscotch_map', 232, 103, 76, 84, 64, 122, 91, 122, 65, 81, 95, 121, 172, 73 +int_delete, 'tsl::hopscotch_map', 19, 41, 20, 25, 20, 41, 31, 36, 21, 26, 29, 36, 54, 25 +int_insert, 'tsl::hopscotch_map, std::__cxx11::basic_string >', 230, 167, 163, 190, 161, 167, 181, 207, 172, 188, 204, 341, 314, 260 +int_succ_lookup, 'tsl::hopscotch_map, std::__cxx11::basic_string >', 156, 98, 99, 113, 100, 108, 109, 118, 114, 115, 
123, 208, 142, 148 +int_nosucc_lookup, 'tsl::hopscotch_map, std::__cxx11::basic_string >', 887, 194, 151, 194, 141, 162, 174, 206, 162, 172, 199, 302, 336, 192 +int_delete, 'tsl::hopscotch_map, std::__cxx11::basic_string >', 143, 90, 90, 104, 100, 100, 107, 113, 101, 113, 117, 217, 136, 112 +int_insert, 'tsl::hopscotch_map, std::__cxx11::basic_string >', 140, 170, 162, 186, 157, 216, 185, 206, 173, 226, 205, 274, 322, 185 +int_succ_lookup, 'tsl::hopscotch_map, std::__cxx11::basic_string >', 74, 98, 98, 109, 103, 116, 112, 114, 112, 136, 123, 181, 145, 119 +int_nosucc_lookup, 'tsl::hopscotch_map, std::__cxx11::basic_string >', 623, 190, 152, 194, 149, 174, 175, 207, 147, 185, 195, 261, 347, 161 +int_delete, 'tsl::hopscotch_map, std::__cxx11::basic_string >', 83, 92, 94, 139, 101, 102, 109, 112, 107, 109, 116, 126, 152, 252 +int_insert, 'boost::unordered::unordered_map', 175, 206, 424, 392, 373, 430, 290, 255, 369, 404, 268, 214, 222, 316 +int_succ_lookup, 'boost::unordered::unordered_map', 66, 56, 153, 223, 89, 143, 84, 74, 208, 154, 85, 124, 79, 79 +int_nosucc_lookup, 'boost::unordered::unordered_map', 263, 164, 248, 286, 201, 348, 309, 204, 384, 257, 259, 275, 223, 233 +int_delete, 'boost::unordered::unordered_map', 393, 144, 217, 320, 237, 342, 414, 265, 336, 408, 433, 539, 409, 338 +int_insert, 'boost::unordered::unordered_map', 159, 323, 316, 207, 294, 469, 369, 347, 300, 326, 311, 243, 318, 300 +int_succ_lookup, 'boost::unordered::unordered_map', 67, 200, 84, 154, 153, 201, 136, 128, 87, 160, 169, 174, 176, 241 +int_nosucc_lookup, 'boost::unordered::unordered_map', 155, 367, 212, 211, 287, 304, 398, 219, 274, 343, 338, 447, 255, 363 +int_delete, 'boost::unordered::unordered_map', 293, 486, 282, 224, 332, 472, 311, 326, 447, 377, 388, 374, 295, 345 +int_insert, 'boost::unordered::unordered_map, std::__cxx11::basic_string >', 446, 614, 504, 317, 415, 347, 787, 761, 534, 390, 502, 428, 509, 712 +int_succ_lookup, 'boost::unordered::unordered_map, std::__cxx11::basic_string >', 308, 506, 427, 284, 268, 373, 514, 491, 417, 367, 488, 388, 317, 326 +int_nosucc_lookup, 'boost::unordered::unordered_map, std::__cxx11::basic_string >', 445, 405, 398, 370, 294, 464, 562, 326, 516, 502, 324, 528, 624, 564 +int_delete, 'boost::unordered::unordered_map, std::__cxx11::basic_string >', 363, 482, 436, 472, 384, 359, 683, 498, 516, 393, 549, 442, 672, 706 +int_insert, 'boost::unordered::unordered_map, std::__cxx11::basic_string >', 315, 538, 328, 377, 462, 270, 385, 429, 560, 416, 497, 570, 474, 581 +int_succ_lookup, 'boost::unordered::unordered_map, std::__cxx11::basic_string >', 306, 249, 338, 348, 433, 326, 318, 381, 313, 410, 292, 306, 529, 330 +int_nosucc_lookup, 'boost::unordered::unordered_map, std::__cxx11::basic_string >', 504, 285, 511, 417, 624, 384, 417, 482, 306, 450, 262, 511, 336, 616 +int_delete, 'boost::unordered::unordered_map, std::__cxx11::basic_string >', 459, 495, 417, 348, 393, 748, 346, 409, 478, 434, 439, 590, 443, 659 +int_insert, 'ska::unordered_map', 135, 168, 222, 129, 158, 137, 138, 165, 178, 144, 153, 163, 180, 297 +int_succ_lookup, 'ska::unordered_map', 41, 79, 60, 33, 38, 35, 36, 39, 43, 40, 43, 44, 45, 98 +int_nosucc_lookup, 'ska::unordered_map', 212, 141, 199, 127, 195, 134, 139, 157, 170, 144, 209, 163, 175, 273 +int_delete, 'ska::unordered_map', 206, 157, 120, 107, 152, 137, 179, 152, 171, 151, 180, 193, 185, 253 +int_insert, 'ska::unordered_map', 94, 203, 115, 130, 286, 169, 145, 156, 186, 150, 214, 167, 179, 202 +int_succ_lookup, 'ska::unordered_map', 23, 76, 32, 
47, 82, 46, 52, 39, 61, 38, 44, 46, 43, 47 +int_nosucc_lookup, 'ska::unordered_map', 78, 185, 109, 169, 231, 131, 173, 155, 219, 147, 262, 181, 173, 184 +int_delete, 'ska::unordered_map', 57, 219, 100, 134, 283, 129, 152, 141, 181, 153, 154, 169, 160, 170 +int_insert, 'ska::unordered_map, std::__cxx11::basic_string >', 184, 182, 221, 241, 240, 230, 293, 253, 260, 233, 236, 249, 270, 451 +int_succ_lookup, 'ska::unordered_map, std::__cxx11::basic_string >', 115, 147, 195, 191, 185, 180, 246, 193, 196, 198, 191, 195, 240, 355 +int_nosucc_lookup, 'ska::unordered_map, std::__cxx11::basic_string >', 146, 242, 234, 248, 257, 229, 312, 264, 269, 241, 270, 278, 287, 427 +int_delete, 'ska::unordered_map, std::__cxx11::basic_string >', 138, 217, 210, 206, 227, 194, 228, 228, 221, 199, 214, 217, 224, 352 +int_insert, 'ska::unordered_map, std::__cxx11::basic_string >', 154, 212, 345, 218, 268, 350, 239, 337, 247, 221, 252, 237, 249, 269 +int_succ_lookup, 'ska::unordered_map, std::__cxx11::basic_string >', 132, 165, 294, 192, 187, 241, 185, 310, 196, 216, 201, 196, 206, 236 +int_nosucc_lookup, 'ska::unordered_map, std::__cxx11::basic_string >', 258, 199, 335, 218, 224, 309, 252, 356, 239, 217, 225, 231, 248, 329 +int_delete, 'ska::unordered_map, std::__cxx11::basic_string >', 212, 190, 186, 214, 214, 248, 205, 284, 210, 209, 242, 213, 222, 290 +int_insert, 'ska::bytell_hash_map', 60, 50, 71, 62, 54, 71, 76, 90, 68, 78, 120, 117, 150, 100 +int_succ_lookup, 'ska::bytell_hash_map', 14, 9, 20, 14, 20, 27, 23, 23, 25, 26, 29, 29, 30, 38 +int_nosucc_lookup, 'ska::bytell_hash_map', 118, 56, 91, 68, 54, 70, 74, 103, 67, 75, 93, 115, 184, 104 +int_delete, 'ska::bytell_hash_map', 27, 19, 36, 27, 26, 35, 29, 41, 33, 37, 40, 51, 67, 39 +int_insert, 'ska::bytell_hash_map', 44, 113, 53, 106, 75, 61, 89, 155, 78, 78, 99, 118, 177, 80 +int_succ_lookup, 'ska::bytell_hash_map', 8, 40, 21, 20, 29, 21, 24, 45, 26, 28, 28, 30, 44, 28 +int_nosucc_lookup, 'ska::bytell_hash_map', 66, 111, 66, 79, 78, 64, 81, 160, 67, 77, 93, 113, 218, 70 +int_delete, 'ska::bytell_hash_map', 20, 24, 26, 26, 32, 27, 33, 75, 29, 34, 40, 48, 66, 37 +int_insert, 'ska::bytell_hash_map, std::__cxx11::basic_string >', 170, 191, 176, 198, 185, 190, 213, 222, 197, 205, 216, 240, 367, 210 +int_succ_lookup, 'ska::bytell_hash_map, std::__cxx11::basic_string >', 230, 116, 107, 118, 122, 106, 124, 120, 123, 135, 121, 127, 176, 124 +int_nosucc_lookup, 'ska::bytell_hash_map, std::__cxx11::basic_string >', 267, 192, 212, 195, 137, 161, 201, 207, 142, 159, 196, 368, 372, 162 +int_delete, 'ska::bytell_hash_map, std::__cxx11::basic_string >', 97, 116, 114, 132, 112, 122, 121, 131, 121, 122, 135, 234, 201, 132 +int_insert, 'ska::bytell_hash_map, std::__cxx11::basic_string >', 214, 186, 175, 197, 175, 189, 255, 247, 188, 201, 255, 241, 589, 209 +int_succ_lookup, 'ska::bytell_hash_map, std::__cxx11::basic_string >', 83, 97, 103, 95, 120, 115, 155, 123, 120, 121, 130, 135, 153, 131 +int_nosucc_lookup, 'ska::bytell_hash_map, std::__cxx11::basic_string >', 191, 177, 172, 171, 195, 160, 210, 218, 169, 171, 190, 219, 299, 235 +int_delete, 'ska::bytell_hash_map, std::__cxx11::basic_string >', 218, 104, 109, 102, 115, 121, 131, 134, 126, 128, 135, 142, 152, 157 +int_insert, 'ska::flat_hash_map', 60, 30, 84, 41, 51, 41, 46, 48, 54, 57, 49, 52, 66, 70 +int_succ_lookup, 'ska::flat_hash_map', 28, 12, 60, 19, 21, 22, 30, 24, 24, 27, 26, 27, 28, 27 +int_nosucc_lookup, 'ska::flat_hash_map', 49, 33, 58, 45, 49, 48, 77, 51, 60, 50, 52, 59, 59, 62 +int_delete, 'ska::flat_hash_map', 
23, 18, 29, 23, 28, 21, 36, 27, 35, 24, 30, 30, 38, 36 +int_insert, 'ska::flat_hash_map', 25, 79, 43, 43, 49, 44, 49, 48, 52, 45, 49, 50, 52, 70 +int_succ_lookup, 'ska::flat_hash_map', 8, 19, 66, 19, 21, 23, 29, 24, 24, 25, 26, 28, 26, 26 +int_nosucc_lookup, 'ska::flat_hash_map', 27, 63, 85, 50, 61, 45, 101, 71, 59, 52, 47, 57, 55, 60 +int_delete, 'ska::flat_hash_map', 15, 46, 30, 25, 45, 20, 47, 51, 35, 25, 25, 30, 38, 35 +int_insert, 'ska::flat_hash_map, std::__cxx11::basic_string >', 170, 180, 169, 162, 180, 158, 174, 173, 261, 176, 181, 190, 277, 203 +int_succ_lookup, 'ska::flat_hash_map, std::__cxx11::basic_string >', 187, 109, 114, 101, 109, 102, 106, 109, 121, 113, 115, 122, 170, 129 +int_nosucc_lookup, 'ska::flat_hash_map, std::__cxx11::basic_string >', 211, 135, 161, 138, 152, 147, 123, 146, 158, 136, 139, 145, 171, 170 +int_delete, 'ska::flat_hash_map, std::__cxx11::basic_string >', 135, 129, 127, 121, 144, 128, 109, 119, 143, 118, 148, 138, 145, 147 +int_insert, 'ska::flat_hash_map, std::__cxx11::basic_string >', 137, 149, 144, 163, 176, 158, 251, 171, 188, 179, 184, 186, 203, 203 +int_succ_lookup, 'ska::flat_hash_map, std::__cxx11::basic_string >', 112, 98, 78, 109, 145, 105, 174, 109, 117, 114, 120, 116, 125, 137 +int_nosucc_lookup, 'ska::flat_hash_map, std::__cxx11::basic_string >', 222, 135, 99, 130, 198, 130, 137, 140, 153, 130, 170, 148, 154, 172 +int_delete, 'ska::flat_hash_map, std::__cxx11::basic_string >', 178, 104, 74, 115, 159, 101, 132, 119, 138, 108, 125, 127, 139, 157 +int_insert, 'phmap::parallel_flat_hash_map', 47, 45, 60, 58, 55, 56, 69, 75, 72, 71, 129, 93, 112, 98 +int_succ_lookup, 'phmap::parallel_flat_hash_map', 12, 12, 48, 21, 32, 29, 32, 30, 40, 43, 52, 58, 48, 55 +int_nosucc_lookup, 'phmap::parallel_flat_hash_map', 158, 54, 66, 64, 59, 60, 67, 74, 78, 76, 86, 123, 128, 93 +int_delete, 'phmap::parallel_flat_hash_map', 48, 25, 39, 32, 38, 32, 41, 37, 53, 51, 62, 70, 63, 78 +int_insert, 'phmap::parallel_flat_hash_map', 59, 152, 61, 49, 81, 55, 57, 69, 67, 78, 82, 86, 128, 89 +int_succ_lookup, 'phmap::parallel_flat_hash_map', 22, 99, 41, 17, 36, 28, 27, 30, 38, 48, 52, 40, 67, 53 +int_nosucc_lookup, 'phmap::parallel_flat_hash_map', 197, 98, 123, 60, 61, 60, 69, 79, 72, 104, 88, 95, 207, 117 +int_delete, 'phmap::parallel_flat_hash_map', 48, 44, 70, 35, 37, 36, 38, 42, 58, 73, 60, 91, 85, 95 +int_insert, 'phmap::parallel_flat_hash_map, std::__cxx11::basic_string >', 309, 245, 199, 231, 222, 230, 237, 261, 252, 427, 268, 287, 335, 325 +int_succ_lookup, 'phmap::parallel_flat_hash_map, std::__cxx11::basic_string >', 139, 147, 120, 131, 156, 160, 147, 164, 174, 271, 170, 179, 178, 205 +int_nosucc_lookup, 'phmap::parallel_flat_hash_map, std::__cxx11::basic_string >', 957, 200, 166, 172, 183, 179, 171, 194, 200, 364, 200, 261, 284, 236 +int_delete, 'phmap::parallel_flat_hash_map, std::__cxx11::basic_string >', 259, 117, 116, 117, 135, 133, 135, 183, 165, 265, 169, 183, 208, 197 +int_insert, 'phmap::parallel_flat_hash_map, std::__cxx11::basic_string >', 281, 247, 200, 236, 225, 224, 246, 247, 247, 266, 267, 392, 330, 277 +int_succ_lookup, 'phmap::parallel_flat_hash_map, std::__cxx11::basic_string >', 175, 152, 125, 130, 111, 140, 140, 142, 196, 168, 172, 249, 180, 201 +int_nosucc_lookup, 'phmap::parallel_flat_hash_map, std::__cxx11::basic_string >', 617, 188, 151, 204, 149, 166, 172, 173, 200, 204, 218, 251, 240, 224 +int_delete, 'phmap::parallel_flat_hash_map, std::__cxx11::basic_string >', 123, 131, 113, 123, 104, 128, 179, 122, 162, 174, 212, 178, 230, 201 
+int_insert, 'phmap::parallel_node_hash_map', 259, 122, 136, 137, 168, 162, 179, 174, 165, 169, 192, 197, 260, 214 +int_succ_lookup, 'phmap::parallel_node_hash_map', 32, 40, 51, 56, 82, 70, 69, 70, 71, 79, 75, 98, 84, 90 +int_nosucc_lookup, 'phmap::parallel_node_hash_map', 533, 181, 131, 158, 165, 164, 175, 180, 182, 176, 193, 218, 262, 248 +int_delete, 'phmap::parallel_node_hash_map', 112, 122, 123, 183, 168, 173, 191, 192, 236, 210, 207, 230, 249, 249 +int_insert, 'phmap::parallel_node_hash_map', 91, 140, 114, 141, 144, 141, 306, 183, 156, 284, 182, 379, 243, 205 +int_succ_lookup, 'phmap::parallel_node_hash_map', 24, 55, 48, 52, 79, 66, 63, 66, 71, 102, 83, 79, 102, 100 +int_nosucc_lookup, 'phmap::parallel_node_hash_map', 389, 392, 123, 147, 143, 150, 178, 182, 177, 201, 183, 210, 796, 211 +int_delete, 'phmap::parallel_node_hash_map', 105, 171, 126, 138, 173, 151, 206, 175, 229, 229, 208, 224, 261, 252 +int_insert, 'phmap::parallel_node_hash_map, std::__cxx11::basic_string >', 177, 238, 276, 277, 245, 284, 303, 313, 283, 296, 320, 366, 426, 358 +int_succ_lookup, 'phmap::parallel_node_hash_map, std::__cxx11::basic_string >', 101, 143, 174, 171, 197, 215, 212, 219, 237, 230, 245, 257, 270, 276 +int_nosucc_lookup, 'phmap::parallel_node_hash_map, std::__cxx11::basic_string >', 971, 221, 224, 228, 222, 219, 243, 259, 246, 257, 272, 315, 364, 313 +int_delete, 'phmap::parallel_node_hash_map, std::__cxx11::basic_string >', 147, 157, 198, 201, 247, 223, 251, 255, 279, 262, 259, 297, 298, 300 +int_insert, 'phmap::parallel_node_hash_map, std::__cxx11::basic_string >', 210, 241, 242, 282, 264, 282, 292, 343, 288, 443, 324, 395, 423, 314 +int_succ_lookup, 'phmap::parallel_node_hash_map, std::__cxx11::basic_string >', 148, 142, 173, 208, 202, 240, 207, 217, 244, 388, 241, 267, 267, 282 +int_nosucc_lookup, 'phmap::parallel_node_hash_map, std::__cxx11::basic_string >', 1011, 225, 221, 248, 219, 250, 246, 284, 291, 569, 276, 384, 376, 281 +int_delete, 'phmap::parallel_node_hash_map, std::__cxx11::basic_string >', 138, 168, 220, 212, 224, 230, 237, 248, 412, 463, 264, 295, 299, 297 +int_insert, 'emilib::HashMap', 23, 20, 22, 24, 26, 35, 29, 31, 40, 44, 37, 40, 45, 49 +int_succ_lookup, 'emilib::HashMap', 11, 14, 14, 20, 21, 25, 26, 27, 33, 33, 31, 36, 34, 34 +int_nosucc_lookup, 'emilib::HashMap', 26, 21, 28, 28, 28, 38, 31, 36, 40, 53, 36, 41, 43, 47 +int_delete, 'emilib::HashMap', 9, 8, 11, 15, 14, 18, 16, 19, 21, 26, 21, 23, 29, 26 +int_insert, 'emilib::HashMap', 15, 16, 22, 22, 29, 80, 121, 33, 37, 50, 39, 39, 45, 51 +int_succ_lookup, 'emilib::HashMap', 9, 12, 13, 19, 24, 28, 46, 28, 33, 34, 32, 31, 43, 36 +int_nosucc_lookup, 'emilib::HashMap', 18, 18, 24, 23, 31, 37, 37, 32, 39, 50, 37, 38, 54, 50 +int_delete, 'emilib::HashMap', 6, 7, 10, 11, 15, 17, 61, 17, 21, 25, 21, 22, 28, 26 +int_insert, 'emilib::HashMap, std::__cxx11::basic_string >', 134, 138, 161, 155, 156, 159, 163, 165, 182, 178, 174, 177, 488, 182 +int_succ_lookup, 'emilib::HashMap, std::__cxx11::basic_string >', 80, 93, 106, 118, 108, 114, 110, 111, 116, 119, 117, 120, 341, 133 +int_nosucc_lookup, 'emilib::HashMap, std::__cxx11::basic_string >', 169, 120, 121, 142, 124, 125, 129, 129, 130, 141, 138, 151, 179, 147 +int_delete, 'emilib::HashMap, std::__cxx11::basic_string >', 127, 84, 91, 115, 106, 102, 103, 101, 105, 124, 114, 112, 153, 123 +int_insert, 'emilib::HashMap, std::__cxx11::basic_string >', 183, 137, 146, 154, 147, 161, 154, 159, 168, 173, 170, 174, 179, 181 +int_succ_lookup, 'emilib::HashMap, std::__cxx11::basic_string >', 138, 
90, 101, 101, 105, 125, 104, 106, 120, 118, 120, 127, 122, 129 +int_nosucc_lookup, 'emilib::HashMap, std::__cxx11::basic_string >', 208, 111, 121, 111, 123, 137, 121, 122, 135, 134, 138, 139, 144, 142 +int_delete, 'emilib::HashMap, std::__cxx11::basic_string >', 94, 79, 94, 87, 95, 105, 101, 100, 153, 115, 111, 114, 115, 124 +int_insert, 'robin_hood::detail::Table, std::equal_to >', 102, 126, 38, 112, 81, 144, 120, 168, 69, 163, 130, 135, 112, 91 +int_succ_lookup, 'robin_hood::detail::Table, std::equal_to >', 35, 53, 22, 65, 33, 61, 30, 127, 73, 51, 52, 119, 179, 43 +int_nosucc_lookup, 'robin_hood::detail::Table, std::equal_to >', 266, 366, 38, 911, 40, 84, 93, 167, 67, 103, 82, 113, 114, 83 +int_delete, 'robin_hood::detail::Table, std::equal_to >', 22, 25, 64, 40, 35, 99, 118, 276, 44, 98, 104, 207, 185, 103 +int_insert, 'robin_hood::detail::Table, std::equal_to >', 89, 87, 50, 97, 66, 42, 84, 151, 180, 78, 218, 101, 58, 135 +int_succ_lookup, 'robin_hood::detail::Table, std::equal_to >', 40, 63, 49, 30, 29, 34, 35, 104, 79, 38, 88, 74, 37, 85 +int_nosucc_lookup, 'robin_hood::detail::Table, std::equal_to >', 245, 298, 94, 552, 40, 87, 90, 110, 92, 77, 176, 124, 70, 89 +int_delete, 'robin_hood::detail::Table, std::equal_to >', 105, 77, 62, 126, 65, 79, 111, 77, 132, 176, 136, 118, 64, 131 +int_insert, 'robin_hood::detail::Table, std::__cxx11::basic_string, robin_hood::hash >, std::equal_to > >', 495, 480, 352, 848, 246, 185, 347, 315, 176, 174, 240, 353, 150, 180 +int_succ_lookup, 'robin_hood::detail::Table, std::__cxx11::basic_string, robin_hood::hash >, std::equal_to > >', 247, 224, 202, 241, 200, 190, 80, 121, 121, 112, 125, 127, 119, 89 +int_nosucc_lookup, 'robin_hood::detail::Table, std::__cxx11::basic_string, robin_hood::hash >, std::equal_to > >', 981, 2163, 456, 5086, 252, 204, 179, 314, 184, 185, 216, 367, 131, 115 +int_delete, 'robin_hood::detail::Table, std::__cxx11::basic_string, robin_hood::hash >, std::equal_to > >', 135, 149, 263, 287, 253, 238, 153, 268, 133, 173, 194, 475, 133, 111 +int_insert, 'robin_hood::detail::Table, std::__cxx11::basic_string, robin_hood::hash >, std::equal_to > >', 261, 278, 141, 408, 141, 158, 203, 300, 262, 275, 392, 503, 315, 543 +int_succ_lookup, 'robin_hood::detail::Table, std::__cxx11::basic_string, robin_hood::hash >, std::equal_to > >', 124, 84, 93, 73, 98, 108, 109, 119, 114, 216, 136, 195, 265, 305 +int_nosucc_lookup, 'robin_hood::detail::Table, std::__cxx11::basic_string, robin_hood::hash >, std::equal_to > >', 530, 763, 277, 2340, 128, 158, 202, 341, 256, 544, 355, 541, 377, 315 +int_delete, 'robin_hood::detail::Table, std::__cxx11::basic_string, robin_hood::hash >, std::equal_to > >', 82, 114, 196, 83, 127, 176, 239, 285, 259, 355, 405, 643, 296, 298 +int_insert, 'robin_hood::detail::Table, std::equal_to >', 58, 147, 92, 239, 73, 172, 123, 237, 151, 151, 192, 187, 72, 230 +int_succ_lookup, 'robin_hood::detail::Table, std::equal_to >', 24, 69, 47, 105, 123, 235, 77, 67, 212, 65, 69, 74, 103, 146 +int_nosucc_lookup, 'robin_hood::detail::Table, std::equal_to >', 343, 1310, 71, 1802, 89, 104, 107, 145, 118, 136, 131, 194, 83, 134 +int_delete, 'robin_hood::detail::Table, std::equal_to >', 44, 60, 106, 155, 255, 262, 163, 219, 171, 222, 226, 244, 276, 297 +int_insert, 'robin_hood::detail::Table, std::equal_to >', 146, 76, 42, 197, 122, 96, 163, 172, 85, 227, 150, 180, 108, 173 +int_succ_lookup, 'robin_hood::detail::Table, std::equal_to >', 68, 80, 51, 80, 166, 67, 60, 133, 108, 161, 159, 131, 89, 164 +int_nosucc_lookup, 
'robin_hood::detail::Table, std::equal_to >', 451, 1078, 99, 1906, 89, 168, 105, 151, 125, 202, 95, 315, 58, 160 +int_delete, 'robin_hood::detail::Table, std::equal_to >', 83, 220, 123, 221, 206, 151, 151, 288, 319, 260, 238, 456, 268, 230 +int_insert, 'robin_hood::detail::Table, std::__cxx11::basic_string, robin_hood::hash >, std::equal_to > >', 188, 229, 240, 326, 312, 271, 325, 465, 264, 203, 312, 422, 258, 395 +int_succ_lookup, 'robin_hood::detail::Table, std::__cxx11::basic_string, robin_hood::hash >, std::equal_to > >', 105, 250, 349, 256, 320, 244, 311, 333, 254, 331, 320, 355, 271, 217 +int_nosucc_lookup, 'robin_hood::detail::Table, std::__cxx11::basic_string, robin_hood::hash >, std::equal_to > >', 939, 1950, 232, 4612, 253, 133, 326, 327, 273, 290, 368, 293, 224, 150 +int_delete, 'robin_hood::detail::Table, std::__cxx11::basic_string, robin_hood::hash >, std::equal_to > >', 174, 260, 254, 305, 310, 297, 289, 546, 324, 345, 328, 383, 282, 204 +int_insert, 'robin_hood::detail::Table, std::__cxx11::basic_string, robin_hood::hash >, std::equal_to > >', 317, 336, 242, 295, 196, 216, 298, 412, 401, 256, 455, 296, 202, 284 +int_succ_lookup, 'robin_hood::detail::Table, std::__cxx11::basic_string, robin_hood::hash >, std::equal_to > >', 236, 194, 293, 240, 321, 283, 252, 328, 422, 280, 265, 321, 239, 274 +int_nosucc_lookup, 'robin_hood::detail::Table, std::__cxx11::basic_string, robin_hood::hash >, std::equal_to > >', 1039, 1621, 155, 3866, 176, 170, 308, 420, 231, 321, 283, 322, 209, 182 +int_delete, 'robin_hood::detail::Table, std::__cxx11::basic_string, robin_hood::hash >, std::equal_to > >', 294, 265, 183, 409, 445, 274, 433, 364, 316, 397, 410, 328, 333, 322 +int_insert, 'robin_hood::detail::Table, std::equal_to >', 74, 109, 108, 124, 60, 69, 65, 86, 47, 74, 77, 108, 52, 134 +int_succ_lookup, 'robin_hood::detail::Table, std::equal_to >', 37, 137, 155, 90, 73, 80, 52, 54, 48, 57, 55, 63, 57, 61 +int_nosucc_lookup, 'robin_hood::detail::Table, std::equal_to >', 324, 936, 89, 2001, 67, 88, 69, 95, 48, 79, 81, 112, 54, 79 +int_delete, 'robin_hood::detail::Table, std::equal_to >', 57, 144, 125, 160, 121, 152, 109, 133, 124, 133, 142, 162, 140, 180 +int_insert, 'robin_hood::detail::Table, std::equal_to >', 45, 89, 74, 74, 46, 166, 88, 128, 102, 147, 224, 229, 243, 72 +int_succ_lookup, 'robin_hood::detail::Table, std::equal_to >', 39, 66, 86, 128, 73, 130, 56, 65, 70, 155, 116, 65, 146, 59 +int_nosucc_lookup, 'robin_hood::detail::Table, std::equal_to >', 415, 930, 64, 1715, 68, 94, 100, 183, 117, 79, 156, 160, 160, 59 +int_delete, 'robin_hood::detail::Table, std::equal_to >', 29, 55, 67, 107, 102, 236, 134, 167, 199, 191, 269, 309, 183, 199 +int_insert, 'robin_hood::detail::Table, std::__cxx11::basic_string, robin_hood::hash >, std::equal_to > >', 185, 258, 320, 245, 211, 218, 251, 273, 260, 273, 271, 473, 143, 354 +int_succ_lookup, 'robin_hood::detail::Table, std::__cxx11::basic_string, robin_hood::hash >, std::equal_to > >', 120, 284, 314, 179, 290, 253, 253, 223, 226, 299, 317, 366, 189, 360 +int_nosucc_lookup, 'robin_hood::detail::Table, std::__cxx11::basic_string, robin_hood::hash >, std::equal_to > >', 743, 1682, 221, 3335, 251, 144, 132, 287, 201, 254, 559, 333, 173, 206 +int_delete, 'robin_hood::detail::Table, std::__cxx11::basic_string, robin_hood::hash >, std::equal_to > >', 90, 156, 239, 228, 253, 235, 288, 307, 229, 282, 289, 277, 186, 276 +int_insert, 'robin_hood::detail::Table, std::__cxx11::basic_string, robin_hood::hash >, std::equal_to > >', 188, 176, 128, 171, 134, 142, 
160, 183, 141, 151, 235, 198, 137, 160 +int_succ_lookup, 'robin_hood::detail::Table, std::__cxx11::basic_string, robin_hood::hash >, std::equal_to > >', 118, 158, 142, 148, 156, 160, 166, 171, 172, 177, 237, 194, 193, 240 +int_nosucc_lookup, 'robin_hood::detail::Table, std::__cxx11::basic_string, robin_hood::hash >, std::equal_to > >', 669, 1179, 96, 2382, 107, 112, 131, 161, 132, 142, 178, 207, 141, 188 +int_delete, 'robin_hood::detail::Table, std::__cxx11::basic_string, robin_hood::hash >, std::equal_to > >', 108, 126, 154, 161, 147, 152, 196, 197, 176, 184, 199, 216, 194, 234 +int_insert, 'absl::node_hash_map', 136, 129, 113, 106, 117, 211, 122, 131, 140, 121, 142, 215, 171, 149 +int_succ_lookup, 'absl::node_hash_map', 61, 81, 127, 53, 59, 145, 64, 64, 71, 66, 76, 147, 74, 80 +int_nosucc_lookup, 'absl::node_hash_map', 801, 170, 269, 109, 115, 205, 118, 122, 131, 120, 133, 225, 156, 140 +int_delete, 'absl::node_hash_map', 164, 189, 308, 164, 188, 256, 203, 204, 228, 199, 231, 325, 290, 256 +int_insert, 'absl::node_hash_map', 96, 128, 137, 283, 114, 115, 118, 312, 129, 137, 137, 141, 249, 229 +int_succ_lookup, 'absl::node_hash_map', 29, 79, 127, 310, 58, 58, 64, 146, 74, 73, 75, 75, 133, 165 +int_nosucc_lookup, 'absl::node_hash_map', 411, 137, 160, 263, 115, 117, 118, 292, 130, 131, 134, 142, 257, 231 +int_delete, 'absl::node_hash_map', 92, 168, 166, 270, 236, 208, 214, 385, 416, 236, 238, 250, 279, 288 +int_insert, 'absl::node_hash_map, std::__cxx11::basic_string >', 140, 157, 297, 183, 189, 202, 205, 211, 386, 333, 258, 281, 315, 378 +int_succ_lookup, 'absl::node_hash_map, std::__cxx11::basic_string >', 114, 154, 361, 182, 205, 233, 272, 228, 275, 387, 261, 259, 272, 529 +int_nosucc_lookup, 'absl::node_hash_map, std::__cxx11::basic_string >', 846, 367, 210, 186, 180, 204, 216, 282, 463, 496, 252, 274, 296, 399 +int_delete, 'absl::node_hash_map, std::__cxx11::basic_string >', 181, 232, 224, 192, 218, 251, 261, 340, 425, 460, 290, 297, 299, 466 +int_insert, 'absl::node_hash_map, std::__cxx11::basic_string >', 332, 351, 180, 308, 210, 445, 214, 245, 336, 490, 226, 245, 263, 578 +int_succ_lookup, 'absl::node_hash_map, std::__cxx11::basic_string >', 161, 212, 192, 248, 238, 430, 252, 209, 304, 331, 264, 263, 268, 402 +int_nosucc_lookup, 'absl::node_hash_map, std::__cxx11::basic_string >', 1630, 385, 182, 393, 208, 425, 209, 210, 369, 464, 234, 251, 284, 606 +int_delete, 'absl::node_hash_map, std::__cxx11::basic_string >', 362, 347, 213, 410, 267, 336, 253, 243, 464, 491, 287, 299, 299, 642 \ No newline at end of file diff --git a/src/includes/3thparty/abseil-cpp b/src/includes/3thparty/abseil-cpp new file mode 160000 index 0000000..08a7e7b --- /dev/null +++ b/src/includes/3thparty/abseil-cpp @@ -0,0 +1 @@ +Subproject commit 08a7e7bf972c8451855a5022f2faf3d3655db015 diff --git a/src/includes/3thparty/emilib/hash_map.hpp b/src/includes/3thparty/emilib/hash_map.hpp new file mode 100644 index 0000000..11c673b --- /dev/null +++ b/src/includes/3thparty/emilib/hash_map.hpp @@ -0,0 +1,666 @@ +// By Emil Ernerfeldt 2014-2017 +// LICENSE: +// This software is dual-licensed to the public domain and under the following +// license: you are granted a perpetual, irrevocable license to copy, modify, +// publish, and distribute this file as you see fit. 
+ +#pragma once + +#include +#include +#include + +#include "loguru.hpp" + +namespace emilib { + +/// like std::equal_to but no need to #include +template +struct HashMapEqualTo +{ + constexpr bool operator()(const T& lhs, const T& rhs) const + { + return lhs == rhs; + } +}; + +/// A cache-friendly hash table with open addressing, linear probing and power-of-two capacity +template , typename EqT = HashMapEqualTo> +class HashMap +{ +private: + using MyType = HashMap; + + using PairT = std::pair; +public: + using size_type = size_t; + using value_type = PairT; + using reference = PairT&; + using const_reference = const PairT&; + + class iterator + { + public: + using iterator_category = std::forward_iterator_tag; + using difference_type = size_t; + using distance_type = size_t; + using value_type = std::pair; + using pointer = value_type*; + using reference = value_type&; + + iterator() { } + + iterator(MyType* hash_map, size_t bucket) : _map(hash_map), _bucket(bucket) + { + } + + iterator& operator++() + { + this->goto_next_element(); + return *this; + } + + iterator operator++(int) + { + size_t old_index = _bucket; + this->goto_next_element(); + return iterator(_map, old_index); + } + + reference operator*() const + { + return _map->_pairs[_bucket]; + } + + pointer operator->() const + { + return _map->_pairs + _bucket; + } + + bool operator==(const iterator& rhs) const + { + DCHECK_EQ_F(_map, rhs._map); + return this->_bucket == rhs._bucket; + } + + bool operator!=(const iterator& rhs) const + { + DCHECK_EQ_F(_map, rhs._map); + return this->_bucket != rhs._bucket; + } + + private: + void goto_next_element() + { + DCHECK_LT_F(_bucket, _map->_num_buckets); + do { + _bucket++; + } while (_bucket < _map->_num_buckets && _map->_states[_bucket] != State::FILLED); + } + + //private: + // friend class MyType; + public: + MyType* _map; + size_t _bucket; + }; + + class const_iterator + { + public: + using iterator_category = std::forward_iterator_tag; + using difference_type = size_t; + using distance_type = size_t; + using value_type = const std::pair; + using pointer = value_type*; + using reference = value_type&; + + const_iterator() { } + + const_iterator(iterator proto) : _map(proto._map), _bucket(proto._bucket) + { + } + + const_iterator(const MyType* hash_map, size_t bucket) : _map(hash_map), _bucket(bucket) + { + } + + const_iterator& operator++() + { + this->goto_next_element(); + return *this; + } + + const_iterator operator++(int) + { + size_t old_index = _bucket; + this->goto_next_element(); + return const_iterator(_map, old_index); + } + + reference operator*() const + { + return _map->_pairs[_bucket]; + } + + pointer operator->() const + { + return _map->_pairs + _bucket; + } + + bool operator==(const const_iterator& rhs) const + { + DCHECK_EQ_F(_map, rhs._map); + return this->_bucket == rhs._bucket; + } + + bool operator!=(const const_iterator& rhs) const + { + DCHECK_EQ_F(_map, rhs._map); + return this->_bucket != rhs._bucket; + } + + private: + void goto_next_element() + { + DCHECK_LT_F(_bucket, _map->_num_buckets); + do { + _bucket++; + } while (_bucket < _map->_num_buckets && _map->_states[_bucket] != State::FILLED); + } + + //private: + // friend class MyType; + public: + const MyType* _map; + size_t _bucket; + }; + + // ------------------------------------------------------------------------ + + HashMap() = default; + + HashMap(const HashMap& other) + { + reserve(other.size()); + insert(other.cbegin(), other.cend()); + } + + HashMap(HashMap&& other) + { + *this = 
std::move(other); + } + + HashMap& operator=(const HashMap& other) + { + clear(); + reserve(other.size()); + insert(other.cbegin(), other.cend()); + return *this; + } + + void operator=(HashMap&& other) + { + this->swap(other); + } + + ~HashMap() + { + for (size_t bucket=0; bucket<_num_buckets; ++bucket) { + if (_states[bucket] == State::FILLED) { + _pairs[bucket].~PairT(); + } + } + free(_states); + free(_pairs); + } + + void swap(HashMap& other) + { + std::swap(_hasher, other._hasher); + std::swap(_eq, other._eq); + std::swap(_states, other._states); + std::swap(_pairs, other._pairs); + std::swap(_num_buckets, other._num_buckets); + std::swap(_num_filled, other._num_filled); + std::swap(_max_probe_length, other._max_probe_length); + std::swap(_mask, other._mask); + } + + // ------------------------------------------------------------- + + iterator begin() + { + size_t bucket = 0; + while (bucket<_num_buckets && _states[bucket] != State::FILLED) { + ++bucket; + } + return iterator(this, bucket); + } + + const_iterator cbegin() const + { + size_t bucket = 0; + while (bucket<_num_buckets && _states[bucket] != State::FILLED) { + ++bucket; + } + return const_iterator(this, bucket); + } + + const_iterator begin() const + { + return cbegin(); + } + + iterator end() + { + return iterator(this, _num_buckets); + } + + const_iterator cend() const + { + return const_iterator(this, _num_buckets); + } + + const_iterator end() const + { + return cend(); + } + + size_t size() const + { + return _num_filled; + } + + bool empty() const + { + return _num_filled==0; + } + + // Returns the number of buckets. + size_t bucket_count() const + { + return _num_buckets; + } + + /// Returns average number of elements per bucket. + float load_factor() const + { + return static_cast(_num_filled) / static_cast(_num_buckets); + } + + // ------------------------------------------------------------ + + template + iterator find(const KeyLike& key) + { + auto bucket = this->find_filled_bucket(key); + if (bucket == (size_t)-1) { + return this->end(); + } + return iterator(this, bucket); + } + + template + const_iterator find(const KeyLike& key) const + { + auto bucket = this->find_filled_bucket(key); + if (bucket == (size_t)-1) + { + return this->end(); + } + return const_iterator(this, bucket); + } + + template + bool contains(const KeyLike& k) const + { + return find_filled_bucket(k) != (size_t)-1; + } + + template + size_t count(const KeyLike& k) const + { + return find_filled_bucket(k) != (size_t)-1 ? 1 : 0; + } + + /// Returns the matching ValueT or nullptr if k isn't found. + template + ValueT* try_get(const KeyLike& k) + { + auto bucket = find_filled_bucket(k); + if (bucket != (size_t)-1) { + return &_pairs[bucket].second; + } else { + return nullptr; + } + } + + /// Const version of the above + template + const ValueT* try_get(const KeyLike& k) const + { + auto bucket = find_filled_bucket(k); + if (bucket != (size_t)-1) { + return &_pairs[bucket].second; + } else { + return nullptr; + } + } + + /// Convenience function. + template + const ValueT get_or_return_default(const KeyLike& k) const + { + const ValueT* ret = try_get(k); + if (ret) { + return *ret; + } else { + return ValueT(); + } + } + + // ----------------------------------------------------- + + /// Returns a pair consisting of an iterator to the inserted element + /// (or to the element that prevented the insertion) + /// and a bool denoting whether the insertion took place. 
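+    /// Note: unlike std::unordered_map::insert, this overload takes the key and the
+    /// value as two separate arguments (a pair overload follows below).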
+ std::pair insert(const KeyT& key, const ValueT& value) + { + check_expand_need(); + + auto bucket = find_or_allocate(key); + + if (_states[bucket] == State::FILLED) { + return { iterator(this, bucket), false }; + } else { + _states[bucket] = State::FILLED; + new(_pairs + bucket) PairT(key, value); + _num_filled++; + return { iterator(this, bucket), true }; + } + } + + std::pair insert(const std::pair& p) + { + return insert(p.first, p.second); + } + + void insert(const_iterator begin, const_iterator end) + { + // TODO: reserve space exactly once. + for (; begin != end; ++begin) { + insert(begin->first, begin->second); + } + } + + /// Same as above, but contains(key) MUST be false + void insert_unique(KeyT&& key, ValueT&& value) + { + DCHECK_F(!contains(key)); + check_expand_need(); + auto bucket = find_empty_bucket(key); + _states[bucket] = State::FILLED; + new(_pairs + bucket) PairT(std::move(key), std::move(value)); + _num_filled++; + } + + void insert_unique(std::pair&& p) + { + insert_unique(std::move(p.first), std::move(p.second)); + } + + void insert_or_assign(const KeyT& key, ValueT&& value) + { + check_expand_need(); + + auto bucket = find_or_allocate(key); + + // Check if inserting a new value rather than overwriting an old entry + if (_states[bucket] == State::FILLED) { + _pairs[bucket].second = value; + } else { + _states[bucket] = State::FILLED; + new(_pairs + bucket) PairT(key, value); + _num_filled++; + } + } + + /// Return the old value or ValueT() if it didn't exist. + ValueT set_get(const KeyT& key, const ValueT& new_value) + { + check_expand_need(); + + auto bucket = find_or_allocate(key); + + // Check if inserting a new value rather than overwriting an old entry + if (_states[bucket] == State::FILLED) { + ValueT old_value = _pairs[bucket].second; + _pairs[bucket] = new_value.second; + return old_value; + } else { + _states[bucket] = State::FILLED; + new(_pairs + bucket) PairT(key, new_value); + _num_filled++; + return ValueT(); + } + } + + /// Like std::map::operator[]. + ValueT& operator[](const KeyT& key) + { + check_expand_need(); + + auto bucket = find_or_allocate(key); + + /* Check if inserting a new value rather than overwriting an old entry */ + if (_states[bucket] != State::FILLED) { + _states[bucket] = State::FILLED; + new(_pairs + bucket) PairT(key, ValueT()); + _num_filled++; + } + + return _pairs[bucket].second; + } + + // ------------------------------------------------------- + + /// Erase an element from the hash table. + /// return false if element was not found + bool erase(const KeyT& key) + { + auto bucket = find_filled_bucket(key); + if (bucket != (size_t)-1) { + _states[bucket] = State::ACTIVE; + _pairs[bucket].~PairT(); + _num_filled -= 1; + return true; + } else { + return false; + } + } + + /// Erase an element using an iterator. + /// Returns an iterator to the next element (or end()). + iterator erase(iterator it) + { + DCHECK_EQ_F(it._map, this); + DCHECK_LT_F(it._bucket, _num_buckets); + _states[it._bucket] = State::ACTIVE; + _pairs[it._bucket].~PairT(); + _num_filled -= 1; + return ++it; + } + + /// Remove all elements, keeping full capacity. 
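+    /// (Every FILLED slot is destroyed and marked INACTIVE; the bucket arrays themselves stay allocated.)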
+ void clear() + { + for (size_t bucket=0; bucket<_num_buckets; ++bucket) { + if (_states[bucket] == State::FILLED) { + _states[bucket] = State::INACTIVE; + _pairs[bucket].~PairT(); + } + } + _num_filled = 0; + _max_probe_length = -1; + } + + /// Make room for this many elements + void reserve(size_t num_elems) + { + size_t required_buckets = num_elems + num_elems/2 + 1; + if (required_buckets <= _num_buckets) { + return; + } + size_t num_buckets = 4; + while (num_buckets < required_buckets) { num_buckets *= 2; } + + auto new_states = (State*)malloc(num_buckets * sizeof(State)); + auto new_pairs = (PairT*)malloc(num_buckets * sizeof(PairT)); + + if (!new_states || !new_pairs) { + free(new_states); + free(new_pairs); + throw std::bad_alloc(); + } + + //auto old_num_filled = _num_filled; + auto old_num_buckets = _num_buckets; + auto old_states = _states; + auto old_pairs = _pairs; + + _num_filled = 0; + _num_buckets = num_buckets; + _mask = _num_buckets - 1; + _states = new_states; + _pairs = new_pairs; + + std::fill_n(_states, num_buckets, State::INACTIVE); + + _max_probe_length = -1; + + for (size_t src_bucket=0; src_bucket + size_t find_filled_bucket(const KeyLike& key) const + { + if (empty()) { return (size_t)-1; } // Optimization + + auto hash_value = _hasher(key); + for (int offset=0; offset<=_max_probe_length; ++offset) { + auto bucket = (hash_value + offset) & _mask; + if (_states[bucket] == State::FILLED) { + if (_eq(_pairs[bucket].first, key)) { + return bucket; + } + } else if (_states[bucket] == State::INACTIVE) { + return (size_t)-1; // End of the chain! + } + } + return (size_t)-1; + } + + // Find the bucket with this key, or return a good empty bucket to place the key in. + // In the latter case, the bucket is expected to be filled. + size_t find_or_allocate(const KeyT& key) + { + auto hash_value = _hasher(key); + size_t hole = (size_t)-1; + int offset=0; + for (; offset<=_max_probe_length; ++offset) { + auto bucket = (hash_value + offset) & _mask; + + if (_states[bucket] == State::FILLED) { + if (_eq(_pairs[bucket].first, key)) { + return bucket; + } + } else if (_states[bucket] == State::INACTIVE) { + return bucket; + } else { + // ACTIVE: keep searching + if (hole == (size_t)-1) { + hole = bucket; + } + } + } + + // No key found - but maybe a hole for it + + DCHECK_EQ_F(offset, _max_probe_length+1); + + if (hole != (size_t)-1) { + return hole; + } + + // No hole found within _max_probe_length + for (; ; ++offset) { + auto bucket = (hash_value + offset) & _mask; + + if (_states[bucket] != State::FILLED) { + _max_probe_length = offset; + return bucket; + } + } + } + + // key is not in this map. Find a place to put it. + size_t find_empty_bucket(const KeyT& key) + { + auto hash_value = _hasher(key); + for (int offset=0; ; ++offset) { + auto bucket = (hash_value + offset) & _mask; + if (_states[bucket] != State::FILLED) { + if (offset > _max_probe_length) { + _max_probe_length = offset; + } + return bucket; + } + } + } + +private: + enum class State : uint8_t + { + INACTIVE, // Never been touched + ACTIVE, // Is inside a search-chain, but is empty + FILLED // Is set with key/value + }; + + HashT _hasher; + EqT _eq; + State* _states = nullptr; + PairT* _pairs = nullptr; + size_t _num_buckets = 0; + size_t _num_filled = 0; + int _max_probe_length = -1; // Our longest bucket-brigade is this long. ONLY when we have zero elements is this ever negative (-1). 
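+    // Probing uses (hash + offset) & _mask rather than a modulo, which is why the
+    // bucket count must be a power of two: _mask == _num_buckets - 1 has all low
+    // bits set, so the bitwise AND wraps the probe index cheaply.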
+private:
+	enum class State : uint8_t
+	{
+		INACTIVE, // Never been touched
+		ACTIVE,   // Is inside a search-chain, but is empty
+		FILLED    // Is set with key/value
+	};
+
+	HashT   _hasher;
+	EqT     _eq;
+	State*  _states           = nullptr;
+	PairT*  _pairs            = nullptr;
+	size_t  _num_buckets      = 0;
+	size_t  _num_filled       = 0;
+	int     _max_probe_length = -1; // Our longest bucket-brigade is this long. ONLY when we have zero elements is this ever negative (-1).
+	size_t  _mask             = 0; // _num_buckets minus one
+};
+
+} // namespace emilib
diff --git a/src/includes/3thparty/emilib/loguru.cpp b/src/includes/3thparty/emilib/loguru.cpp
new file mode 100644
index 0000000..bd75e99
--- /dev/null
+++ b/src/includes/3thparty/emilib/loguru.cpp
@@ -0,0 +1,1787 @@
+// Disable all warnings from gcc/clang:
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpragmas"
+
+#pragma GCC diagnostic ignored "-Wc++98-compat"
+#pragma GCC diagnostic ignored "-Wc++98-compat-pedantic"
+#pragma GCC diagnostic ignored "-Wexit-time-destructors"
+#pragma GCC diagnostic ignored "-Wformat-nonliteral"
+#pragma GCC diagnostic ignored "-Wglobal-constructors"
+#pragma GCC diagnostic ignored "-Wgnu-zero-variadic-macro-arguments"
+#pragma GCC diagnostic ignored "-Wmissing-prototypes"
+#pragma GCC diagnostic ignored "-Wpadded"
+#pragma GCC diagnostic ignored "-Wsign-compare"
+#pragma GCC diagnostic ignored "-Wsign-conversion"
+#pragma GCC diagnostic ignored "-Wunknown-pragmas"
+#pragma GCC diagnostic ignored "-Wunused-macros"
+#pragma GCC diagnostic ignored "-Wzero-as-null-pointer-constant"
+
+#include "loguru.hpp"
+
+#ifndef LOGURU_HAS_BEEN_IMPLEMENTED
+#define LOGURU_HAS_BEEN_IMPLEMENTED
+
+#define LOGURU_PREAMBLE_WIDTH (53 + LOGURU_THREADNAME_WIDTH + LOGURU_FILENAME_WIDTH)
+
+#undef min
+#undef max
+
+#include <algorithm>
+#include <atomic>
+#include <chrono>
+#include <cstdarg>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <mutex>
+#include <regex>
+#include <string>
+#include <thread>
+#include <vector>
+
+#ifdef _WIN32
+	#include <direct.h>
+
+	#define localtime_r(a, b) localtime_s(b, a) // No localtime_r with MSVC, but arguments are swapped for localtime_s
+#else
+	#include <signal.h>
+	#include <sys/stat.h> // mkdir
+	#include <unistd.h>   // STDERR_FILENO
+#endif
+
+#ifdef __linux__
+	#include <linux/limits.h> // PATH_MAX
+#elif !defined(_WIN32)
+	#include <limits.h> // PATH_MAX
+#endif
+
+#ifndef PATH_MAX
+	#define PATH_MAX 1024
+#endif
+
+#ifdef __APPLE__
+	#include "TargetConditionals.h"
+#endif
+
+// TODO: use defined(_POSIX_VERSION) for some of these things?
+
+#if defined(_WIN32) || defined(__CYGWIN__)
+	#define LOGURU_PTHREADS    0
+	#define LOGURU_WINTHREADS  1
+	#ifndef LOGURU_STACKTRACES
+		#define LOGURU_STACKTRACES 0
+	#endif
+#elif defined(__rtems__)
+	#define LOGURU_PTHREADS    1
+	#define LOGURU_WINTHREADS  0
+	#ifndef LOGURU_STACKTRACES
+		#define LOGURU_STACKTRACES 0
+	#endif
+#else
+	#define LOGURU_PTHREADS    1
+	#define LOGURU_WINTHREADS  0
+	#ifndef LOGURU_STACKTRACES
+		#define LOGURU_STACKTRACES 1
+	#endif
+#endif
+
+#if LOGURU_STACKTRACES
+	#include <cxxabi.h>   // for __cxa_demangle
+	#include <dlfcn.h>    // for dladdr
+	#include <execinfo.h> // for backtrace
+#endif // LOGURU_STACKTRACES
+
+#if LOGURU_PTHREADS
+	#include <pthread.h>
+	#if defined(__FreeBSD__)
+		#include <pthread_np.h>
+		#include <sys/thr.h>
+	#elif defined(__OpenBSD__)
+		#include <pthread_np.h>
+	#endif
+
+	#ifdef __linux__
+		/* On Linux, the default thread name is the same as the name of the binary.
+		   Additionally, all new threads inherit the name of the thread it got forked from.
+		   For this reason, Loguru use the pthread Thread Local Storage
+		   for storing thread names on Linux. */
+		#define LOGURU_PTLS_NAMES 1
+	#endif
+#endif
+
+#if LOGURU_WINTHREADS
+	#ifndef _WIN32_WINNT
+		#define _WIN32_WINNT 0x0502
+	#endif
+	#define WIN32_LEAN_AND_MEAN
+	#define NOMINMAX
+	#include <windows.h>
+#endif
+
+#ifndef LOGURU_PTLS_NAMES
+	#define LOGURU_PTLS_NAMES 0
+#endif
+
+namespace loguru
+{
+	using namespace std::chrono;
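The LOGURU_STACKTRACES block above pulls in execinfo.h and friends. A minimal sketch of what those calls provide on POSIX (no demangling, error handling omitted):

    #include <execinfo.h> // backtrace, backtrace_symbols
    #include <cstdio>
    #include <cstdlib>

    void print_callstack()
    {
        void* frames[32];
        const int num = backtrace(frames, 32);           // capture raw return addresses
        char** symbols = backtrace_symbols(frames, num); // best-effort symbol names
        for (int i = 0; i < num; ++i) {
            std::printf("%2d: %s\n", i, symbols ? symbols[i] : "?");
        }
        std::free(symbols); // backtrace_symbols allocates one malloc'd block
    }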
+#if LOGURU_WITH_FILEABS
+	struct FileAbs
+	{
+		char path[PATH_MAX];
+		char mode_str[4];
+		Verbosity verbosity;
+		struct stat st;
+		FILE* fp;
+		bool is_reopening = false; // to prevent recursive call in file_reopen.
+		decltype(steady_clock::now()) last_check_time = steady_clock::now();
+	};
+#else
+	typedef FILE* FileAbs;
+#endif
+
+	struct Callback
+	{
+		std::string     id;
+		log_handler_t   callback;
+		void*           user_data;
+		Verbosity       verbosity; // Does not change!
+		close_handler_t close;
+		flush_handler_t flush;
+		unsigned        indentation;
+	};
+
+	using CallbackVec = std::vector<Callback>;
+
+	using StringPair     = std::pair<std::string, std::string>;
+	using StringPairList = std::vector<StringPair>;
+
+	const auto s_start_time = steady_clock::now();
+
+	Verbosity g_stderr_verbosity  = Verbosity_0;
+	bool      g_colorlogtostderr  = true;
+	unsigned  g_flush_interval_ms = 0;
+	bool      g_preamble          = true;
+
+	// Preamble details
+	bool g_preamble_date    = true;
+	bool g_preamble_time    = true;
+	bool g_preamble_uptime  = true;
+	bool g_preamble_thread  = true;
+	bool g_preamble_file    = true;
+	bool g_preamble_verbose = true;
+	bool g_preamble_pipe    = true;
+
+	static std::recursive_mutex s_mutex;
+	static Verbosity            s_max_out_verbosity = Verbosity_OFF;
+	static std::string          s_argv0_filename;
+	static std::string          s_arguments;
+	static char                 s_current_dir[PATH_MAX];
+	static CallbackVec          s_callbacks;
+	static fatal_handler_t      s_fatal_handler = nullptr;
+	static verbosity_to_name_t  s_verbosity_to_name_callback = nullptr;
+	static name_to_verbosity_t  s_name_to_verbosity_callback = nullptr;
+	static StringPairList       s_user_stack_cleanups;
+	static bool                 s_strip_file_path = true;
+	static std::atomic<int>     s_stderr_indentation { 0 };
+
+	// For periodic flushing:
+	static std::thread* s_flush_thread   = nullptr;
+	static bool         s_needs_flushing = false;
+
+	static const bool s_terminal_has_color = [](){
+		#ifdef _WIN32
+			#ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING
+				#define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x0004
+			#endif
+
+			HANDLE hOut = GetStdHandle(STD_OUTPUT_HANDLE);
+			if (hOut != INVALID_HANDLE_VALUE) {
+				DWORD dwMode = 0;
+				GetConsoleMode(hOut, &dwMode);
+				dwMode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING;
+				return SetConsoleMode(hOut, dwMode) != 0;
+			}
+			return false;
+		#else
+			if (const char* term = getenv("TERM")) {
+				return 0 == strcmp(term, "cygwin")
+					|| 0 == strcmp(term, "linux")
+					|| 0 == strcmp(term, "rxvt-unicode-256color")
+					|| 0 == strcmp(term, "screen")
+					|| 0 == strcmp(term, "screen-256color")
+					|| 0 == strcmp(term, "tmux-256color")
+					|| 0 == strcmp(term, "xterm")
+					|| 0 == strcmp(term, "xterm-256color")
+					|| 0 == strcmp(term, "xterm-termite")
+					|| 0 == strcmp(term, "xterm-color");
+			} else {
+				return false;
+			}
+		#endif
+	}();
+
+	static void print_preamble_header(char* out_buff, size_t out_buff_size);
+
+	#if LOGURU_PTLS_NAMES
+		static pthread_once_t s_pthread_key_once = PTHREAD_ONCE_INIT;
+		static pthread_key_t  s_pthread_key_name;
+
+		void make_pthread_key_name()
+		{
+			(void)pthread_key_create(&s_pthread_key_name, free);
+		}
+	#endif
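The color decision above is made once, at static-init time, from TERM. A standalone sketch of the same idea, using looser substring matching instead of loguru's exact whitelist:

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    static bool guess_terminal_color()
    {
        const char* term = getenv("TERM"); // e.g. "xterm-256color"
        if (!term) { return false; }
        return strstr(term, "xterm") || strstr(term, "screen") ||
               strstr(term, "tmux")  || strcmp(term, "linux") == 0;
    }

    int main()
    {
        // \x1b[32m = green, \x1b[0m = reset (the kind of VTSEQ() values used below).
        std::printf(guess_terminal_color() ? "\x1b[32mcolor!\x1b[0m\n" : "no color\n");
        return 0;
    }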
+	// ------------------------------------------------------------------------------
+	// Colors
+
+	bool terminal_has_color() { return s_terminal_has_color; }
+
+	// Colors
+
+#ifdef _WIN32
+#define VTSEQ(ID) ("\x1b[1;" #ID "m")
+#else
+#define VTSEQ(ID) ("\x1b[" #ID "m")
+#endif
+
+	const char* terminal_black()      { return s_terminal_has_color ? VTSEQ(30) : ""; }
+	const char* terminal_red()        { return s_terminal_has_color ? VTSEQ(31) : ""; }
+	const char* terminal_green()      { return s_terminal_has_color ? VTSEQ(32) : ""; }
+	const char* terminal_yellow()     { return s_terminal_has_color ? VTSEQ(33) : ""; }
+	const char* terminal_blue()       { return s_terminal_has_color ? VTSEQ(34) : ""; }
+	const char* terminal_purple()     { return s_terminal_has_color ? VTSEQ(35) : ""; }
+	const char* terminal_cyan()       { return s_terminal_has_color ? VTSEQ(36) : ""; }
+	const char* terminal_light_gray() { return s_terminal_has_color ? VTSEQ(37) : ""; }
+	const char* terminal_white()      { return s_terminal_has_color ? VTSEQ(37) : ""; }
+	const char* terminal_light_red()  { return s_terminal_has_color ? VTSEQ(91) : ""; }
+	const char* terminal_dim()        { return s_terminal_has_color ? VTSEQ(2)  : ""; }
+
+	// Formatting
+	const char* terminal_bold()      { return s_terminal_has_color ? VTSEQ(1) : ""; }
+	const char* terminal_underline() { return s_terminal_has_color ? VTSEQ(4) : ""; }
+
+	// You should end each line with this!
+	const char* terminal_reset() { return s_terminal_has_color ? VTSEQ(0) : ""; }
+
+	// ------------------------------------------------------------------------------
+#if LOGURU_WITH_FILEABS
+	void file_reopen(void* user_data);
+	inline FILE* to_file(void* user_data) { return reinterpret_cast<FileAbs*>(user_data)->fp; }
+#else
+	inline FILE* to_file(void* user_data) { return reinterpret_cast<FILE*>(user_data); }
+#endif
+
+	void file_log(void* user_data, const Message& message)
+	{
+#if LOGURU_WITH_FILEABS
+		FileAbs* file_abs = reinterpret_cast<FileAbs*>(user_data);
+		if (file_abs->is_reopening) {
+			return;
+		}
+		// It is better checking file change every minute/hour/day,
+		// instead of doing this every time we log.
+		// Here check_interval is set to zero to enable checking every time;
+		const auto check_interval = seconds(0);
+		if (duration_cast<seconds>(steady_clock::now() - file_abs->last_check_time) > check_interval) {
+			file_abs->last_check_time = steady_clock::now();
+			file_reopen(user_data);
+		}
+		FILE* file = to_file(user_data);
+		if (!file) {
+			return;
+		}
+#else
+		FILE* file = to_file(user_data);
+#endif
+		fprintf(file, "%s%s%s%s\n",
+				message.preamble, message.indentation, message.prefix, message.message);
+		if (g_flush_interval_ms == 0) {
+			fflush(file);
+		}
+	}
+
+	void file_close(void* user_data)
+	{
+		FILE* file = to_file(user_data);
+		if (file) {
+			fclose(file);
+		}
+#if LOGURU_WITH_FILEABS
+		delete reinterpret_cast<FileAbs*>(user_data);
+#endif
+	}
+
+	void file_flush(void* user_data)
+	{
+		FILE* file = to_file(user_data);
+		fflush(file);
+	}
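file_log/file_close/file_flush above are ordinary callbacks that add_file later registers via add_callback. A sketch of registering a custom sink the same way through loguru's public API (the id and output format here are arbitrary):

    #include "loguru.hpp"
    #include <cstdio>

    static void stdout_log(void* /*user_data*/, const loguru::Message& msg)
    {
        // Same concatenation file_log() uses for FILE* sinks:
        std::printf("%s%s%s%s\n", msg.preamble, msg.indentation, msg.prefix, msg.message);
    }

    int main(int argc, char* argv[])
    {
        loguru::init(argc, argv);
        loguru::add_callback("stdout_mirror", stdout_log, nullptr, loguru::Verbosity_INFO);
        LOG_F(INFO, "this reaches stderr and the custom callback");
        loguru::remove_callback("stdout_mirror");
        return 0;
    }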
+#if LOGURU_WITH_FILEABS
+	void file_reopen(void* user_data)
+	{
+		FileAbs* file_abs = reinterpret_cast<FileAbs*>(user_data);
+		struct stat st;
+		int ret;
+		if (!file_abs->fp || (ret = stat(file_abs->path, &st)) == -1 || (st.st_ino != file_abs->st.st_ino)) {
+			file_abs->is_reopening = true;
+			if (file_abs->fp) {
+				fclose(file_abs->fp);
+			}
+			if (!file_abs->fp) {
+				LOG_F(INFO, "Reopening file '%s' due to previous error", file_abs->path);
+			}
+			else if (ret < 0) {
+				const auto why = errno_as_text();
+				LOG_F(INFO, "Reopening file '%s' due to '%s'", file_abs->path, why.c_str());
+			} else {
+				LOG_F(INFO, "Reopening file '%s' due to file changed", file_abs->path);
+			}
+			// try reopen current file.
+			if (!create_directories(file_abs->path)) {
+				LOG_F(ERROR, "Failed to create directories to '%s'", file_abs->path);
+			}
+			file_abs->fp = fopen(file_abs->path, file_abs->mode_str);
+			if (!file_abs->fp) {
+				LOG_F(ERROR, "Failed to open '%s'", file_abs->path);
+			} else {
+				stat(file_abs->path, &file_abs->st);
+			}
+			file_abs->is_reopening = false;
+		}
+	}
+#endif
+	// ------------------------------------------------------------------------------
+
+	// Helpers:
+
+	Text::~Text() { free(_str); }
+
+	LOGURU_PRINTF_LIKE(1, 0)
+	static Text vtextprintf(const char* format, va_list vlist)
+	{
+#ifdef _WIN32
+		int bytes_needed = _vscprintf(format, vlist);
+		CHECK_F(bytes_needed >= 0, "Bad string format: '%s'", format);
+		char* buff = (char*)malloc(bytes_needed+1);
+		vsnprintf(buff, bytes_needed+1, format, vlist);
+		return Text(buff);
+#else
+		char* buff = nullptr;
+		int result = vasprintf(&buff, format, vlist);
+		CHECK_F(result >= 0, "Bad string format: '%s'", format);
+		return Text(buff);
+#endif
+	}
+
+	Text textprintf(const char* format, ...)
+	{
+		va_list vlist;
+		va_start(vlist, format);
+		auto result = vtextprintf(format, vlist);
+		va_end(vlist);
+		return result;
+	}
+
+	// Overloaded for variadic template matching.
+	Text textprintf()
+	{
+		return Text(static_cast<char*>(calloc(1, 1)));
+	}
+
+	static const char* indentation(unsigned depth)
+	{
+		static const char buff[] =
+			". . . . . . . . . . " ". . . . . . . . . . "
+			". . . . . . . . . . " ". . . . . . . . . . "
+			". . . . . . . . . . " ". . . . . . . . . . "
+			". . . . . . . . . . " ". . . . . . . . . . "
+			". . . . . . . . . . " ". . . . . . . . . . ";
+		static const size_t INDENTATION_WIDTH = 4;
+		static const size_t NUM_INDENTATIONS = (sizeof(buff) - 1) / INDENTATION_WIDTH;
+		depth = std::min<unsigned>(depth, NUM_INDENTATIONS);
+		return buff + INDENTATION_WIDTH * (NUM_INDENTATIONS - depth);
+	}
+
+	static void parse_args(int& argc, char* argv[], const char* verbosity_flag)
+	{
+		int arg_dest = 1;
+		int out_argc = argc;
+
+		for (int arg_it = 1; arg_it < argc; ++arg_it) {
+			auto cmd = argv[arg_it];
+			auto arg_len = strlen(verbosity_flag);
+			if (strncmp(cmd, verbosity_flag, arg_len) == 0 && !std::isalpha(cmd[arg_len], std::locale(""))) {
+				out_argc -= 1;
+				auto value_str = cmd + arg_len;
+				if (value_str[0] == '\0') {
+					// Value in separate argument
+					arg_it += 1;
+					CHECK_LT_F(arg_it, argc, "Missing verbosity level after %s", verbosity_flag);
+					value_str = argv[arg_it];
+					out_argc -= 1;
+				}
+				if (*value_str == '=') { value_str += 1; }
+
+				auto req_verbosity = get_verbosity_from_name(value_str);
+				if (req_verbosity != Verbosity_INVALID) {
+					g_stderr_verbosity = req_verbosity;
+				} else {
+					char* end = 0;
+					g_stderr_verbosity = static_cast<Verbosity>(strtol(value_str, &end, 10));
+					CHECK_F(end && *end == '\0',
+							"Invalid verbosity. Expected integer, INFO, WARNING, ERROR or OFF, got '%s'", value_str);
+				}
+			} else {
+				argv[arg_dest++] = argv[arg_it];
+			}
+		}
+
+		argc = out_argc;
+		argv[argc] = nullptr;
+	}
+
+	static long long now_ns()
+	{
+		return duration_cast<nanoseconds>(high_resolution_clock::now().time_since_epoch()).count();
+	}
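now_ns() above is the standard chrono idiom; the benchmark harness in main.cpp times whole test runs the same way. A minimal standalone sketch (workload arbitrary):

    #include <chrono>
    #include <cstdio>

    int main()
    {
        using namespace std::chrono;
        const auto t0 = steady_clock::now();
        volatile long sink = 0;
        for (long i = 0; i < 10000000; ++i) { sink += i; } // work being timed
        const auto t1 = steady_clock::now();
        std::printf("%lld ms\n",
                    static_cast<long long>(duration_cast<milliseconds>(t1 - t0).count()));
        return 0;
    }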
+	// Returns the part of the path after the last / or \ (if any).
+	const char* filename(const char* path)
+	{
+		for (auto ptr = path; *ptr; ++ptr) {
+			if (*ptr == '/' || *ptr == '\\') {
+				path = ptr + 1;
+			}
+		}
+		return path;
+	}
+
+	// ------------------------------------------------------------------------------
+
+	static void on_atexit()
+	{
+		LOG_F(INFO, "atexit");
+		flush();
+	}
+
+	static void install_signal_handlers();
+
+	static void write_hex_digit(std::string& out, unsigned num)
+	{
+		DCHECK_LT_F(num, 16u);
+		if (num < 10u) { out.push_back(char('0' + num)); }
+		else { out.push_back(char('A' + num - 10)); }
+	}
+
+	static void write_hex_byte(std::string& out, uint8_t n)
+	{
+		write_hex_digit(out, n >> 4u);
+		write_hex_digit(out, n & 0x0f);
+	}
+
+	static void escape(std::string& out, const std::string& str)
+	{
+		for (char c : str) {
+			/**/ if (c == '\a') { out += "\\a";  }
+			else if (c == '\b') { out += "\\b";  }
+			else if (c == '\f') { out += "\\f";  }
+			else if (c == '\n') { out += "\\n";  }
+			else if (c == '\r') { out += "\\r";  }
+			else if (c == '\t') { out += "\\t";  }
+			else if (c == '\v') { out += "\\v";  }
+			else if (c == '\\') { out += "\\\\"; }
+			else if (c == '\'') { out += "\\\'"; }
+			else if (c == '\"') { out += "\\\""; }
+			else if (c == ' ')  { out += "\\ ";  }
+			else if (0 <= c && c < 0x20) { // ASCII control character:
+			// else if (c < 0x20 || c != (c & 127)) { // ASCII control character or UTF-8:
+				out += "\\x";
+				write_hex_byte(out, static_cast<uint8_t>(c));
+			} else { out += c; }
+		}
+	}
+
+	Text errno_as_text()
+	{
+		char buff[256];
+	#if defined(__GLIBC__) && defined(_GNU_SOURCE)
+		// GNU Version
+		return Text(strdup(strerror_r(errno, buff, sizeof(buff))));
+	#elif defined(__APPLE__) || _POSIX_C_SOURCE >= 200112L
+		// XSI Version
+		strerror_r(errno, buff, sizeof(buff));
+		return Text(strdup(buff));
+	#elif defined(_WIN32)
+		strerror_s(buff, sizeof(buff), errno);
+		return Text(strdup(buff));
+	#else
+		// Not thread-safe.
+ return Text(strdup(strerror(errno))); + #endif + } + + void init(int& argc, char* argv[], const char* verbosity_flag) + { + CHECK_GT_F(argc, 0, "Expected proper argc/argv"); + CHECK_EQ_F(argv[argc], nullptr, "Expected proper argc/argv"); + + s_argv0_filename = filename(argv[0]); + + #ifdef _WIN32 + #define getcwd _getcwd + #endif + + if (!getcwd(s_current_dir, sizeof(s_current_dir))) + { + const auto error_text = errno_as_text(); + LOG_F(WARNING, "Failed to get current working directory: %s", error_text.c_str()); + } + + s_arguments = ""; + for (int i = 0; i < argc; ++i) { + escape(s_arguments, argv[i]); + if (i + 1 < argc) { + s_arguments += " "; + } + } + + if (verbosity_flag) { + parse_args(argc, argv, verbosity_flag); + } + + #if LOGURU_PTLS_NAMES || LOGURU_WINTHREADS + set_thread_name("main thread"); + #elif LOGURU_PTHREADS + char old_thread_name[16] = {0}; + auto this_thread = pthread_self(); + #if defined(__APPLE__) || defined(__linux__) + pthread_getname_np(this_thread, old_thread_name, sizeof(old_thread_name)); + #endif + if (old_thread_name[0] == 0) { + #ifdef __APPLE__ + pthread_setname_np("main thread"); + #elif defined(__FreeBSD__) || defined(__OpenBSD__) + pthread_set_name_np(this_thread, "main thread"); + #elif defined(__linux__) + pthread_setname_np(this_thread, "main thread"); + #endif + } + #endif // LOGURU_PTHREADS + + if (g_stderr_verbosity >= Verbosity_INFO) { + if (g_preamble) { + char preamble_explain[LOGURU_PREAMBLE_WIDTH]; + print_preamble_header(preamble_explain, sizeof(preamble_explain)); + if (g_colorlogtostderr && s_terminal_has_color) { + fprintf(stderr, "%s%s%s\n", terminal_reset(), terminal_dim(), preamble_explain); + } else { + fprintf(stderr, "%s\n", preamble_explain); + } + } + fflush(stderr); + } + LOG_F(INFO, "arguments: %s", s_arguments.c_str()); + if (strlen(s_current_dir) != 0) + { + LOG_F(INFO, "Current dir: %s", s_current_dir); + } + LOG_F(INFO, "stderr verbosity: %d", g_stderr_verbosity); + LOG_F(INFO, "-----------------------------------"); + + install_signal_handlers(); + + atexit(on_atexit); + } + + void shutdown() + { + LOG_F(INFO, "loguru::shutdown()"); + remove_all_callbacks(); + set_fatal_handler(nullptr); + set_verbosity_to_name_callback(nullptr); + set_name_to_verbosity_callback(nullptr); + } + + void write_date_time(char* buff, size_t buff_size) + { + auto now = system_clock::now(); + long long ms_since_epoch = duration_cast(now.time_since_epoch()).count(); + time_t sec_since_epoch = time_t(ms_since_epoch / 1000); + tm time_info; + localtime_r(&sec_since_epoch, &time_info); + snprintf(buff, buff_size, "%04d%02d%02d_%02d%02d%02d.%03lld", + 1900 + time_info.tm_year, 1 + time_info.tm_mon, time_info.tm_mday, + time_info.tm_hour, time_info.tm_min, time_info.tm_sec, ms_since_epoch % 1000); + } + + const char* argv0_filename() + { + return s_argv0_filename.c_str(); + } + + const char* arguments() + { + return s_arguments.c_str(); + } + + const char* current_dir() + { + return s_current_dir; + } + + const char* home_dir() + { + #ifdef _WIN32 + auto user_profile = getenv("USERPROFILE"); + CHECK_F(user_profile != nullptr, "Missing USERPROFILE"); + return user_profile; + #else // _WIN32 + auto home = getenv("HOME"); + CHECK_F(home != nullptr, "Missing HOME"); + return home; + #endif // _WIN32 + } + + void suggest_log_path(const char* prefix, char* buff, unsigned buff_size) + { + if (prefix[0] == '~') { + snprintf(buff, buff_size - 1, "%s%s", home_dir(), prefix + 1); + } else { + snprintf(buff, buff_size - 1, "%s", prefix); + } + + // Check for 
terminating / + size_t n = strlen(buff); + if (n != 0) { + if (buff[n - 1] != '/') { + CHECK_F(n + 2 < buff_size, "Filename buffer too small"); + buff[n] = '/'; + buff[n + 1] = '\0'; + } + } + + strncat(buff, s_argv0_filename.c_str(), buff_size - strlen(buff) - 1); + strncat(buff, "/", buff_size - strlen(buff) - 1); + write_date_time(buff + strlen(buff), buff_size - strlen(buff)); + strncat(buff, ".log", buff_size - strlen(buff) - 1); + } + + bool create_directories(const char* file_path_const) + { + CHECK_F(file_path_const && *file_path_const); + char* file_path = strdup(file_path_const); + for (char* p = strchr(file_path + 1, '/'); p; p = strchr(p + 1, '/')) { + *p = '\0'; + + #ifdef _WIN32 + if (_mkdir(file_path) == -1) { + #else + if (mkdir(file_path, 0755) == -1) { + #endif + if (errno != EEXIST) { + LOG_F(ERROR, "Failed to create directory '%s'", file_path); + LOG_IF_F(ERROR, errno == EACCES, "EACCES"); + LOG_IF_F(ERROR, errno == ENAMETOOLONG, "ENAMETOOLONG"); + LOG_IF_F(ERROR, errno == ENOENT, "ENOENT"); + LOG_IF_F(ERROR, errno == ENOTDIR, "ENOTDIR"); + LOG_IF_F(ERROR, errno == ELOOP, "ELOOP"); + + *p = '/'; + free(file_path); + return false; + } + } + *p = '/'; + } + free(file_path); + return true; + } + bool add_file(const char* path_in, FileMode mode, Verbosity verbosity) + { + char path[PATH_MAX]; + if (path_in[0] == '~') { + snprintf(path, sizeof(path) - 1, "%s%s", home_dir(), path_in + 1); + } else { + snprintf(path, sizeof(path) - 1, "%s", path_in); + } + + if (!create_directories(path)) { + LOG_F(ERROR, "Failed to create directories to '%s'", path); + } + + const char* mode_str = (mode == FileMode::Truncate ? "w" : "a"); + auto file = fopen(path, mode_str); + if (!file) { + LOG_F(ERROR, "Failed to open '%s'", path); + return false; + } +#if LOGURU_WITH_FILEABS + FileAbs* file_abs = new FileAbs(); // this is deleted in file_close; + snprintf(file_abs->path, sizeof(file_abs->path) - 1, "%s", path); + snprintf(file_abs->mode_str, sizeof(file_abs->mode_str) - 1, "%s", mode_str); + stat(file_abs->path, &file_abs->st); + file_abs->fp = file; + file_abs->verbosity = verbosity; + add_callback(path_in, file_log, file_abs, verbosity, file_close, file_flush); +#else + add_callback(path_in, file_log, file, verbosity, file_close, file_flush); +#endif + + if (mode == FileMode::Append) { + fprintf(file, "\n\n\n\n\n"); + } + if (!s_arguments.empty()) { + fprintf(file, "arguments: %s\n", s_arguments.c_str()); + } + if (strlen(s_current_dir) != 0) { + fprintf(file, "Current dir: %s\n", s_current_dir); + } + fprintf(file, "File verbosity level: %d\n", verbosity); + if (g_preamble) { + char preamble_explain[LOGURU_PREAMBLE_WIDTH]; + print_preamble_header(preamble_explain, sizeof(preamble_explain)); + fprintf(file, "%s\n", preamble_explain); + } + fflush(file); + + LOG_F(INFO, "Logging to '%s', mode: '%s', verbosity: %d", path, mode_str, verbosity); + return true; + } + + // Will be called right before abort(). 
+ void set_fatal_handler(fatal_handler_t handler) + { + s_fatal_handler = handler; + } + + fatal_handler_t get_fatal_handler() + { + return s_fatal_handler; + } + + void set_verbosity_to_name_callback(verbosity_to_name_t callback) + { + s_verbosity_to_name_callback = callback; + } + + void set_name_to_verbosity_callback(name_to_verbosity_t callback) + { + s_name_to_verbosity_callback = callback; + } + + void add_stack_cleanup(const char* find_this, const char* replace_with_this) + { + if (strlen(find_this) <= strlen(replace_with_this)) { + LOG_F(WARNING, "add_stack_cleanup: the replacement should be shorter than the pattern!"); + return; + } + + s_user_stack_cleanups.push_back(StringPair(find_this, replace_with_this)); + } + + static void on_callback_change() + { + s_max_out_verbosity = Verbosity_OFF; + for (const auto& callback : s_callbacks) { + s_max_out_verbosity = std::max(s_max_out_verbosity, callback.verbosity); + } + } + + void add_callback( + const char* id, + log_handler_t callback, + void* user_data, + Verbosity verbosity, + close_handler_t on_close, + flush_handler_t on_flush) + { + std::lock_guard lock(s_mutex); + s_callbacks.push_back(Callback{id, callback, user_data, verbosity, on_close, on_flush, 0}); + on_callback_change(); + } + + // Returns a custom verbosity name if one is available, or nullptr. + // See also set_verbosity_to_name_callback. + const char* get_verbosity_name(Verbosity verbosity) + { + auto name = s_verbosity_to_name_callback + ? (*s_verbosity_to_name_callback)(verbosity) + : nullptr; + + // Use standard replacements if callback fails: + if (!name) + { + if (verbosity <= Verbosity_FATAL) { + name = "FATL"; + } else if (verbosity == Verbosity_ERROR) { + name = "ERR"; + } else if (verbosity == Verbosity_WARNING) { + name = "WARN"; + } else if (verbosity == Verbosity_INFO) { + name = "INFO"; + } + } + + return name; + } + + // Returns Verbosity_INVALID if the name is not found. + // See also set_name_to_verbosity_callback. + Verbosity get_verbosity_from_name(const char* name) + { + auto verbosity = s_name_to_verbosity_callback + ? (*s_name_to_verbosity_callback)(name) + : Verbosity_INVALID; + + // Use standard replacements if callback fails: + if (verbosity == Verbosity_INVALID) { + if (strcmp(name, "OFF") == 0) { + verbosity = Verbosity_OFF; + } else if (strcmp(name, "INFO") == 0) { + verbosity = Verbosity_INFO; + } else if (strcmp(name, "WARNING") == 0) { + verbosity = Verbosity_WARNING; + } else if (strcmp(name, "ERROR") == 0) { + verbosity = Verbosity_ERROR; + } else if (strcmp(name, "FATAL") == 0) { + verbosity = Verbosity_FATAL; + } + } + + return verbosity; + } + + bool remove_callback(const char* id) + { + std::lock_guard lock(s_mutex); + auto it = std::find_if(begin(s_callbacks), end(s_callbacks), [&](const Callback& c) { return c.id == id; }); + if (it != s_callbacks.end()) { + if (it->close) { it->close(it->user_data); } + s_callbacks.erase(it); + on_callback_change(); + return true; + } else { + LOG_F(ERROR, "Failed to locate callback with id '%s'", id); + return false; + } + } + + void remove_all_callbacks() + { + std::lock_guard lock(s_mutex); + for (auto& callback : s_callbacks) { + if (callback.close) { + callback.close(callback.user_data); + } + } + s_callbacks.clear(); + on_callback_change(); + } + + // Returns the maximum of g_stderr_verbosity and all file/custom outputs. + Verbosity current_verbosity_cutoff() + { + return g_stderr_verbosity > s_max_out_verbosity ? 
+ g_stderr_verbosity : s_max_out_verbosity; + } + +#if LOGURU_WINTHREADS + char* get_thread_name_win32() + { + __declspec( thread ) static char thread_name[LOGURU_THREADNAME_WIDTH + 1] = {0}; + return &thread_name[0]; + } +#endif // LOGURU_WINTHREADS + + void set_thread_name(const char* name) + { + #if LOGURU_PTLS_NAMES + (void)pthread_once(&s_pthread_key_once, make_pthread_key_name); + (void)pthread_setspecific(s_pthread_key_name, strdup(name)); + + #elif LOGURU_PTHREADS + #ifdef __APPLE__ + pthread_setname_np(name); + #elif defined(__FreeBSD__) || defined(__OpenBSD__) + pthread_set_name_np(pthread_self(), name); + #elif defined(__linux__) + pthread_setname_np(pthread_self(), name); + #endif + #elif LOGURU_WINTHREADS + strncpy_s(get_thread_name_win32(), LOGURU_THREADNAME_WIDTH + 1, name, _TRUNCATE); + #else // LOGURU_PTHREADS + (void)name; + #endif // LOGURU_PTHREADS + } + +#if LOGURU_PTLS_NAMES + const char* get_thread_name_ptls() + { + (void)pthread_once(&s_pthread_key_once, make_pthread_key_name); + return static_cast(pthread_getspecific(s_pthread_key_name)); + } +#endif // LOGURU_PTLS_NAMES + + void get_thread_name(char* buffer, unsigned long long length, bool right_align_hext_id) + { + CHECK_NE_F(length, 0u, "Zero length buffer in get_thread_name"); + CHECK_NOTNULL_F(buffer, "nullptr in get_thread_name"); +#if LOGURU_PTHREADS + auto thread = pthread_self(); + #if LOGURU_PTLS_NAMES + if (const char* name = get_thread_name_ptls()) { + snprintf(buffer, length, "%s", name); + } else { + buffer[0] = 0; + } + #elif defined(__APPLE__) || defined(__linux__) + pthread_getname_np(thread, buffer, length); + #else + buffer[0] = 0; + #endif + + if (buffer[0] == 0) { + #ifdef __APPLE__ + uint64_t thread_id; + pthread_threadid_np(thread, &thread_id); + #elif defined(__FreeBSD__) + long thread_id; + (void)thr_self(&thread_id); + #elif defined(__OpenBSD__) + unsigned thread_id = -1; + #else + uint64_t thread_id = thread; + #endif + if (right_align_hext_id) { + snprintf(buffer, length, "%*X", static_cast(length - 1), static_cast(thread_id)); + } else { + snprintf(buffer, length, "%X", static_cast(thread_id)); + } + } +#elif LOGURU_WINTHREADS + if (const char* name = get_thread_name_win32()) { + snprintf(buffer, (size_t)length, "%s", name); + } else { + buffer[0] = 0; + } +#else // !LOGURU_WINTHREADS && !LOGURU_WINTHREADS + buffer[0] = 0; +#endif + + } + + // ------------------------------------------------------------------------ + // Stack traces + +#if LOGURU_STACKTRACES + Text demangle(const char* name) + { + int status = -1; + char* demangled = abi::__cxa_demangle(name, 0, 0, &status); + Text result{status == 0 ? 
demangled : strdup(name)}; + return result; + } + + #if LOGURU_RTTI + template + std::string type_name() + { + auto demangled = demangle(typeid(T).name()); + return demangled.c_str(); + } + #endif // LOGURU_RTTI + + static const StringPairList REPLACE_LIST = { + #if LOGURU_RTTI + { type_name(), "std::string" }, + { type_name(), "std::wstring" }, + { type_name(), "std::u16string" }, + { type_name(), "std::u32string" }, + #endif // LOGURU_RTTI + { "std::__1::", "std::" }, + { "__thiscall ", "" }, + { "__cdecl ", "" }, + }; + + void do_replacements(const StringPairList& replacements, std::string& str) + { + for (auto&& p : replacements) { + if (p.first.size() <= p.second.size()) { + // On gcc, "type_name()" is "std::string" + continue; + } + + size_t it; + while ((it=str.find(p.first)) != std::string::npos) { + str.replace(it, p.first.size(), p.second); + } + } + } + + std::string prettify_stacktrace(const std::string& input) + { + std::string output = input; + + do_replacements(s_user_stack_cleanups, output); + do_replacements(REPLACE_LIST, output); + + try { + std::regex std_allocator_re(R"(,\s*std::allocator<[^<>]+>)"); + output = std::regex_replace(output, std_allocator_re, std::string("")); + + std::regex template_spaces_re(R"(<\s*([^<> ]+)\s*>)"); + output = std::regex_replace(output, template_spaces_re, std::string("<$1>")); + } catch (std::regex_error&) { + // Probably old GCC. + } + + return output; + } + + std::string stacktrace_as_stdstring(int skip) + { + // From https://gist.github.com/fmela/591333 + void* callstack[128]; + const auto max_frames = sizeof(callstack) / sizeof(callstack[0]); + int num_frames = backtrace(callstack, max_frames); + char** symbols = backtrace_symbols(callstack, num_frames); + + std::string result; + // Print stack traces so the most relevant ones are written last + // Rationale: http://yellerapp.com/posts/2015-01-22-upside-down-stacktraces.html + for (int i = num_frames - 1; i >= skip; --i) { + char buf[1024]; + Dl_info info; + if (dladdr(callstack[i], &info) && info.dli_sname) { + char* demangled = NULL; + int status = -1; + if (info.dli_sname[0] == '_') { + demangled = abi::__cxa_demangle(info.dli_sname, 0, 0, &status); + } + snprintf(buf, sizeof(buf), "%-3d %*p %s + %zd\n", + i - skip, int(2 + sizeof(void*) * 2), callstack[i], + status == 0 ? demangled : + info.dli_sname == 0 ? 
symbols[i] : info.dli_sname, + static_cast(callstack[i]) - static_cast(info.dli_saddr)); + free(demangled); + } else { + snprintf(buf, sizeof(buf), "%-3d %*p %s\n", + i - skip, int(2 + sizeof(void*) * 2), callstack[i], symbols[i]); + } + result += buf; + } + free(symbols); + + if (num_frames == max_frames) { + result = "[truncated]\n" + result; + } + + if (!result.empty() && result[result.size() - 1] == '\n') { + result.resize(result.size() - 1); + } + + return prettify_stacktrace(result); + } + +#else // LOGURU_STACKTRACES + Text demangle(const char* name) + { + return Text(strdup(name)); + } + + std::string stacktrace_as_stdstring(int) + { + // No stacktraces available on this platform" + return ""; + } + +#endif // LOGURU_STACKTRACES + + Text stacktrace(int skip) + { + auto str = stacktrace_as_stdstring(skip + 1); + return Text(strdup(str.c_str())); + } + + // ------------------------------------------------------------------------ + + static void print_preamble_header(char* out_buff, size_t out_buff_size) + { + if (out_buff_size == 0) { return; } + out_buff[0] = '\0'; + long pos = 0; + if (g_preamble_date && pos < out_buff_size) { + pos += snprintf(out_buff + pos, out_buff_size - pos, "date "); + } + if (g_preamble_time && pos < out_buff_size) { + pos += snprintf(out_buff + pos, out_buff_size - pos, "time "); + } + if (g_preamble_uptime && pos < out_buff_size) { + pos += snprintf(out_buff + pos, out_buff_size - pos, "( uptime ) "); + } + if (g_preamble_thread && pos < out_buff_size) { + pos += snprintf(out_buff + pos, out_buff_size - pos, "[%-*s]", LOGURU_THREADNAME_WIDTH, " thread name/id"); + } + if (g_preamble_file && pos < out_buff_size) { + pos += snprintf(out_buff + pos, out_buff_size - pos, "%*s:line ", LOGURU_FILENAME_WIDTH, "file"); + } + if (g_preamble_verbose && pos < out_buff_size) { + pos += snprintf(out_buff + pos, out_buff_size - pos, " v"); + } + if (g_preamble_pipe && pos < out_buff_size) { + pos += snprintf(out_buff + pos, out_buff_size - pos, "| "); + } + } + + static void print_preamble(char* out_buff, size_t out_buff_size, Verbosity verbosity, const char* file, unsigned line) + { + if (out_buff_size == 0) { return; } + out_buff[0] = '\0'; + if (!g_preamble) { return; } + long long ms_since_epoch = duration_cast(system_clock::now().time_since_epoch()).count(); + time_t sec_since_epoch = time_t(ms_since_epoch / 1000); + tm time_info; + localtime_r(&sec_since_epoch, &time_info); + + auto uptime_ms = duration_cast(steady_clock::now() - s_start_time).count(); + auto uptime_sec = uptime_ms / 1000.0; + + char thread_name[LOGURU_THREADNAME_WIDTH + 1] = {0}; + get_thread_name(thread_name, LOGURU_THREADNAME_WIDTH + 1, true); + + if (s_strip_file_path) { + file = filename(file); + } + + char level_buff[6]; + const char* custom_level_name = get_verbosity_name(verbosity); + if (custom_level_name) { + snprintf(level_buff, sizeof(level_buff) - 1, "%s", custom_level_name); + } else { + snprintf(level_buff, sizeof(level_buff) - 1, "% 4d", verbosity); + } + + long pos = 0; + + if (g_preamble_date && pos < out_buff_size) { + pos += snprintf(out_buff + pos, out_buff_size - pos, "%04d-%02d-%02d ", + 1900 + time_info.tm_year, 1 + time_info.tm_mon, time_info.tm_mday); + } + if (g_preamble_time && pos < out_buff_size) { + pos += snprintf(out_buff + pos, out_buff_size - pos, "%02d:%02d:%02d.%03lld ", + time_info.tm_hour, time_info.tm_min, time_info.tm_sec, ms_since_epoch % 1000); + } + if (g_preamble_uptime && pos < out_buff_size) { + pos += snprintf(out_buff + pos, out_buff_size - pos, 
"(%8.3fs) ", + uptime_sec); + } + if (g_preamble_thread && pos < out_buff_size) { + pos += snprintf(out_buff + pos, out_buff_size - pos, "[%-*s]", + LOGURU_THREADNAME_WIDTH, thread_name); + } + if (g_preamble_file && pos < out_buff_size) { + char shortened_filename[LOGURU_FILENAME_WIDTH + 1]; + snprintf(shortened_filename, LOGURU_FILENAME_WIDTH + 1, "%s", file); + pos += snprintf(out_buff + pos, out_buff_size - pos, "%*s:%-5u ", + LOGURU_FILENAME_WIDTH, shortened_filename, line); + } + if (g_preamble_verbose && pos < out_buff_size) { + pos += snprintf(out_buff + pos, out_buff_size - pos, "%4s", + level_buff); + } + if (g_preamble_pipe && pos < out_buff_size) { + pos += snprintf(out_buff + pos, out_buff_size - pos, "| "); + } + } + + // stack_trace_skip is just if verbosity == FATAL. + static void log_message(int stack_trace_skip, Message& message, bool with_indentation, bool abort_if_fatal) + { + const auto verbosity = message.verbosity; + std::lock_guard lock(s_mutex); + + if (message.verbosity == Verbosity_FATAL) { + auto st = loguru::stacktrace(stack_trace_skip + 2); + if (!st.empty()) { + RAW_LOG_F(ERROR, "Stack trace:\n%s", st.c_str()); + } + + auto ec = loguru::get_error_context(); + if (!ec.empty()) { + RAW_LOG_F(ERROR, "%s", ec.c_str()); + } + } + + if (with_indentation) { + message.indentation = indentation(s_stderr_indentation); + } + + if (verbosity <= g_stderr_verbosity) { + if (g_colorlogtostderr && s_terminal_has_color) { + if (verbosity > Verbosity_WARNING) { + fprintf(stderr, "%s%s%s%s%s%s%s%s\n", + terminal_reset(), + terminal_dim(), + message.preamble, + message.indentation, + verbosity == Verbosity_INFO ? terminal_reset() : "", // un-dim for info + message.prefix, + message.message, + terminal_reset()); + } else { + fprintf(stderr, "%s%s%s%s%s%s%s\n", + terminal_reset(), + verbosity == Verbosity_WARNING ? terminal_yellow() : terminal_red(), + message.preamble, + message.indentation, + message.prefix, + message.message, + terminal_reset()); + } + } else { + fprintf(stderr, "%s%s%s%s\n", + message.preamble, message.indentation, message.prefix, message.message); + } + + if (g_flush_interval_ms == 0) { + fflush(stderr); + } else { + s_needs_flushing = true; + } + } + + for (auto& p : s_callbacks) { + if (verbosity <= p.verbosity) { + if (with_indentation) { + message.indentation = indentation(p.indentation); + } + p.callback(p.user_data, message); + if (g_flush_interval_ms == 0) { + if (p.flush) { p.flush(p.user_data); } + } else { + s_needs_flushing = true; + } + } + } + + if (g_flush_interval_ms > 0 && !s_flush_thread) { + s_flush_thread = new std::thread([](){ + for (;;) { + if (s_needs_flushing) { + flush(); + } + std::this_thread::sleep_for(std::chrono::milliseconds(g_flush_interval_ms)); + } + }); + } + + if (message.verbosity == Verbosity_FATAL) { + flush(); + + if (s_fatal_handler) { + s_fatal_handler(message); + flush(); + } + + if (abort_if_fatal) { +#if LOGURU_CATCH_SIGABRT && !defined(_WIN32) + // Make sure we don't catch our own abort: + signal(SIGABRT, SIG_DFL); +#endif + abort(); + } + } + } + + // stack_trace_skip is just if verbosity == FATAL. 
+ void log_to_everywhere(int stack_trace_skip, Verbosity verbosity, + const char* file, unsigned line, + const char* prefix, const char* buff) + { + char preamble_buff[LOGURU_PREAMBLE_WIDTH]; + print_preamble(preamble_buff, sizeof(preamble_buff), verbosity, file, line); + auto message = Message{verbosity, file, line, preamble_buff, "", prefix, buff}; + log_message(stack_trace_skip + 1, message, true, true); + } + +#if LOGURU_USE_FMTLIB + void log(Verbosity verbosity, const char* file, unsigned line, const char* format, fmt::ArgList args) + { + auto formatted = fmt::format(format, args); + log_to_everywhere(1, verbosity, file, line, "", formatted.c_str()); + } + + void raw_log(Verbosity verbosity, const char* file, unsigned line, const char* format, fmt::ArgList args) + { + auto formatted = fmt::format(format, args); + auto message = Message{verbosity, file, line, "", "", "", formatted.c_str()}; + log_message(1, message, false, true); + } + +#else + void log(Verbosity verbosity, const char* file, unsigned line, const char* format, ...) + { + va_list vlist; + va_start(vlist, format); + auto buff = vtextprintf(format, vlist); + log_to_everywhere(1, verbosity, file, line, "", buff.c_str()); + va_end(vlist); + } + + void raw_log(Verbosity verbosity, const char* file, unsigned line, const char* format, ...) + { + va_list vlist; + va_start(vlist, format); + auto buff = vtextprintf(format, vlist); + auto message = Message{verbosity, file, line, "", "", "", buff.c_str()}; + log_message(1, message, false, true); + va_end(vlist); + } +#endif + + void flush() + { + std::lock_guard lock(s_mutex); + fflush(stderr); + for (const auto& callback : s_callbacks) + { + if (callback.flush) { + callback.flush(callback.user_data); + } + } + s_needs_flushing = false; + } + + LogScopeRAII::LogScopeRAII(Verbosity verbosity, const char* file, unsigned line, const char* format, ...) + : _verbosity(verbosity), _file(file), _line(line) + { + if (verbosity <= current_verbosity_cutoff()) { + std::lock_guard lock(s_mutex); + _indent_stderr = (verbosity <= g_stderr_verbosity); + _start_time_ns = now_ns(); + va_list vlist; + va_start(vlist, format); + vsnprintf(_name, sizeof(_name), format, vlist); + log_to_everywhere(1, _verbosity, file, line, "{ ", _name); + va_end(vlist); + + if (_indent_stderr) { + ++s_stderr_indentation; + } + + for (auto& p : s_callbacks) { + if (verbosity <= p.verbosity) { + ++p.indentation; + } + } + } else { + _file = nullptr; + } + } + + LogScopeRAII::~LogScopeRAII() + { + if (_file) { + std::lock_guard lock(s_mutex); + if (_indent_stderr && s_stderr_indentation > 0) { + --s_stderr_indentation; + } + for (auto& p : s_callbacks) { + // Note: Callback indentation cannot change! + if (_verbosity <= p.verbosity) { + // in unlikely case this callback is new + if (p.indentation > 0) { + --p.indentation; + } + } + } +#if LOGURU_VERBOSE_SCOPE_ENDINGS + auto duration_sec = (now_ns() - _start_time_ns) / 1e9; + auto buff = textprintf("%.*f s: %s", LOGURU_SCOPE_TIME_PRECISION, duration_sec, _name); + log_to_everywhere(1, _verbosity, _file, _line, "} ", buff.c_str()); +#else + log_to_everywhere(1, _verbosity, _file, _line, "}", ""); +#endif + } + } + + void log_and_abort(int stack_trace_skip, const char* expr, const char* file, unsigned line, const char* format, ...) 
+ { + va_list vlist; + va_start(vlist, format); + auto buff = vtextprintf(format, vlist); + log_to_everywhere(stack_trace_skip + 1, Verbosity_FATAL, file, line, expr, buff.c_str()); + va_end(vlist); + abort(); // log_to_everywhere already does this, but this makes the analyzer happy. + } + + void log_and_abort(int stack_trace_skip, const char* expr, const char* file, unsigned line) + { + log_and_abort(stack_trace_skip + 1, expr, file, line, " "); + } + + // ---------------------------------------------------------------------------- + // Streams: + + std::string vstrprintf(const char* format, va_list vlist) + { + auto text = vtextprintf(format, vlist); + std::string result = text.c_str(); + return result; + } + + std::string strprintf(const char* format, ...) + { + va_list vlist; + va_start(vlist, format); + auto result = vstrprintf(format, vlist); + va_end(vlist); + return result; + } + + #if LOGURU_WITH_STREAMS + + StreamLogger::~StreamLogger() noexcept(false) + { + auto message = _ss.str(); + log(_verbosity, _file, _line, "%s", message.c_str()); + } + + AbortLogger::~AbortLogger() noexcept(false) + { + auto message = _ss.str(); + loguru::log_and_abort(1, _expr, _file, _line, "%s", message.c_str()); + } + + #endif // LOGURU_WITH_STREAMS + + // ---------------------------------------------------------------------------- + // 888888 88""Yb 88""Yb dP"Yb 88""Yb dP""b8 dP"Yb 88b 88 888888 888888 Yb dP 888888 + // 88__ 88__dP 88__dP dP Yb 88__dP dP `" dP Yb 88Yb88 88 88__ YbdP 88 + // 88"" 88"Yb 88"Yb Yb dP 88"Yb Yb Yb dP 88 Y88 88 88"" dPYb 88 + // 888888 88 Yb 88 Yb YbodP 88 Yb YboodP YbodP 88 Y8 88 888888 dP Yb 88 + // ---------------------------------------------------------------------------- + + struct StringStream + { + std::string str; + }; + + // Use this in your EcPrinter implementations. 
+ void stream_print(StringStream& out_string_stream, const char* text) + { + out_string_stream.str += text; + } + + // ---------------------------------------------------------------------------- + + using ECPtr = EcEntryBase*; + +#if defined(_WIN32) || (defined(__APPLE__) && !TARGET_OS_IPHONE) + #ifdef __APPLE__ + #define LOGURU_THREAD_LOCAL __thread + #else + #define LOGURU_THREAD_LOCAL thread_local + #endif + static LOGURU_THREAD_LOCAL ECPtr thread_ec_ptr = nullptr; + + ECPtr& get_thread_ec_head_ref() + { + return thread_ec_ptr; + } +#else // !thread_local + static pthread_once_t s_ec_pthread_once = PTHREAD_ONCE_INIT; + static pthread_key_t s_ec_pthread_key; + + void free_ec_head_ref(void* io_error_context) + { + delete reinterpret_cast(io_error_context); + } + + void ec_make_pthread_key() + { + (void)pthread_key_create(&s_ec_pthread_key, free_ec_head_ref); + } + + ECPtr& get_thread_ec_head_ref() + { + (void)pthread_once(&s_ec_pthread_once, ec_make_pthread_key); + auto ec = reinterpret_cast(pthread_getspecific(s_ec_pthread_key)); + if (ec == nullptr) { + ec = new ECPtr(nullptr); + (void)pthread_setspecific(s_ec_pthread_key, ec); + } + return *ec; + } +#endif // !thread_local + + // ---------------------------------------------------------------------------- + + EcHandle get_thread_ec_handle() + { + return get_thread_ec_head_ref(); + } + + Text get_error_context() + { + return get_error_context_for(get_thread_ec_head_ref()); + } + + Text get_error_context_for(const EcEntryBase* ec_head) + { + std::vector stack; + while (ec_head) { + stack.push_back(ec_head); + ec_head = ec_head->_previous; + } + std::reverse(stack.begin(), stack.end()); + + StringStream result; + if (!stack.empty()) { + result.str += "------------------------------------------------\n"; + for (auto entry : stack) { + const auto description = std::string(entry->_descr) + ":"; + auto prefix = textprintf("[ErrorContext] %*s:%-5u %-20s ", + LOGURU_FILENAME_WIDTH, filename(entry->_file), entry->_line, description.c_str()); + result.str += prefix.c_str(); + entry->print_value(result); + result.str += "\n"; + } + result.str += "------------------------------------------------"; + } + return Text(strdup(result.str.c_str())); + } + + EcEntryBase::EcEntryBase(const char* file, unsigned line, const char* descr) + : _file(file), _line(line), _descr(descr) + { + EcEntryBase*& ec_head = get_thread_ec_head_ref(); + _previous = ec_head; + ec_head = this; + } + + EcEntryBase::~EcEntryBase() + { + get_thread_ec_head_ref() = _previous; + } + + // ------------------------------------------------------------------------ + + Text ec_to_text(const char* value) + { + // Add quotes around the string to make it obvious where it begin and ends. + // This is great for detecting erroneous leading or trailing spaces in e.g. an identifier. + auto str = "\"" + std::string(value) + "\""; + return Text{strdup(str.c_str())}; + } + + Text ec_to_text(char c) + { + // Add quotes around the character to make it obvious where it begin and ends. 
+ std::string str = "'"; + + auto write_hex_digit = [&](unsigned num) + { + if (num < 10u) { str += char('0' + num); } + else { str += char('a' + num - 10); } + }; + + auto write_hex_16 = [&](uint16_t n) + { + write_hex_digit((n >> 12u) & 0x0f); + write_hex_digit((n >> 8u) & 0x0f); + write_hex_digit((n >> 4u) & 0x0f); + write_hex_digit((n >> 0u) & 0x0f); + }; + + if (c == '\\') { str += "\\\\"; } + else if (c == '\"') { str += "\\\""; } + else if (c == '\'') { str += "\\\'"; } + else if (c == '\0') { str += "\\0"; } + else if (c == '\b') { str += "\\b"; } + else if (c == '\f') { str += "\\f"; } + else if (c == '\n') { str += "\\n"; } + else if (c == '\r') { str += "\\r"; } + else if (c == '\t') { str += "\\t"; } + else if (0 <= c && c < 0x20) { + str += "\\u"; + write_hex_16(static_cast(c)); + } else { str += c; } + + str += "'"; + + return Text{strdup(str.c_str())}; + } + + #define DEFINE_EC(Type) \ + Text ec_to_text(Type value) \ + { \ + auto str = std::to_string(value); \ + return Text{strdup(str.c_str())}; \ + } + + DEFINE_EC(int) + DEFINE_EC(unsigned int) + DEFINE_EC(long) + DEFINE_EC(unsigned long) + DEFINE_EC(long long) + DEFINE_EC(unsigned long long) + DEFINE_EC(float) + DEFINE_EC(double) + DEFINE_EC(long double) + + #undef DEFINE_EC + + Text ec_to_text(EcHandle ec_handle) + { + Text parent_ec = get_error_context_for(ec_handle); + char* with_newline = reinterpret_cast(malloc(strlen(parent_ec.c_str()) + 2)); + with_newline[0] = '\n'; + strcpy(with_newline + 1, parent_ec.c_str()); + return Text(with_newline); + } + + // ---------------------------------------------------------------------------- + +} // namespace loguru + +// ---------------------------------------------------------------------------- +// .dP"Y8 88 dP""b8 88b 88 db 88 .dP"Y8 +// `Ybo." 88 dP `" 88Yb88 dPYb 88 `Ybo." +// o.`Y8b 88 Yb "88 88 Y88 dP__Yb 88 .o o.`Y8b +// 8bodP' 88 YboodP 88 Y8 dP""""Yb 88ood8 8bodP' +// ---------------------------------------------------------------------------- + +#ifdef _WIN32 +namespace loguru { + void install_signal_handlers() + { + #if defined(_MSC_VER) + #pragma message ( "No signal handlers on Win32" ) + #else + #warning "No signal handlers on Win32" + #endif + } +} // namespace loguru + +#else // _WIN32 + +namespace loguru +{ + struct Signal + { + int number; + const char* name; + }; + const Signal ALL_SIGNALS[] = { +#if LOGURU_CATCH_SIGABRT + { SIGABRT, "SIGABRT" }, +#endif + { SIGBUS, "SIGBUS" }, + { SIGFPE, "SIGFPE" }, + { SIGILL, "SIGILL" }, + { SIGINT, "SIGINT" }, + { SIGSEGV, "SIGSEGV" }, + { SIGTERM, "SIGTERM" }, + }; + + void write_to_stderr(const char* data, size_t size) + { + auto result = write(STDERR_FILENO, data, size); + (void)result; // Ignore errors. + } + + void write_to_stderr(const char* data) + { + write_to_stderr(data, strlen(data)); + } + + void call_default_signal_handler(int signal_number) + { + struct sigaction sig_action; + memset(&sig_action, 0, sizeof(sig_action)); + sigemptyset(&sig_action.sa_mask); + sig_action.sa_handler = SIG_DFL; + sigaction(signal_number, &sig_action, NULL); + kill(getpid(), signal_number); + } + + void signal_handler(int signal_number, siginfo_t*, void*) + { + const char* signal_name = "UNKNOWN SIGNAL"; + + for (const auto& s : ALL_SIGNALS) { + if (s.number == signal_number) { + signal_name = s.name; + break; + } + } + + // -------------------------------------------------------------------- + /* There are few things that are safe to do in a signal handler, + but writing to stderr is one of them. 
+ So we first print out what happened to stderr so we're sure that gets out, + then we do the unsafe things, like logging the stack trace. + */ + + if (g_colorlogtostderr && s_terminal_has_color) { + write_to_stderr(terminal_reset()); + write_to_stderr(terminal_bold()); + write_to_stderr(terminal_light_red()); + } + write_to_stderr("\n"); + write_to_stderr("Loguru caught a signal: "); + write_to_stderr(signal_name); + write_to_stderr("\n"); + if (g_colorlogtostderr && s_terminal_has_color) { + write_to_stderr(terminal_reset()); + } + + // -------------------------------------------------------------------- + +#if LOGURU_UNSAFE_SIGNAL_HANDLER + // -------------------------------------------------------------------- + /* Now we do unsafe things. This can for example lead to deadlocks if + the signal was triggered from the system's memory management functions + and the code below tries to do allocations. + */ + + flush(); + char preamble_buff[LOGURU_PREAMBLE_WIDTH]; + print_preamble(preamble_buff, sizeof(preamble_buff), Verbosity_FATAL, "", 0); + auto message = Message{Verbosity_FATAL, "", 0, preamble_buff, "", "Signal: ", signal_name}; + try { + log_message(1, message, false, false); + } catch (...) { + // This can happed due to s_fatal_handler. + write_to_stderr("Exception caught and ignored by Loguru signal handler.\n"); + } + flush(); + + // -------------------------------------------------------------------- +#endif // LOGURU_UNSAFE_SIGNAL_HANDLER + + call_default_signal_handler(signal_number); + } + + void install_signal_handlers() + { + struct sigaction sig_action; + memset(&sig_action, 0, sizeof(sig_action)); + sigemptyset(&sig_action.sa_mask); + sig_action.sa_flags |= SA_SIGINFO; + sig_action.sa_sigaction = &signal_handler; + for (const auto& s : ALL_SIGNALS) { + CHECK_F(sigaction(s.number, &sig_action, NULL) != -1, + "Failed to install handler for %s", s.name); + } + } +} // namespace loguru + +#endif // _WIN32 + +#endif // LOGURU_IMPLEMENTATION diff --git a/src/includes/3thparty/emilib/loguru.hpp b/src/includes/3thparty/emilib/loguru.hpp new file mode 100644 index 0000000..76e2a82 --- /dev/null +++ b/src/includes/3thparty/emilib/loguru.hpp @@ -0,0 +1,1324 @@ +/* +Loguru logging library for C++, by Emil Ernerfeldt. +www.github.com/emilk/loguru +If you find Loguru useful, please let me know on twitter or in a mail! +Twitter: @ernerfeldt +Mail: emil.ernerfeldt@gmail.com +Website: www.ilikebigbits.com + +# License + This software is in the public domain. Where that dedication is not + recognized, you are granted a perpetual, irrevocable license to copy + and modify this file as you see fit. + +# Inspiration + Much of Loguru was inspired by GLOG, https://code.google.com/p/google-glog/. + The choice of public domain is fully due Sean T. Barrett + and his wonderful stb libraries at https://github.com/nothings/stb. + +# Version history + * Version 0.1.0 - 2015-03-22 - Works great on Mac. + * Version 0.2.0 - 2015-09-17 - Removed the only dependency. + * Version 0.3.0 - 2015-10-02 - Drop-in replacement for most of GLOG + * Version 0.4.0 - 2015-10-07 - Single-file! + * Version 0.5.0 - 2015-10-17 - Improved file logging + * Version 0.6.0 - 2015-10-24 - Add stack traces + * Version 0.7.0 - 2015-10-27 - Signals + * Version 0.8.0 - 2015-10-30 - Color logging. 
+ * Version 0.9.0 - 2015-11-26 - ABORT_S and proper handling of FATAL + * Version 1.0.0 - 2016-02-14 - ERROR_CONTEXT + * Version 1.1.0 - 2016-02-19 - -v OFF, -v INFO etc + * Version 1.1.1 - 2016-02-20 - textprintf vs strprintf + * Version 1.1.2 - 2016-02-22 - Remove g_alsologtostderr + * Version 1.1.3 - 2016-02-29 - ERROR_CONTEXT as linked list + * Version 1.2.0 - 2016-03-19 - Add get_thread_name() + * Version 1.2.1 - 2016-03-20 - Minor fixes + * Version 1.2.2 - 2016-03-29 - Fix issues with set_fatal_handler throwing an exception + * Version 1.2.3 - 2016-05-16 - Log current working directory in loguru::init(). + * Version 1.2.4 - 2016-05-18 - Custom replacement for -v in loguru::init() by bjoernpollex + * Version 1.2.5 - 2016-05-18 - Add ability to print ERROR_CONTEXT of parent thread. + * Version 1.2.6 - 2016-05-19 - Bug fix regarding VLOG verbosity argument lacking (). + * Version 1.2.7 - 2016-05-23 - Fix PATH_MAX problem. + * Version 1.2.8 - 2016-05-26 - Add shutdown() and remove_all_callbacks() + * Version 1.2.9 - 2016-06-09 - Use a monotonic clock for uptime. + * Version 1.3.0 - 2016-07-20 - Fix issues with callback flush/close not being called. + * Version 1.3.1 - 2016-07-20 - Add LOGURU_UNSAFE_SIGNAL_HANDLER to toggle stacktrace on signals. + * Version 1.3.2 - 2016-07-20 - Add loguru::arguments() + * Version 1.4.0 - 2016-09-15 - Semantic versioning + add loguru::create_directories + * Version 1.4.1 - 2016-09-29 - Customize formating with LOGURU_FILENAME_WIDTH + * Version 1.5.0 - 2016-12-22 - LOGURU_USE_FMTLIB by kolis and LOGURU_WITH_FILEABS by scinart + * Version 1.5.1 - 2017-08-08 - Terminal colors on Windows 10 thanks to looki + * Version 1.6.0 - 2018-01-03 - Add LOGURU_RTTI and LOGURU_STACKTRACES settings + * Version 1.7.0 - 2018-01-03 - Add ability to turn off the preamble with loguru::g_preamble + * Version 1.7.1 - 2018-04-05 - Add function get_fatal_handler + * Version 1.7.2 - 2018-04-22 - Fix a bug where large file names could cause stack corruption (thanks @ccamporesi) + * Version 1.8.0 - 2018-04-23 - Shorten long file names to keep preamble fixed width + * Version 1.9.0 - 2018-09-22 - Adjust terminal colors, add LOGURU_VERBOSE_SCOPE_ENDINGS, add LOGURU_SCOPE_TIME_PRECISION, add named log levels + * Version 2.0.0 - 2018-09-22 - Split loguru.hpp into loguru.hpp and loguru.cpp + +# Compiling + Just include where you want to use Loguru. + Then, in one .cpp file #include + Make sure you compile with -std=c++11 -lstdc++ -lpthread -ldl + +# Usage + For details, please see the official documentation at emilk.github.io/loguru + + #include + + int main(int argc, char* argv[]) { + loguru::init(argc, argv); + + // Put every log message in "everything.log": + loguru::add_file("everything.log", loguru::Append, loguru::Verbosity_MAX); + + LOG_F(INFO, "The magic number is %d", 42); + } + +*/ + +#if defined(LOGURU_IMPLEMENTATION) + #warning "You are defining LOGURU_IMPLEMENTATION. This is for older versions of Loguru. You should now instead include loguru.cpp (or build it and link with it)" +#endif + +// Disable all warnings from gcc/clang: +#if defined(__clang__) + #pragma clang system_header +#elif defined(__GNUC__) + #pragma GCC system_header +#endif + +#ifndef LOGURU_HAS_DECLARED_FORMAT_HEADER +#define LOGURU_HAS_DECLARED_FORMAT_HEADER + +// Semantic versioning. 
Loguru version can be printed with printf("%d.%d.%d", LOGURU_VERSION_MAJOR, LOGURU_VERSION_MINOR, LOGURU_VERSION_PATCH); +#define LOGURU_VERSION_MAJOR 2 +#define LOGURU_VERSION_MINOR 0 +#define LOGURU_VERSION_PATCH 0 + +#if defined(_MSC_VER) +#include // Needed for _In_z_ etc annotations +#endif + +// ---------------------------------------------------------------------------- + +#ifndef LOGURU_EXPORT + // Define to your project's export declaration if needed for use in a shared library. + #define LOGURU_EXPORT +#endif + +#ifndef LOGURU_SCOPE_TEXT_SIZE + // Maximum length of text that can be printed by a LOG_SCOPE. + // This should be long enough to get most things, but short enough not to clutter the stack. + #define LOGURU_SCOPE_TEXT_SIZE 196 +#endif + +#ifndef LOGURU_FILENAME_WIDTH + // Width of the column containing the file name + #define LOGURU_FILENAME_WIDTH 23 +#endif + +#ifndef LOGURU_THREADNAME_WIDTH + // Width of the column containing the thread name + #define LOGURU_THREADNAME_WIDTH 16 +#endif + +#ifndef LOGURU_SCOPE_TIME_PRECISION + // Resolution of scope timers. 3=ms, 6=us, 9=ns + #define LOGURU_SCOPE_TIME_PRECISION 3 +#endif + +#ifndef LOGURU_CATCH_SIGABRT + // Should Loguru catch SIGABRT to print stack trace etc? + #define LOGURU_CATCH_SIGABRT 1 +#endif + +#ifndef LOGURU_VERBOSE_SCOPE_ENDINGS + // Show milliseconds and scope name at end of scope. + #define LOGURU_VERBOSE_SCOPE_ENDINGS 1 +#endif + +#ifndef LOGURU_REDEFINE_ASSERT + #define LOGURU_REDEFINE_ASSERT 0 +#endif + +#ifndef LOGURU_WITH_STREAMS + #define LOGURU_WITH_STREAMS 0 +#endif + +#ifndef LOGURU_REPLACE_GLOG + #define LOGURU_REPLACE_GLOG 0 +#endif + +#if LOGURU_REPLACE_GLOG + #undef LOGURU_WITH_STREAMS + #define LOGURU_WITH_STREAMS 1 +#endif + +#ifndef LOGURU_UNSAFE_SIGNAL_HANDLER + #define LOGURU_UNSAFE_SIGNAL_HANDLER 1 +#endif + +#if LOGURU_IMPLEMENTATION + #undef LOGURU_WITH_STREAMS + #define LOGURU_WITH_STREAMS 1 +#endif + +#ifndef LOGURU_USE_FMTLIB + #define LOGURU_USE_FMTLIB 0 +#endif + +#ifndef LOGURU_WITH_FILEABS + #define LOGURU_WITH_FILEABS 0 +#endif + +#ifndef LOGURU_RTTI +#if defined(__clang__) + #if __has_feature(cxx_rtti) + #define LOGURU_RTTI 1 + #endif +#elif defined(__GNUG__) + #if defined(__GXX_RTTI) + #define LOGURU_RTTI 1 + #endif +#elif defined(_MSC_VER) + #if defined(_CPPRTTI) + #define LOGURU_RTTI 1 + #endif +#endif +#endif + +// -------------------------------------------------------------------- +// Utility macros + +#define LOGURU_CONCATENATE_IMPL(s1, s2) s1 ## s2 +#define LOGURU_CONCATENATE(s1, s2) LOGURU_CONCATENATE_IMPL(s1, s2) + +#ifdef __COUNTER__ +# define LOGURU_ANONYMOUS_VARIABLE(str) LOGURU_CONCATENATE(str, __COUNTER__) +#else +# define LOGURU_ANONYMOUS_VARIABLE(str) LOGURU_CONCATENATE(str, __LINE__) +#endif + +#if defined(__clang__) || defined(__GNUC__) + // Helper macro for declaring functions as having similar signature to printf. + // This allows the compiler to catch format errors at compile-time. + #define LOGURU_PRINTF_LIKE(fmtarg, firstvararg) __attribute__((__format__ (__printf__, fmtarg, firstvararg))) + #define LOGURU_FORMAT_STRING_TYPE const char* +#elif defined(_MSC_VER) + #define LOGURU_PRINTF_LIKE(fmtarg, firstvararg) + #define LOGURU_FORMAT_STRING_TYPE _In_z_ _Printf_format_string_ const char* +#else + #define LOGURU_PRINTF_LIKE(fmtarg, firstvararg) + #define LOGURU_FORMAT_STRING_TYPE const char* +#endif + +// Used to mark log_and_abort for the benefit of the static analyzer and optimizer. 
+#if defined(_MSC_VER) +#define LOGURU_NORETURN __declspec(noreturn) +#else +#define LOGURU_NORETURN __attribute__((noreturn)) +#endif + +#if defined(_MSC_VER) +#define LOGURU_PREDICT_FALSE(x) (x) +#define LOGURU_PREDICT_TRUE(x) (x) +#else +#define LOGURU_PREDICT_FALSE(x) (__builtin_expect(x, 0)) +#define LOGURU_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1)) +#endif + +#if LOGURU_USE_FMTLIB + #include +#endif + +// -------------------------------------------------------------------- + +namespace loguru +{ + // Simple RAII ownership of a char*. + class LOGURU_EXPORT Text + { + public: + explicit Text(char* owned_str) : _str(owned_str) {} + ~Text(); + Text(Text&& t) + { + _str = t._str; + t._str = nullptr; + } + Text(Text& t) = delete; + Text& operator=(Text& t) = delete; + void operator=(Text&& t) = delete; + + const char* c_str() const { return _str; } + bool empty() const { return _str == nullptr || *_str == '\0'; } + + char* release() + { + auto result = _str; + _str = nullptr; + return result; + } + + private: + char* _str; + }; + + // Like printf, but returns the formated text. + LOGURU_EXPORT + Text textprintf(LOGURU_FORMAT_STRING_TYPE format, ...) LOGURU_PRINTF_LIKE(1, 2); + + // Overloaded for variadic template matching. + LOGURU_EXPORT + Text textprintf(); + + using Verbosity = int; + +#undef FATAL +#undef ERROR +#undef WARNING +#undef INFO +#undef MAX + + enum NamedVerbosity : Verbosity + { + // Used to mark an invalid verbosity. Do not log to this level. + Verbosity_INVALID = -10, // Never do LOG_F(INVALID) + + // You may use Verbosity_OFF on g_stderr_verbosity, but for nothing else! + Verbosity_OFF = -9, // Never do LOG_F(OFF) + + // Prefer to use ABORT_F or ABORT_S over LOG_F(FATAL) or LOG_S(FATAL). + Verbosity_FATAL = -3, + Verbosity_ERROR = -2, + Verbosity_WARNING = -1, + + // Normal messages. By default written to stderr. + Verbosity_INFO = 0, + + // Same as Verbosity_INFO in every way. + Verbosity_0 = 0, + + // Verbosity levels 1-9 are generally not written to stderr, but are written to file. + Verbosity_1 = +1, + Verbosity_2 = +2, + Verbosity_3 = +3, + Verbosity_4 = +4, + Verbosity_5 = +5, + Verbosity_6 = +6, + Verbosity_7 = +7, + Verbosity_8 = +8, + Verbosity_9 = +9, + + // Don not use higher verbosity levels, as that will make grepping log files harder. + Verbosity_MAX = +9, + }; + + struct Message + { + // You would generally print a Message by just concating the buffers without spacing. + // Optionally, ignore preamble and indentation. + Verbosity verbosity; // Already part of preamble + const char* filename; // Already part of preamble + unsigned line; // Already part of preamble + const char* preamble; // Date, time, uptime, thread, file:line, verbosity. + const char* indentation; // Just a bunch of spacing. + const char* prefix; // Assertion failure info goes here (or ""). + const char* message; // User message goes here. + }; + + /* Everything with a verbosity equal or greater than g_stderr_verbosity will be + written to stderr. You can set this in code or via the -v argument. + Set to loguru::Verbosity_OFF to write nothing to stderr. + Default is 0, i.e. only log ERROR, WARNING and INFO are written to stderr. + */ + LOGURU_EXPORT extern Verbosity g_stderr_verbosity; + LOGURU_EXPORT extern bool g_colorlogtostderr; // True by default. + LOGURU_EXPORT extern unsigned g_flush_interval_ms; // 0 (unbuffered) by default. + LOGURU_EXPORT extern bool g_preamble; // Prefix each log line with date, time etc? True by default. 
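+
+	/* A minimal configuration sketch (not part of the original header), assuming
+	   only the globals declared above; set them before calling loguru::init():
+
+	       loguru::g_stderr_verbosity  = loguru::Verbosity_1; // also show -v 1 messages
+	       loguru::g_colorlogtostderr  = false;               // e.g. when piping stderr to a file
+	       loguru::g_flush_interval_ms = 100;                 // flush buffered output every ~100 ms
+	       loguru::init(argc, argv);
+	*/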
+
+	// Turn off individual parts of the preamble
+	LOGURU_EXPORT extern bool g_preamble_date;    // The date field
+	LOGURU_EXPORT extern bool g_preamble_time;    // The time of the current day
+	LOGURU_EXPORT extern bool g_preamble_uptime;  // The time since init call
+	LOGURU_EXPORT extern bool g_preamble_thread;  // The logging thread
+	LOGURU_EXPORT extern bool g_preamble_file;    // The file from which the log originates
+	LOGURU_EXPORT extern bool g_preamble_verbose; // The verbosity field
+	LOGURU_EXPORT extern bool g_preamble_pipe;    // The pipe symbol right before the message
+
+	// May not throw!
+	typedef void (*log_handler_t)(void* user_data, const Message& message);
+	typedef void (*close_handler_t)(void* user_data);
+	typedef void (*flush_handler_t)(void* user_data);
+
+	// May throw if that's how you'd like to handle your errors.
+	typedef void (*fatal_handler_t)(const Message& message);
+
+	// Given a verbosity level, return the level's name or nullptr.
+	typedef const char* (*verbosity_to_name_t)(Verbosity verbosity);
+
+	// Given a verbosity level name, return the verbosity level or
+	// Verbosity_INVALID if name is not recognized.
+	typedef Verbosity (*name_to_verbosity_t)(const char* name);
+
+	/* Should be called from the main thread.
+	   You don't *need* to call this, but if you do you get:
+	       * Signal handlers installed
+	       * Program arguments logged
+	       * Working dir logged
+	       * Optional -v verbosity flag parsed
+	       * Main thread name set to "main thread"
+	       * Explanation of the preamble (date, thread name, etc.) logged
+
+	   loguru::init() will look for arguments meant for loguru and remove them.
+	   Arguments meant for loguru are:
+	       -v n        Set loguru::g_stderr_verbosity level. Examples:
+	       -v 3        Show verbosity level 3 and lower.
+	       -v 0        Only show INFO, WARNING, ERROR, FATAL (default).
+	       -v INFO     Only show INFO, WARNING, ERROR, FATAL (default).
+	       -v WARNING  Only show WARNING, ERROR, FATAL.
+	       -v ERROR    Only show ERROR, FATAL.
+	       -v FATAL    Only show FATAL.
+	       -v OFF      Turn off logging to stderr.
+
+	   Tip: You can set g_stderr_verbosity before calling loguru::init.
+	   That way you can set the default but have the user override it with the -v flag.
+	   Note that -v does not affect file logging (see loguru::add_file).
+
+	   You can use something else instead of "-v" via verbosity_flag.
+	   You can also set verbosity_flag to nullptr.
+	*/
+	LOGURU_EXPORT
+	void init(int& argc, char* argv[], const char* verbosity_flag = "-v");
+
+	// Will call remove_all_callbacks(). After calling this, logging will still go to stderr.
+	// You generally don't need to call this.
+	LOGURU_EXPORT
+	void shutdown();
+
+	// What ~ will be replaced with, e.g. "/home/your_user_name/"
+	LOGURU_EXPORT
+	const char* home_dir();
+
+	/* Returns the name of the app as given in argv[0] but without leading path.
+	   That is, if argv[0] is "../foo/app" this will return "app".
+	*/
+	LOGURU_EXPORT
+	const char* argv0_filename();
+
+	// Returns all arguments given to loguru::init(), but escaped with a single space as separator.
+	LOGURU_EXPORT
+	const char* arguments();
+
+	// Returns the path to the current working dir when loguru::init() was called.
+	LOGURU_EXPORT
+	const char* current_dir();
+
+	// Returns the part of the path after the last / or \ (if any).
+	LOGURU_EXPORT
+	const char* filename(const char* path);
+
+	// e.g. "foo/bar/baz.ext" will create the directories "foo/" and "foo/bar/"
+	LOGURU_EXPORT
+	bool create_directories(const char* file_path_const);
+
+	// Writes date and time with millisecond precision, e.g.
"20151017_161503.123" + LOGURU_EXPORT + void write_date_time(char* buff, unsigned buff_size); + + // Helper: thread-safe version strerror + LOGURU_EXPORT + Text errno_as_text(); + + /* Given a prefix of e.g. "~/loguru/" this might return + "/home/your_username/loguru/app_name/20151017_161503.123.log" + + where "app_name" is a sanitized version of argv[0]. + */ + LOGURU_EXPORT + void suggest_log_path(const char* prefix, char* buff, unsigned buff_size); + + enum FileMode { Truncate, Append }; + + /* Will log to a file at the given path. + Any logging message with a verbosity lower or equal to + the given verbosity will be included. + The function will create all directories in 'path' if needed. + If path starts with a ~, it will be replaced with loguru::home_dir() + To stop the file logging, just call loguru::remove_callback(path) with the same path. + */ + LOGURU_EXPORT + bool add_file(const char* path, FileMode mode, Verbosity verbosity); + + /* Will be called right before abort(). + You can for instance use this to print custom error messages, or throw an exception. + Feel free to call LOG:ing function from this, but not FATAL ones! */ + LOGURU_EXPORT + void set_fatal_handler(fatal_handler_t handler); + + // Get the current fatal handler, if any. Default value is nullptr. + LOGURU_EXPORT + fatal_handler_t get_fatal_handler(); + + /* Will be called on each log messages with a verbosity less or equal to the given one. + Useful for displaying messages on-screen in a game, for example. + The given on_close is also expected to flush (if desired). + */ + LOGURU_EXPORT + void add_callback( + const char* id, + log_handler_t callback, + void* user_data, + Verbosity verbosity, + close_handler_t on_close = nullptr, + flush_handler_t on_flush = nullptr); + + /* Set a callback that returns custom verbosity level names. If callback + is nullptr or returns nullptr, default log names will be used. + */ + LOGURU_EXPORT + void set_verbosity_to_name_callback(verbosity_to_name_t callback); + + /* Set a callback that returns the verbosity level matching a name. The + callback should return Verbosity_INVALID if the name is not + recognized. + */ + LOGURU_EXPORT + void set_name_to_verbosity_callback(name_to_verbosity_t callback); + + /* Get a custom name for a specific verbosity, if one exists, or nullptr. */ + LOGURU_EXPORT + const char* get_verbosity_name(Verbosity verbosity); + + /* Get the verbosity enum value from a custom 4-character level name, if one exists. + If the name does not match a custom level name, Verbosity_INVALID is returned. + */ + LOGURU_EXPORT + Verbosity get_verbosity_from_name(const char* name); + + // Returns true iff the callback was found (and removed). + LOGURU_EXPORT + bool remove_callback(const char* id); + + // Shut down all file logging and any other callback hooks installed. + LOGURU_EXPORT + void remove_all_callbacks(); + + // Returns the maximum of g_stderr_verbosity and all file/custom outputs. + LOGURU_EXPORT + Verbosity current_verbosity_cutoff(); + +#if LOGURU_USE_FMTLIB + // Actual logging function. Use the LOG macro instead of calling this directly. + LOGURU_EXPORT + void log(Verbosity verbosity, const char* file, unsigned line, LOGURU_FORMAT_STRING_TYPE format, fmt::ArgList args); + FMT_VARIADIC(void, log, Verbosity, const char*, unsigned, LOGURU_FORMAT_STRING_TYPE) + + // Log without any preamble or indentation. 
+ LOGURU_EXPORT + void raw_log(Verbosity verbosity, const char* file, unsigned line, LOGURU_FORMAT_STRING_TYPE format, fmt::ArgList args); + FMT_VARIADIC(void, raw_log, Verbosity, const char*, unsigned, LOGURU_FORMAT_STRING_TYPE) +#else // LOGURU_USE_FMTLIB? + // Actual logging function. Use the LOG macro instead of calling this directly. + LOGURU_EXPORT + void log(Verbosity verbosity, const char* file, unsigned line, LOGURU_FORMAT_STRING_TYPE format, ...) LOGURU_PRINTF_LIKE(4, 5); + + // Log without any preamble or indentation. + LOGURU_EXPORT + void raw_log(Verbosity verbosity, const char* file, unsigned line, LOGURU_FORMAT_STRING_TYPE format, ...) LOGURU_PRINTF_LIKE(4, 5); +#endif // !LOGURU_USE_FMTLIB + + // Helper class for LOG_SCOPE_F + class LOGURU_EXPORT LogScopeRAII + { + public: + LogScopeRAII() : _file(nullptr) {} // No logging + LogScopeRAII(Verbosity verbosity, const char* file, unsigned line, LOGURU_FORMAT_STRING_TYPE format, ...) LOGURU_PRINTF_LIKE(5, 6); + ~LogScopeRAII(); + +#if defined(_MSC_VER) && _MSC_VER > 1800 + // older MSVC default move ctors close the scope on move. See + // issue #43 + LogScopeRAII(LogScopeRAII&& other) + : _verbosity(other._verbosity) + , _file(other._file) + , _line(other._line) + , _indent_stderr(other._indent_stderr) + , _start_time_ns(other._start_time_ns) + { + // Make sure the tmp object's destruction doesn't close the scope: + other._file = nullptr; + + for (unsigned int i = 0; i < LOGURU_SCOPE_TEXT_SIZE; ++i) { + _name[i] = other._name[i]; + } + } +#else + LogScopeRAII(LogScopeRAII&&) = default; +#endif + + private: + LogScopeRAII(const LogScopeRAII&) = delete; + LogScopeRAII& operator=(const LogScopeRAII&) = delete; + void operator=(LogScopeRAII&&) = delete; + + Verbosity _verbosity; + const char* _file; // Set to null if we are disabled due to verbosity + unsigned _line; + bool _indent_stderr; // Did we? + long long _start_time_ns; + char _name[LOGURU_SCOPE_TEXT_SIZE]; + }; + + // Marked as 'noreturn' for the benefit of the static analyzer and optimizer. + // stack_trace_skip is the number of extrace stack frames to skip above log_and_abort. + LOGURU_EXPORT + LOGURU_NORETURN void log_and_abort(int stack_trace_skip, const char* expr, const char* file, unsigned line, LOGURU_FORMAT_STRING_TYPE format, ...) LOGURU_PRINTF_LIKE(5, 6); + LOGURU_EXPORT + LOGURU_NORETURN void log_and_abort(int stack_trace_skip, const char* expr, const char* file, unsigned line); + + // Flush output to stderr and files. + // If g_flush_interval_ms is set to non-zero, this will be called automatically this often. + // If not set, you do not need to call this at all. 
+	LOGURU_EXPORT
+	void flush();
+
+	template<class T> inline Text format_value(const T&)                    { return textprintf("N/A");     }
+	template<>        inline Text format_value(const char& v)               { return textprintf("%c",   v); }
+	template<>        inline Text format_value(const int& v)                { return textprintf("%d",   v); }
+	template<>        inline Text format_value(const unsigned int& v)       { return textprintf("%u",   v); }
+	template<>        inline Text format_value(const long& v)               { return textprintf("%ld",  v); }
+	template<>        inline Text format_value(const unsigned long& v)      { return textprintf("%lu",  v); }
+	template<>        inline Text format_value(const long long& v)          { return textprintf("%lld", v); }
+	template<>        inline Text format_value(const unsigned long long& v) { return textprintf("%llu", v); }
+	template<>        inline Text format_value(const float& v)              { return textprintf("%f",   v); }
+	template<>        inline Text format_value(const double& v)             { return textprintf("%f",   v); }
+
+	/* Thread names can be set for the benefit of readable logs.
+	   If you do not set the thread name, a hex id will be shown instead.
+	   These thread names may or may not be the same as the system thread names,
+	   depending on the system.
+	   Try to limit the thread name to 15 characters or less. */
+	LOGURU_EXPORT
+	void set_thread_name(const char* name);
+
+	/* Returns the thread name for this thread.
+	   On OSX this will return the system thread name (settable from both within and without Loguru).
+	   On other systems it will return whatever you set in set_thread_name();
+	   if no thread name is set, this will return a hexadecimal thread id.
+	   length should be the number of bytes available in the buffer.
+	   17 is a good number for length.
+	   right_align_hex_id means any hexadecimal thread id will be written to the end of buffer.
+	*/
+	LOGURU_EXPORT
+	void get_thread_name(char* buffer, unsigned long long length, bool right_align_hex_id);
+
+	/* Generates a readable stacktrace as a string.
+	   'skip' specifies how many stack frames to skip.
+	   For instance, the default skip (1) means:
+	   don't include the call to loguru::stacktrace in the stack trace. */
+	LOGURU_EXPORT
+	Text stacktrace(int skip = 1);
+
+	/* Add a string to be replaced with something else in the stack output.
+
+	   For instance, instead of having a stack trace look like this:
+	       0x41f541 some_function(std::basic_ofstream<char, std::char_traits<char> >&)
+	   You can clean it up with:
+	       auto verbose_type_name = loguru::demangle(typeid(std::ofstream).name());
+	       loguru::add_stack_cleanup(verbose_type_name.c_str(), "std::ofstream");
+	   So the next time you will instead see:
+	       0x41f541 some_function(std::ofstream&)
+
+	   `replace_with_this` must be shorter than `find_this`.
+	*/
+	LOGURU_EXPORT
+	void add_stack_cleanup(const char* find_this, const char* replace_with_this);
+
+	// Example: demangle(typeid(std::ofstream).name()) -> "std::basic_ofstream<char, std::char_traits<char> >"
+	LOGURU_EXPORT
+	Text demangle(const char* name);
+
+	// ------------------------------------------------------------------------
+	/*
+	Not all terminals support colors, but if they do, and g_colorlogtostderr
+	is set, Loguru will write them to stderr to make errors in red, etc.
+
+	You also have the option to manually use them, via the functions below.
+
+	Note, however, that if you do, the color codes could end up in your logfile!
+
+	This means if you intend to use these functions you should either:
+		* Use them on stderr/stdout directly (bypass Loguru).
+		* Don't add file outputs to Loguru.
+		* Expect some \e[1m things in your logfile.
+ + Usage: + printf("%sRed%sGreen%sBold green%sClear again\n", + loguru::terminal_red(), loguru::terminal_green(), + loguru::terminal_bold(), loguru::terminal_reset()); + + If the terminal at hand does not support colors the above output + will just not have funky \e[1m things showing. + */ + + // Do the output terminal support colors? + LOGURU_EXPORT + bool terminal_has_color(); + + // Colors + LOGURU_EXPORT const char* terminal_black(); + LOGURU_EXPORT const char* terminal_red(); + LOGURU_EXPORT const char* terminal_green(); + LOGURU_EXPORT const char* terminal_yellow(); + LOGURU_EXPORT const char* terminal_blue(); + LOGURU_EXPORT const char* terminal_purple(); + LOGURU_EXPORT const char* terminal_cyan(); + LOGURU_EXPORT const char* terminal_light_gray(); + LOGURU_EXPORT const char* terminal_light_red(); + LOGURU_EXPORT const char* terminal_white(); + + // Formating + LOGURU_EXPORT const char* terminal_bold(); + LOGURU_EXPORT const char* terminal_underline(); + + // You should end each line with this! + LOGURU_EXPORT const char* terminal_reset(); + + // -------------------------------------------------------------------- + // Error context related: + + struct StringStream; + + // Use this in your EcEntryBase::print_value overload. + LOGURU_EXPORT + void stream_print(StringStream& out_string_stream, const char* text); + + class LOGURU_EXPORT EcEntryBase + { + public: + EcEntryBase(const char* file, unsigned line, const char* descr); + ~EcEntryBase(); + EcEntryBase(const EcEntryBase&) = delete; + EcEntryBase(EcEntryBase&&) = delete; + EcEntryBase& operator=(const EcEntryBase&) = delete; + EcEntryBase& operator=(EcEntryBase&&) = delete; + + virtual void print_value(StringStream& out_string_stream) const = 0; + + EcEntryBase* previous() const { return _previous; } + + // private: + const char* _file; + unsigned _line; + const char* _descr; + EcEntryBase* _previous; + }; + + template + class EcEntryData : public EcEntryBase + { + public: + using Printer = Text(*)(T data); + + EcEntryData(const char* file, unsigned line, const char* descr, T data, Printer&& printer) + : EcEntryBase(file, line, descr), _data(data), _printer(printer) {} + + virtual void print_value(StringStream& out_string_stream) const override + { + const auto str = _printer(_data); + stream_print(out_string_stream, str.c_str()); + } + + private: + T _data; + Printer _printer; + }; + + // template + // class EcEntryLambda : public EcEntryBase + // { + // public: + // EcEntryLambda(const char* file, unsigned line, const char* descr, Printer&& printer) + // : EcEntryBase(file, line, descr), _printer(std::move(printer)) {} + + // virtual void print_value(StringStream& out_string_stream) const override + // { + // const auto str = _printer(); + // stream_print(out_string_stream, str.c_str()); + // } + + // private: + // Printer _printer; + // }; + + // template + // EcEntryLambda make_ec_entry_lambda(const char* file, unsigned line, const char* descr, Printer&& printer) + // { + // return {file, line, descr, std::move(printer)}; + // } + + template + struct decay_char_array { using type = T; }; + + template + struct decay_char_array { using type = const char*; }; + + template + struct make_const_ptr { using type = T; }; + + template + struct make_const_ptr { using type = const T*; }; + + template + struct make_ec_type { using type = typename make_const_ptr::type>::type; }; + + /* A stack trace gives you the names of the function at the point of a crash. + With ERROR_CONTEXT, you can also get the values of select local variables. 
+ Usage: + + void process_customers(const std::string& filename) + { + ERROR_CONTEXT("Processing file", filename.c_str()); + for (int customer_index : ...) + { + ERROR_CONTEXT("Customer index", customer_index); + ... + } + } + + The context is in effect during the scope of the ERROR_CONTEXT. + Use loguru::get_error_context() to get the contents of the active error contexts. + + Example result: + + ------------------------------------------------ + [ErrorContext] main.cpp:416 Processing file: "customers.json" + [ErrorContext] main.cpp:417 Customer index: 42 + ------------------------------------------------ + + Error contexts are printed automatically on crashes, and only on crashes. + This makes them much faster than logging the value of a variable. + */ + #define ERROR_CONTEXT(descr, data) \ + const loguru::EcEntryData::type> \ + LOGURU_ANONYMOUS_VARIABLE(error_context_scope_)( \ + __FILE__, __LINE__, descr, data, \ + static_cast::type>::Printer>(loguru::ec_to_text) ) // For better error messages + +/* + #define ERROR_CONTEXT(descr, data) \ + const auto LOGURU_ANONYMOUS_VARIABLE(error_context_scope_)( \ + loguru::make_ec_entry_lambda(__FILE__, __LINE__, descr, \ + [=](){ return loguru::ec_to_text(data); })) +*/ + + using EcHandle = const EcEntryBase*; + + /* + Get a light-weight handle to the error context stack on this thread. + The handle is valid as long as the current thread has no changes to its error context stack. + You can pass the handle to loguru::get_error_context on another thread. + This can be very useful for when you have a parent thread spawning several working threads, + and you want the error context of the parent thread to get printed (too) when there is an + error on the child thread. You can accomplish this thusly: + + void foo(const char* parameter) + { + ERROR_CONTEXT("parameter", parameter) + const auto parent_ec_handle = loguru::get_thread_ec_handle(); + + std::thread([=]{ + loguru::set_thread_name("child thread"); + ERROR_CONTEXT("parent context", parent_ec_handle); + dangerous_code(); + }.join(); + } + + */ + LOGURU_EXPORT + EcHandle get_thread_ec_handle(); + + // Get a string describing the current stack of error context. Empty string if there is none. + LOGURU_EXPORT + Text get_error_context(); + + // Get a string describing the error context of the given thread handle. + LOGURU_EXPORT + Text get_error_context_for(EcHandle ec_handle); + + // ------------------------------------------------------------------------ + + LOGURU_EXPORT Text ec_to_text(const char* data); + LOGURU_EXPORT Text ec_to_text(char data); + LOGURU_EXPORT Text ec_to_text(int data); + LOGURU_EXPORT Text ec_to_text(unsigned int data); + LOGURU_EXPORT Text ec_to_text(long data); + LOGURU_EXPORT Text ec_to_text(unsigned long data); + LOGURU_EXPORT Text ec_to_text(long long data); + LOGURU_EXPORT Text ec_to_text(unsigned long long data); + LOGURU_EXPORT Text ec_to_text(float data); + LOGURU_EXPORT Text ec_to_text(double data); + LOGURU_EXPORT Text ec_to_text(long double data); + LOGURU_EXPORT Text ec_to_text(EcHandle); + + /* + You can add ERROR_CONTEXT support for your own types by overloading ec_to_text. Here's how: + + some.hpp: + namespace loguru { + Text ec_to_text(MySmallType data) + Text ec_to_text(const MyBigType* data) + } // namespace loguru + + some.cpp: + namespace loguru { + Text ec_to_text(MySmallType small_value) + { + // Called only when needed, i.e. on a crash. + std::string str = small_value.as_string(); // Format 'small_value' here somehow. 
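+	            // strdup() heap-copies the formatted string; the returned Text
+	            // takes ownership of that buffer (see the RAII Text class above).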
+ return Text{strdup(str.c_str())}; + } + + Text ec_to_text(const MyBigType* big_value) + { + // Called only when needed, i.e. on a crash. + std::string str = big_value->as_string(); // Format 'big_value' here somehow. + return Text{strdup(str.c_str())}; + } + } // namespace loguru + + Any file that include some.hpp: + void foo(MySmallType small, const MyBigType& big) + { + ERROR_CONTEXT("Small", small); // Copy ´small` by value. + ERROR_CONTEXT("Big", &big); // `big` should not change during this scope! + .... + } + */ +} // namespace loguru + +// -------------------------------------------------------------------- +// Logging macros + +// LOG_F(2, "Only logged if verbosity is 2 or higher: %d", some_number); +#define VLOG_F(verbosity, ...) \ + ((verbosity) > loguru::current_verbosity_cutoff()) ? (void)0 \ + : loguru::log(verbosity, __FILE__, __LINE__, __VA_ARGS__) + +// LOG_F(INFO, "Foo: %d", some_number); +#define LOG_F(verbosity_name, ...) VLOG_F(loguru::Verbosity_ ## verbosity_name, __VA_ARGS__) + +#define VLOG_IF_F(verbosity, cond, ...) \ + ((verbosity) > loguru::current_verbosity_cutoff() || (cond) == false) \ + ? (void)0 \ + : loguru::log(verbosity, __FILE__, __LINE__, __VA_ARGS__) + +#define LOG_IF_F(verbosity_name, cond, ...) \ + VLOG_IF_F(loguru::Verbosity_ ## verbosity_name, cond, __VA_ARGS__) + +#define VLOG_SCOPE_F(verbosity, ...) \ + loguru::LogScopeRAII LOGURU_ANONYMOUS_VARIABLE(error_context_RAII_) = \ + ((verbosity) > loguru::current_verbosity_cutoff()) ? loguru::LogScopeRAII() : \ + loguru::LogScopeRAII(verbosity, __FILE__, __LINE__, __VA_ARGS__) + +// Raw logging - no preamble, no indentation. Slightly faster than full logging. +#define RAW_VLOG_F(verbosity, ...) \ + ((verbosity) > loguru::current_verbosity_cutoff()) ? (void)0 \ + : loguru::raw_log(verbosity, __FILE__, __LINE__, __VA_ARGS__) + +#define RAW_LOG_F(verbosity_name, ...) RAW_VLOG_F(loguru::Verbosity_ ## verbosity_name, __VA_ARGS__) + +// Use to book-end a scope. Affects logging on all threads. +#define LOG_SCOPE_F(verbosity_name, ...) \ + VLOG_SCOPE_F(loguru::Verbosity_ ## verbosity_name, __VA_ARGS__) + +#define LOG_SCOPE_FUNCTION(verbosity_name) LOG_SCOPE_F(verbosity_name, __func__) + +// ----------------------------------------------- +// ABORT_F macro. Usage: ABORT_F("Cause of error: %s", error_str); + +// Message is optional +#define ABORT_F(...) loguru::log_and_abort(0, "ABORT: ", __FILE__, __LINE__, __VA_ARGS__) + +// -------------------------------------------------------------------- +// CHECK_F macros: + +#define CHECK_WITH_INFO_F(test, info, ...) \ + LOGURU_PREDICT_TRUE((test) == true) ? (void)0 : loguru::log_and_abort(0, "CHECK FAILED: " info " ", __FILE__, \ + __LINE__, ##__VA_ARGS__) + +/* Checked at runtime too. Will print error, then call fatal_handler (if any), then 'abort'. + Note that the test must be boolean. + CHECK_F(ptr); will not compile, but CHECK_F(ptr != nullptr); will. */ +#define CHECK_F(test, ...) CHECK_WITH_INFO_F(test, #test, ##__VA_ARGS__) + +#define CHECK_NOTNULL_F(x, ...) CHECK_WITH_INFO_F((x) != nullptr, #x " != nullptr", ##__VA_ARGS__) + +#define CHECK_OP_F(expr_left, expr_right, op, ...) \ + do \ + { \ + auto val_left = expr_left; \ + auto val_right = expr_right; \ + if (! 
LOGURU_PREDICT_TRUE(val_left op val_right)) \ + { \ + auto str_left = loguru::format_value(val_left); \ + auto str_right = loguru::format_value(val_right); \ + auto fail_info = loguru::textprintf("CHECK FAILED: %s %s %s (%s %s %s) ", \ + #expr_left, #op, #expr_right, str_left.c_str(), #op, str_right.c_str()); \ + auto user_msg = loguru::textprintf(__VA_ARGS__); \ + loguru::log_and_abort(0, fail_info.c_str(), __FILE__, __LINE__, \ + "%s", user_msg.c_str()); \ + } \ + } while (false) + +#ifndef LOGURU_DEBUG_LOGGING + #ifndef NDEBUG + #define LOGURU_DEBUG_LOGGING 1 + #else + #define LOGURU_DEBUG_LOGGING 0 + #endif +#endif + +#if LOGURU_DEBUG_LOGGING + // Debug logging enabled: + #define DLOG_F(verbosity_name, ...) LOG_F(verbosity_name, __VA_ARGS__) + #define DVLOG_F(verbosity, ...) VLOG_F(verbosity, __VA_ARGS__) + #define DLOG_IF_F(verbosity_name, ...) LOG_IF_F(verbosity_name, __VA_ARGS__) + #define DVLOG_IF_F(verbosity, ...) VLOG_IF_F(verbosity, __VA_ARGS__) + #define DRAW_LOG_F(verbosity_name, ...) RAW_LOG_F(verbosity_name, __VA_ARGS__) + #define DRAW_VLOG_F(verbosity, ...) RAW_VLOG_F(verbosity, __VA_ARGS__) +#else + // Debug logging disabled: + #define DLOG_F(verbosity_name, ...) + #define DVLOG_F(verbosity, ...) + #define DLOG_IF_F(verbosity_name, ...) + #define DVLOG_IF_F(verbosity, ...) + #define DRAW_LOG_F(verbosity_name, ...) + #define DRAW_VLOG_F(verbosity, ...) +#endif + +#define CHECK_EQ_F(a, b, ...) CHECK_OP_F(a, b, ==, ##__VA_ARGS__) +#define CHECK_NE_F(a, b, ...) CHECK_OP_F(a, b, !=, ##__VA_ARGS__) +#define CHECK_LT_F(a, b, ...) CHECK_OP_F(a, b, < , ##__VA_ARGS__) +#define CHECK_GT_F(a, b, ...) CHECK_OP_F(a, b, > , ##__VA_ARGS__) +#define CHECK_LE_F(a, b, ...) CHECK_OP_F(a, b, <=, ##__VA_ARGS__) +#define CHECK_GE_F(a, b, ...) CHECK_OP_F(a, b, >=, ##__VA_ARGS__) + +#ifndef LOGURU_DEBUG_CHECKS + #ifndef NDEBUG + #define LOGURU_DEBUG_CHECKS 1 + #else + #define LOGURU_DEBUG_CHECKS 0 + #endif +#endif + +#if LOGURU_DEBUG_CHECKS + // Debug checks enabled: + #define DCHECK_F(test, ...) CHECK_F(test, ##__VA_ARGS__) + #define DCHECK_NOTNULL_F(x, ...) CHECK_NOTNULL_F(x, ##__VA_ARGS__) + #define DCHECK_EQ_F(a, b, ...) CHECK_EQ_F(a, b, ##__VA_ARGS__) + #define DCHECK_NE_F(a, b, ...) CHECK_NE_F(a, b, ##__VA_ARGS__) + #define DCHECK_LT_F(a, b, ...) CHECK_LT_F(a, b, ##__VA_ARGS__) + #define DCHECK_LE_F(a, b, ...) CHECK_LE_F(a, b, ##__VA_ARGS__) + #define DCHECK_GT_F(a, b, ...) CHECK_GT_F(a, b, ##__VA_ARGS__) + #define DCHECK_GE_F(a, b, ...) CHECK_GE_F(a, b, ##__VA_ARGS__) +#else + // Debug checks disabled: + #define DCHECK_F(test, ...) + #define DCHECK_NOTNULL_F(x, ...) + #define DCHECK_EQ_F(a, b, ...) + #define DCHECK_NE_F(a, b, ...) + #define DCHECK_LT_F(a, b, ...) + #define DCHECK_LE_F(a, b, ...) + #define DCHECK_GT_F(a, b, ...) + #define DCHECK_GE_F(a, b, ...) +#endif // NDEBUG + + +#if LOGURU_REDEFINE_ASSERT + #undef assert + #ifndef NDEBUG + // Debug: + #define assert(test) CHECK_WITH_INFO_F(!!(test), #test) // HACK + #else + #define assert(test) + #endif +#endif // LOGURU_REDEFINE_ASSERT + +#endif // LOGURU_HAS_DECLARED_FORMAT_HEADER + +// ---------------------------------------------------------------------------- +// .dP"Y8 888888 88""Yb 888888 db 8b d8 .dP"Y8 +// `Ybo." 88 88__dP 88__ dPYb 88b d88 `Ybo." 
+// o.`Y8b   88   88"Yb   88""   dP__Yb  88YbdP88 o.`Y8b
+// 8bodP'   88   88  Yb  888888 dP""""Yb 88 YY 88 8bodP'
+
+#if LOGURU_WITH_STREAMS
+#ifndef LOGURU_HAS_DECLARED_STREAMS_HEADER
+#define LOGURU_HAS_DECLARED_STREAMS_HEADER
+
+/* This file extends loguru to enable std::stream-style logging, a la Glog.
+   It's an optional feature behind the LOGURU_WITH_STREAMS setting
+   because including it everywhere will slow down compilation times.
+*/
+
+#include <cstdarg>
+#include <sstream> // Adds about 38 kLoC on clang.
+#include <string>
+
+namespace loguru
+{
+	// Like sprintf, but returns the formatted text.
+	LOGURU_EXPORT
+	std::string strprintf(LOGURU_FORMAT_STRING_TYPE format, ...) LOGURU_PRINTF_LIKE(1, 2);
+
+	// Like vsprintf, but returns the formatted text.
+	LOGURU_EXPORT
+	std::string vstrprintf(LOGURU_FORMAT_STRING_TYPE format, va_list) LOGURU_PRINTF_LIKE(1, 0);
+
+	class LOGURU_EXPORT StreamLogger
+	{
+	public:
+		StreamLogger(Verbosity verbosity, const char* file, unsigned line) : _verbosity(verbosity), _file(file), _line(line) {}
+		~StreamLogger() noexcept(false);
+
+		template<typename T>
+		StreamLogger& operator<<(const T& t)
+		{
+			_ss << t;
+			return *this;
+		}
+
+		// std::endl and other iomanip:s.
+		StreamLogger& operator<<(std::ostream&(*f)(std::ostream&))
+		{
+			f(_ss);
+			return *this;
+		}
+
+	private:
+		Verbosity   _verbosity;
+		const char* _file;
+		unsigned    _line;
+		std::ostringstream _ss;
+	};
+
+	class LOGURU_EXPORT AbortLogger
+	{
+	public:
+		AbortLogger(const char* expr, const char* file, unsigned line) : _expr(expr), _file(file), _line(line) { }
+		LOGURU_NORETURN ~AbortLogger() noexcept(false);
+
+		template<typename T>
+		AbortLogger& operator<<(const T& t)
+		{
+			_ss << t;
+			return *this;
+		}
+
+		// std::endl and other iomanip:s.
+		AbortLogger& operator<<(std::ostream&(*f)(std::ostream&))
+		{
+			f(_ss);
+			return *this;
+		}
+
+	private:
+		const char*        _expr;
+		const char*        _file;
+		unsigned           _line;
+		std::ostringstream _ss;
+	};
+
+	class LOGURU_EXPORT Voidify
+	{
+	public:
+		Voidify() {}
+		// This has to be an operator with a precedence lower than << but higher than ?:
+		void operator&(const StreamLogger&) { }
+		void operator&(const AbortLogger&)  { }
+	};
+
+	/* Helper functions for CHECK_OP_S macro.
+	   GLOG trick: The (int, int) specialization works around the issue that the compiler
+	   will not instantiate the template version of the function on values of unnamed enum type. */
+	#define DEFINE_CHECK_OP_IMPL(name, op)                                                         \
+		template <typename T1, typename T2>                                                        \
+		inline std::string* name(const char* expr, const T1& v1, const char* op_str, const T2& v2) \
+		{                                                                                          \
+			if (LOGURU_PREDICT_TRUE(v1 op v2)) { return NULL; }                                    \
+			std::ostringstream ss;                                                                 \
+			ss << "CHECK FAILED: " << expr << " (" << v1 << " " << op_str << " " << v2 << ") ";    \
+			return new std::string(ss.str());                                                      \
+		}                                                                                          \
+		inline std::string* name(const char* expr, int v1, const char* op_str, int v2)             \
+		{                                                                                          \
+			return name<int, int>(expr, v1, op_str, v2);                                           \
+		}
+
+	DEFINE_CHECK_OP_IMPL(check_EQ_impl, ==)
+	DEFINE_CHECK_OP_IMPL(check_NE_impl, !=)
+	DEFINE_CHECK_OP_IMPL(check_LE_impl, <=)
+	DEFINE_CHECK_OP_IMPL(check_LT_impl, < )
+	DEFINE_CHECK_OP_IMPL(check_GE_impl, >=)
+	DEFINE_CHECK_OP_IMPL(check_GT_impl, > )
+	#undef DEFINE_CHECK_OP_IMPL
+
+	/* GLOG trick: Function is overloaded for integral types to allow static const integrals
+	   declared in classes and not defined to be used as arguments to CHECK* macros.
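+
+	   Illustrative sketch (not from the original header): without these overloads,
+	   CHECK_EQ_S(Foo::kBar, x) would bind Foo::kBar to a const reference, ODR-using
+	   it and failing to link when kBar has no out-of-class definition:
+
+	       struct Foo { static const int kBar = 42; };  // in-class initializer, no definition
+	       CHECK_EQ_S(Foo::kBar, x);  // ok: referenceable_value(int) takes kBar by value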
+	*/
+	template <class T>
+	inline const T&           referenceable_value(const T& t) { return t; }
+	inline char               referenceable_value(char t)               { return t; }
+	inline unsigned char      referenceable_value(unsigned char t)      { return t; }
+	inline signed char        referenceable_value(signed char t)        { return t; }
+	inline short              referenceable_value(short t)              { return t; }
+	inline unsigned short     referenceable_value(unsigned short t)     { return t; }
+	inline int                referenceable_value(int t)                { return t; }
+	inline unsigned int       referenceable_value(unsigned int t)       { return t; }
+	inline long               referenceable_value(long t)               { return t; }
+	inline unsigned long      referenceable_value(unsigned long t)      { return t; }
+	inline long long          referenceable_value(long long t)          { return t; }
+	inline unsigned long long referenceable_value(unsigned long long t) { return t; }
+} // namespace loguru
+
+// -----------------------------------------------
+// Logging macros:
+
+// usage: LOG_S(INFO) << "Foo " << std::setprecision(10) << some_value;
+#define VLOG_IF_S(verbosity, cond)                                                                 \
+	((verbosity) > loguru::current_verbosity_cutoff() || (cond) == false)                          \
+		? (void)0                                                                                  \
+		: loguru::Voidify() & loguru::StreamLogger(verbosity, __FILE__, __LINE__)
+#define LOG_IF_S(verbosity_name, cond) VLOG_IF_S(loguru::Verbosity_ ## verbosity_name, cond)
+#define VLOG_S(verbosity)              VLOG_IF_S(verbosity, true)
+#define LOG_S(verbosity_name)          VLOG_S(loguru::Verbosity_ ## verbosity_name)
+
+// -----------------------------------------------
+// ABORT_S macro. Usage: ABORT_S() << "Cause of error: " << details;
+
+#define ABORT_S() loguru::Voidify() & loguru::AbortLogger("ABORT: ", __FILE__, __LINE__)
+
+// -----------------------------------------------
+// CHECK_S macros:
+
+#define CHECK_WITH_INFO_S(cond, info)                                                              \
+	LOGURU_PREDICT_TRUE((cond) == true)                                                            \
+		? (void)0                                                                                  \
+		: loguru::Voidify() & loguru::AbortLogger("CHECK FAILED: " info " ", __FILE__, __LINE__)
+
+#define CHECK_S(cond) CHECK_WITH_INFO_S(cond, #cond)
+#define CHECK_NOTNULL_S(x) CHECK_WITH_INFO_S((x) != nullptr, #x " != nullptr")
+
+#define CHECK_OP_S(function_name, expr1, op, expr2)                                                \
+	while (auto error_string = loguru::function_name(#expr1 " " #op " " #expr2,                    \
+	                                                 loguru::referenceable_value(expr1), #op,      \
+	                                                 loguru::referenceable_value(expr2)))          \
+		loguru::AbortLogger(error_string->c_str(), __FILE__, __LINE__)
+
+#define CHECK_EQ_S(expr1, expr2) CHECK_OP_S(check_EQ_impl, expr1, ==, expr2)
+#define CHECK_NE_S(expr1, expr2) CHECK_OP_S(check_NE_impl, expr1, !=, expr2)
+#define CHECK_LE_S(expr1, expr2) CHECK_OP_S(check_LE_impl, expr1, <=, expr2)
+#define CHECK_LT_S(expr1, expr2) CHECK_OP_S(check_LT_impl, expr1, < , expr2)
+#define CHECK_GE_S(expr1, expr2) CHECK_OP_S(check_GE_impl, expr1, >=, expr2)
+#define CHECK_GT_S(expr1, expr2) CHECK_OP_S(check_GT_impl, expr1, > , expr2)
+
+#if LOGURU_DEBUG_LOGGING
+	// Debug logging enabled:
+	#define DVLOG_IF_S(verbosity, cond)     VLOG_IF_S(verbosity, cond)
+	#define DLOG_IF_S(verbosity_name, cond) LOG_IF_S(verbosity_name, cond)
+	#define DVLOG_S(verbosity)              VLOG_S(verbosity)
+	#define DLOG_S(verbosity_name)          LOG_S(verbosity_name)
+#else
+	// Debug logging disabled:
+	#define DVLOG_IF_S(verbosity, cond)                                                            \
+		(true || (verbosity) > loguru::current_verbosity_cutoff() || (cond) == false)              \
+			?
(void)0 \ + : loguru::Voidify() & loguru::StreamLogger(verbosity, __FILE__, __LINE__) + + #define DLOG_IF_S(verbosity_name, cond) DVLOG_IF_S(loguru::Verbosity_ ## verbosity_name, cond) + #define DVLOG_S(verbosity) DVLOG_IF_S(verbosity, true) + #define DLOG_S(verbosity_name) DVLOG_S(loguru::Verbosity_ ## verbosity_name) +#endif + +#if LOGURU_DEBUG_CHECKS + // Debug checks enabled: + #define DCHECK_S(cond) CHECK_S(cond) + #define DCHECK_NOTNULL_S(x) CHECK_NOTNULL_S(x) + #define DCHECK_EQ_S(a, b) CHECK_EQ_S(a, b) + #define DCHECK_NE_S(a, b) CHECK_NE_S(a, b) + #define DCHECK_LT_S(a, b) CHECK_LT_S(a, b) + #define DCHECK_LE_S(a, b) CHECK_LE_S(a, b) + #define DCHECK_GT_S(a, b) CHECK_GT_S(a, b) + #define DCHECK_GE_S(a, b) CHECK_GE_S(a, b) +#else +// Debug checks disabled: + #define DCHECK_S(cond) CHECK_S(true || (cond)) + #define DCHECK_NOTNULL_S(x) CHECK_S(true || (x) != nullptr) + #define DCHECK_EQ_S(a, b) CHECK_S(true || (a) == (b)) + #define DCHECK_NE_S(a, b) CHECK_S(true || (a) != (b)) + #define DCHECK_LT_S(a, b) CHECK_S(true || (a) < (b)) + #define DCHECK_LE_S(a, b) CHECK_S(true || (a) <= (b)) + #define DCHECK_GT_S(a, b) CHECK_S(true || (a) > (b)) + #define DCHECK_GE_S(a, b) CHECK_S(true || (a) >= (b)) +#endif + +#if LOGURU_REPLACE_GLOG + #undef LOG + #undef VLOG + #undef LOG_IF + #undef VLOG_IF + #undef CHECK + #undef CHECK_NOTNULL + #undef CHECK_EQ + #undef CHECK_NE + #undef CHECK_LT + #undef CHECK_LE + #undef CHECK_GT + #undef CHECK_GE + #undef DLOG + #undef DVLOG + #undef DLOG_IF + #undef DVLOG_IF + #undef DCHECK + #undef DCHECK_NOTNULL + #undef DCHECK_EQ + #undef DCHECK_NE + #undef DCHECK_LT + #undef DCHECK_LE + #undef DCHECK_GT + #undef DCHECK_GE + #undef VLOG_IS_ON + + #define LOG LOG_S + #define VLOG VLOG_S + #define LOG_IF LOG_IF_S + #define VLOG_IF VLOG_IF_S + #define CHECK(cond) CHECK_S(!!(cond)) + #define CHECK_NOTNULL CHECK_NOTNULL_S + #define CHECK_EQ CHECK_EQ_S + #define CHECK_NE CHECK_NE_S + #define CHECK_LT CHECK_LT_S + #define CHECK_LE CHECK_LE_S + #define CHECK_GT CHECK_GT_S + #define CHECK_GE CHECK_GE_S + #define DLOG DLOG_S + #define DVLOG DVLOG_S + #define DLOG_IF DLOG_IF_S + #define DVLOG_IF DVLOG_IF_S + #define DCHECK DCHECK_S + #define DCHECK_NOTNULL DCHECK_NOTNULL_S + #define DCHECK_EQ DCHECK_EQ_S + #define DCHECK_NE DCHECK_NE_S + #define DCHECK_LT DCHECK_LT_S + #define DCHECK_LE DCHECK_LE_S + #define DCHECK_GT DCHECK_GT_S + #define DCHECK_GE DCHECK_GE_S + #define VLOG_IS_ON(verbosity) ((verbosity) <= loguru::current_verbosity_cutoff()) + +#endif // LOGURU_REPLACE_GLOG + +#endif // LOGURU_WITH_STREAMS + +#endif // LOGURU_HAS_DECLARED_STREAMS_HEADER diff --git a/src/includes/3thparty/parallel_hashmap/btree.h b/src/includes/3thparty/parallel_hashmap/btree.h new file mode 100644 index 0000000..2886094 --- /dev/null +++ b/src/includes/3thparty/parallel_hashmap/btree.h @@ -0,0 +1,4063 @@ +// --------------------------------------------------------------------------- +// Copyright (c) 2019, Gregory Popovitch - greg7mdp@gmail.com +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// Includes work from abseil-cpp (https://github.com/abseil/abseil-cpp) +// with modifications. +// +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// --------------------------------------------------------------------------- + +#ifndef PHMAP_BTREE_BTREE_CONTAINER_H_ +#define PHMAP_BTREE_BTREE_CONTAINER_H_ + +#ifdef _MSC_VER + #pragma warning(push) + + #pragma warning(disable : 4127) // conditional expression is constant + #pragma warning(disable : 4324) // structure was padded due to alignment specifier + #pragma warning(disable : 4355) // 'this': used in base member initializer list + #pragma warning(disable : 4365) // conversion from 'int' to 'const unsigned __int64', signed/unsigned mismatch + #pragma warning(disable : 4514) // unreferenced inline function has been removed + #pragma warning(disable : 4623) // default constructor was implicitly defined as deleted + #pragma warning(disable : 4625) // copy constructor was implicitly defined as deleted + #pragma warning(disable : 4626) // assignment operator was implicitly defined as deleted + #pragma warning(disable : 4710) // function not inlined + #pragma warning(disable : 4711) // selected for automatic inline expansion + #pragma warning(disable : 4820) // '6' bytes padding added after data member + #pragma warning(disable : 4868) // compiler may not enforce left-to-right evaluation order in braced initializer list + #pragma warning(disable : 5026) // move constructor was implicitly defined as deleted + #pragma warning(disable : 5027) // move assignment operator was implicitly defined as deleted + #pragma warning(disable : 5045) // Compiler will insert Spectre mitigation for memory load if /Qspectre switch specified +#endif + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "phmap_fwd_decl.h" +#include "phmap_base.h" + +#if PHMAP_HAVE_STD_STRING_VIEW + #include +#endif + +// MSVC constructibility traits do not detect destructor properties and so our +// implementations should not use them as a source-of-truth. +#if defined(_MSC_VER) && !defined(__clang__) && !defined(__GNUC__) + #define PHMAP_META_INTERNAL_STD_CONSTRUCTION_TRAITS_DONT_CHECK_DESTRUCTION 1 +#endif + +namespace phmap { + + // Defined and documented later on in this file. + template + struct is_trivially_destructible; + + // Defined and documented later on in this file. + template + struct is_trivially_move_assignable; + + namespace type_traits_internal { + + // Silence MSVC warnings about the destructor being defined as deleted. 
+#if defined(_MSC_VER) && !defined(__GNUC__) + #pragma warning(push) + #pragma warning(disable : 4624) +#endif // defined(_MSC_VER) && !defined(__GNUC__) + + template + union SingleMemberUnion { + T t; + }; + + // Restore the state of the destructor warning that was silenced above. +#if defined(_MSC_VER) && !defined(__GNUC__) + #pragma warning(pop) +#endif // defined(_MSC_VER) && !defined(__GNUC__) + + template + struct IsTriviallyMoveConstructibleObject + : std::integral_constant< + bool, std::is_move_constructible< + type_traits_internal::SingleMemberUnion>::value && + phmap::is_trivially_destructible::value> {}; + + template + struct IsTriviallyCopyConstructibleObject + : std::integral_constant< + bool, std::is_copy_constructible< + type_traits_internal::SingleMemberUnion>::value && + phmap::is_trivially_destructible::value> {}; + + template + struct IsTriviallyMoveAssignableReference : std::false_type {}; + + template + struct IsTriviallyMoveAssignableReference + : phmap::is_trivially_move_assignable::type {}; + + template + struct IsTriviallyMoveAssignableReference + : phmap::is_trivially_move_assignable::type {}; + + } // namespace type_traits_internal + + + template + using void_t = typename type_traits_internal::VoidTImpl::type; + + + template + struct is_function + : std::integral_constant< + bool, !(std::is_reference::value || + std::is_const::type>::value)> {}; + + + namespace type_traits_internal { + + template + class is_trivially_copyable_impl { + using ExtentsRemoved = typename std::remove_all_extents::type; + static constexpr bool kIsCopyOrMoveConstructible = + std::is_copy_constructible::value || + std::is_move_constructible::value; + static constexpr bool kIsCopyOrMoveAssignable = + phmap::is_copy_assignable::value || + phmap::is_move_assignable::value; + + public: + static constexpr bool kValue = + (__has_trivial_copy(ExtentsRemoved) || !kIsCopyOrMoveConstructible) && + (__has_trivial_assign(ExtentsRemoved) || !kIsCopyOrMoveAssignable) && + (kIsCopyOrMoveConstructible || kIsCopyOrMoveAssignable) && + is_trivially_destructible::value && + // We need to check for this explicitly because otherwise we'll say + // references are trivial copyable when compiled by MSVC. + !std::is_reference::value; + }; + + template + struct is_trivially_copyable + : std::integral_constant< + bool, type_traits_internal::is_trivially_copyable_impl::kValue> {}; + } // namespace type_traits_internal + + namespace swap_internal { + + // Necessary for the traits. + using std::swap; + + // This declaration prevents global `swap` and `phmap::swap` overloads from being + // considered unless ADL picks them up. + void swap(); + + template + using IsSwappableImpl = decltype(swap(std::declval(), std::declval())); + + // NOTE: This dance with the default template parameter is for MSVC. + template (), std::declval()))>> + using IsNothrowSwappableImpl = typename std::enable_if::type; + + template + struct IsSwappable + : phmap::type_traits_internal::is_detected {}; + + template + struct IsNothrowSwappable + : phmap::type_traits_internal::is_detected {}; + + template ::value, int> = 0> + void Swap(T& lhs, T& rhs) noexcept(IsNothrowSwappable::value) { + swap(lhs, rhs); + } + + using StdSwapIsUnconstrained = IsSwappable; + + } // namespace swap_internal + + namespace type_traits_internal { + + // Make the swap-related traits/function accessible from this namespace. 
+ using swap_internal::IsNothrowSwappable; + using swap_internal::IsSwappable; + using swap_internal::Swap; + using swap_internal::StdSwapIsUnconstrained; + + } // namespace type_traits_internal + + namespace compare_internal { + + using value_type = int8_t; + + template + struct Fail { + static_assert(sizeof(T) < 0, "Only literal `0` is allowed."); + }; + + template + struct OnlyLiteralZero { + constexpr OnlyLiteralZero(NullPtrT) noexcept {} // NOLINT + + template < + typename T, + typename = typename std::enable_if< + std::is_same::value || + (std::is_integral::value && !std::is_same::value)>::type, + typename = typename Fail::type> + OnlyLiteralZero(T); // NOLINT + }; + + enum class eq : value_type { + equal = 0, + equivalent = equal, + nonequal = 1, + nonequivalent = nonequal, + }; + + enum class ord : value_type { less = -1, greater = 1 }; + + enum class ncmp : value_type { unordered = -127 }; + +#ifdef __cpp_inline_variables + +#define PHMAP_COMPARE_INLINE_BASECLASS_DECL(name) + +#define PHMAP_COMPARE_INLINE_SUBCLASS_DECL(type, name) \ + static const type name; + +#define PHMAP_COMPARE_INLINE_INIT(type, name, init) \ + inline constexpr type type::name(init) + +#else // __cpp_inline_variables + +#define PHMAP_COMPARE_INLINE_BASECLASS_DECL(name) \ + static const T name; + +#define PHMAP_COMPARE_INLINE_SUBCLASS_DECL(type, name) + +#define PHMAP_COMPARE_INLINE_INIT(type, name, init) \ + template \ + const T compare_internal::type##_base::name(init) + +#endif // __cpp_inline_variables + + // These template base classes allow for defining the values of the constants + // in the header file (for performance) without using inline variables (which + // aren't available in C++11). + template + struct weak_equality_base { + PHMAP_COMPARE_INLINE_BASECLASS_DECL(equivalent) + PHMAP_COMPARE_INLINE_BASECLASS_DECL(nonequivalent) + }; + + template + struct strong_equality_base { + PHMAP_COMPARE_INLINE_BASECLASS_DECL(equal) + PHMAP_COMPARE_INLINE_BASECLASS_DECL(nonequal) + PHMAP_COMPARE_INLINE_BASECLASS_DECL(equivalent) + PHMAP_COMPARE_INLINE_BASECLASS_DECL(nonequivalent) + }; + + template + struct partial_ordering_base { + PHMAP_COMPARE_INLINE_BASECLASS_DECL(less) + PHMAP_COMPARE_INLINE_BASECLASS_DECL(equivalent) + PHMAP_COMPARE_INLINE_BASECLASS_DECL(greater) + PHMAP_COMPARE_INLINE_BASECLASS_DECL(unordered) + }; + + template + struct weak_ordering_base { + PHMAP_COMPARE_INLINE_BASECLASS_DECL(less) + PHMAP_COMPARE_INLINE_BASECLASS_DECL(equivalent) + PHMAP_COMPARE_INLINE_BASECLASS_DECL(greater) + }; + + template + struct strong_ordering_base { + PHMAP_COMPARE_INLINE_BASECLASS_DECL(less) + PHMAP_COMPARE_INLINE_BASECLASS_DECL(equal) + PHMAP_COMPARE_INLINE_BASECLASS_DECL(equivalent) + PHMAP_COMPARE_INLINE_BASECLASS_DECL(greater) + }; + + } // namespace compare_internal + + class weak_equality + : public compare_internal::weak_equality_base { + explicit constexpr weak_equality(compare_internal::eq v) noexcept + : value_(static_cast(v)) {} + friend struct compare_internal::weak_equality_base; + + public: + PHMAP_COMPARE_INLINE_SUBCLASS_DECL(weak_equality, equivalent) + PHMAP_COMPARE_INLINE_SUBCLASS_DECL(weak_equality, nonequivalent) + + // Comparisons + friend constexpr bool operator==( + weak_equality v, compare_internal::OnlyLiteralZero<>) noexcept { + return v.value_ == 0; + } + friend constexpr bool operator!=( + weak_equality v, compare_internal::OnlyLiteralZero<>) noexcept { + return v.value_ != 0; + } + friend constexpr bool operator==(compare_internal::OnlyLiteralZero<>, + weak_equality v) noexcept { 
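+		// Mirrored overload so the literal 0 can also appear on the left-hand side.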
+ return 0 == v.value_; + } + friend constexpr bool operator!=(compare_internal::OnlyLiteralZero<>, + weak_equality v) noexcept { + return 0 != v.value_; + } + + private: + compare_internal::value_type value_; + }; + PHMAP_COMPARE_INLINE_INIT(weak_equality, equivalent, + compare_internal::eq::equivalent); + PHMAP_COMPARE_INLINE_INIT(weak_equality, nonequivalent, + compare_internal::eq::nonequivalent); + + class strong_equality + : public compare_internal::strong_equality_base { + explicit constexpr strong_equality(compare_internal::eq v) noexcept + : value_(static_cast(v)) {} + friend struct compare_internal::strong_equality_base; + + public: + PHMAP_COMPARE_INLINE_SUBCLASS_DECL(strong_equality, equal) + PHMAP_COMPARE_INLINE_SUBCLASS_DECL(strong_equality, nonequal) + PHMAP_COMPARE_INLINE_SUBCLASS_DECL(strong_equality, equivalent) + PHMAP_COMPARE_INLINE_SUBCLASS_DECL(strong_equality, nonequivalent) + + // Conversion + constexpr operator weak_equality() const noexcept { // NOLINT + return value_ == 0 ? weak_equality::equivalent + : weak_equality::nonequivalent; + } + // Comparisons + friend constexpr bool operator==( + strong_equality v, compare_internal::OnlyLiteralZero<>) noexcept { + return v.value_ == 0; + } + friend constexpr bool operator!=( + strong_equality v, compare_internal::OnlyLiteralZero<>) noexcept { + return v.value_ != 0; + } + friend constexpr bool operator==(compare_internal::OnlyLiteralZero<>, + strong_equality v) noexcept { + return 0 == v.value_; + } + friend constexpr bool operator!=(compare_internal::OnlyLiteralZero<>, + strong_equality v) noexcept { + return 0 != v.value_; + } + + private: + compare_internal::value_type value_; + }; + + PHMAP_COMPARE_INLINE_INIT(strong_equality, equal, compare_internal::eq::equal); + PHMAP_COMPARE_INLINE_INIT(strong_equality, nonequal, + compare_internal::eq::nonequal); + PHMAP_COMPARE_INLINE_INIT(strong_equality, equivalent, + compare_internal::eq::equivalent); + PHMAP_COMPARE_INLINE_INIT(strong_equality, nonequivalent, + compare_internal::eq::nonequivalent); + + class partial_ordering + : public compare_internal::partial_ordering_base { + explicit constexpr partial_ordering(compare_internal::eq v) noexcept + : value_(static_cast(v)) {} + explicit constexpr partial_ordering(compare_internal::ord v) noexcept + : value_(static_cast(v)) {} + explicit constexpr partial_ordering(compare_internal::ncmp v) noexcept + : value_(static_cast(v)) {} + friend struct compare_internal::partial_ordering_base; + + constexpr bool is_ordered() const noexcept { + return value_ != + compare_internal::value_type(compare_internal::ncmp::unordered); + } + + public: + PHMAP_COMPARE_INLINE_SUBCLASS_DECL(partial_ordering, less) + PHMAP_COMPARE_INLINE_SUBCLASS_DECL(partial_ordering, equivalent) + PHMAP_COMPARE_INLINE_SUBCLASS_DECL(partial_ordering, greater) + PHMAP_COMPARE_INLINE_SUBCLASS_DECL(partial_ordering, unordered) + + // Conversion + constexpr operator weak_equality() const noexcept { // NOLINT + return value_ == 0 ? 
weak_equality::equivalent + : weak_equality::nonequivalent; + } + // Comparisons + friend constexpr bool operator==( + partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + return v.is_ordered() && v.value_ == 0; + } + friend constexpr bool operator!=( + partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + return !v.is_ordered() || v.value_ != 0; + } + friend constexpr bool operator<( + partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + return v.is_ordered() && v.value_ < 0; + } + friend constexpr bool operator<=( + partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + return v.is_ordered() && v.value_ <= 0; + } + friend constexpr bool operator>( + partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + return v.is_ordered() && v.value_ > 0; + } + friend constexpr bool operator>=( + partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + return v.is_ordered() && v.value_ >= 0; + } + friend constexpr bool operator==(compare_internal::OnlyLiteralZero<>, + partial_ordering v) noexcept { + return v.is_ordered() && 0 == v.value_; + } + friend constexpr bool operator!=(compare_internal::OnlyLiteralZero<>, + partial_ordering v) noexcept { + return !v.is_ordered() || 0 != v.value_; + } + friend constexpr bool operator<(compare_internal::OnlyLiteralZero<>, + partial_ordering v) noexcept { + return v.is_ordered() && 0 < v.value_; + } + friend constexpr bool operator<=(compare_internal::OnlyLiteralZero<>, + partial_ordering v) noexcept { + return v.is_ordered() && 0 <= v.value_; + } + friend constexpr bool operator>(compare_internal::OnlyLiteralZero<>, + partial_ordering v) noexcept { + return v.is_ordered() && 0 > v.value_; + } + friend constexpr bool operator>=(compare_internal::OnlyLiteralZero<>, + partial_ordering v) noexcept { + return v.is_ordered() && 0 >= v.value_; + } + + private: + compare_internal::value_type value_; + }; + + PHMAP_COMPARE_INLINE_INIT(partial_ordering, less, compare_internal::ord::less); + PHMAP_COMPARE_INLINE_INIT(partial_ordering, equivalent, + compare_internal::eq::equivalent); + PHMAP_COMPARE_INLINE_INIT(partial_ordering, greater, + compare_internal::ord::greater); + PHMAP_COMPARE_INLINE_INIT(partial_ordering, unordered, + compare_internal::ncmp::unordered); + + class weak_ordering + : public compare_internal::weak_ordering_base { + explicit constexpr weak_ordering(compare_internal::eq v) noexcept + : value_(static_cast(v)) {} + explicit constexpr weak_ordering(compare_internal::ord v) noexcept + : value_(static_cast(v)) {} + friend struct compare_internal::weak_ordering_base; + + public: + PHMAP_COMPARE_INLINE_SUBCLASS_DECL(weak_ordering, less) + PHMAP_COMPARE_INLINE_SUBCLASS_DECL(weak_ordering, equivalent) + PHMAP_COMPARE_INLINE_SUBCLASS_DECL(weak_ordering, greater) + + // Conversions + constexpr operator weak_equality() const noexcept { // NOLINT + return value_ == 0 ? weak_equality::equivalent + : weak_equality::nonequivalent; + } + constexpr operator partial_ordering() const noexcept { // NOLINT + return value_ == 0 ? partial_ordering::equivalent + : (value_ < 0 ? 
partial_ordering::less + : partial_ordering::greater); + } + // Comparisons + friend constexpr bool operator==( + weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + return v.value_ == 0; + } + friend constexpr bool operator!=( + weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + return v.value_ != 0; + } + friend constexpr bool operator<( + weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + return v.value_ < 0; + } + friend constexpr bool operator<=( + weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + return v.value_ <= 0; + } + friend constexpr bool operator>( + weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + return v.value_ > 0; + } + friend constexpr bool operator>=( + weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + return v.value_ >= 0; + } + friend constexpr bool operator==(compare_internal::OnlyLiteralZero<>, + weak_ordering v) noexcept { + return 0 == v.value_; + } + friend constexpr bool operator!=(compare_internal::OnlyLiteralZero<>, + weak_ordering v) noexcept { + return 0 != v.value_; + } + friend constexpr bool operator<(compare_internal::OnlyLiteralZero<>, + weak_ordering v) noexcept { + return 0 < v.value_; + } + friend constexpr bool operator<=(compare_internal::OnlyLiteralZero<>, + weak_ordering v) noexcept { + return 0 <= v.value_; + } + friend constexpr bool operator>(compare_internal::OnlyLiteralZero<>, + weak_ordering v) noexcept { + return 0 > v.value_; + } + friend constexpr bool operator>=(compare_internal::OnlyLiteralZero<>, + weak_ordering v) noexcept { + return 0 >= v.value_; + } + + private: + compare_internal::value_type value_; + }; + + PHMAP_COMPARE_INLINE_INIT(weak_ordering, less, compare_internal::ord::less); + PHMAP_COMPARE_INLINE_INIT(weak_ordering, equivalent, + compare_internal::eq::equivalent); + PHMAP_COMPARE_INLINE_INIT(weak_ordering, greater, + compare_internal::ord::greater); + + class strong_ordering + : public compare_internal::strong_ordering_base { + explicit constexpr strong_ordering(compare_internal::eq v) noexcept + : value_(static_cast(v)) {} + explicit constexpr strong_ordering(compare_internal::ord v) noexcept + : value_(static_cast(v)) {} + friend struct compare_internal::strong_ordering_base; + + public: + PHMAP_COMPARE_INLINE_SUBCLASS_DECL(strong_ordering, less) + PHMAP_COMPARE_INLINE_SUBCLASS_DECL(strong_ordering, equal) + PHMAP_COMPARE_INLINE_SUBCLASS_DECL(strong_ordering, equivalent) + PHMAP_COMPARE_INLINE_SUBCLASS_DECL(strong_ordering, greater) + + // Conversions + constexpr operator weak_equality() const noexcept { // NOLINT + return value_ == 0 ? weak_equality::equivalent + : weak_equality::nonequivalent; + } + constexpr operator strong_equality() const noexcept { // NOLINT + return value_ == 0 ? strong_equality::equal : strong_equality::nonequal; + } + constexpr operator partial_ordering() const noexcept { // NOLINT + return value_ == 0 ? partial_ordering::equivalent + : (value_ < 0 ? partial_ordering::less + : partial_ordering::greater); + } + constexpr operator weak_ordering() const noexcept { // NOLINT + return value_ == 0 + ? weak_ordering::equivalent + : (value_ < 0 ? 
weak_ordering::less : weak_ordering::greater); + } + // Comparisons + friend constexpr bool operator==( + strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + return v.value_ == 0; + } + friend constexpr bool operator!=( + strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + return v.value_ != 0; + } + friend constexpr bool operator<( + strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + return v.value_ < 0; + } + friend constexpr bool operator<=( + strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + return v.value_ <= 0; + } + friend constexpr bool operator>( + strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + return v.value_ > 0; + } + friend constexpr bool operator>=( + strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + return v.value_ >= 0; + } + friend constexpr bool operator==(compare_internal::OnlyLiteralZero<>, + strong_ordering v) noexcept { + return 0 == v.value_; + } + friend constexpr bool operator!=(compare_internal::OnlyLiteralZero<>, + strong_ordering v) noexcept { + return 0 != v.value_; + } + friend constexpr bool operator<(compare_internal::OnlyLiteralZero<>, + strong_ordering v) noexcept { + return 0 < v.value_; + } + friend constexpr bool operator<=(compare_internal::OnlyLiteralZero<>, + strong_ordering v) noexcept { + return 0 <= v.value_; + } + friend constexpr bool operator>(compare_internal::OnlyLiteralZero<>, + strong_ordering v) noexcept { + return 0 > v.value_; + } + friend constexpr bool operator>=(compare_internal::OnlyLiteralZero<>, + strong_ordering v) noexcept { + return 0 >= v.value_; + } + + private: + compare_internal::value_type value_; + }; + PHMAP_COMPARE_INLINE_INIT(strong_ordering, less, compare_internal::ord::less); + PHMAP_COMPARE_INLINE_INIT(strong_ordering, equal, compare_internal::eq::equal); + PHMAP_COMPARE_INLINE_INIT(strong_ordering, equivalent, + compare_internal::eq::equivalent); + PHMAP_COMPARE_INLINE_INIT(strong_ordering, greater, + compare_internal::ord::greater); + +#undef PHMAP_COMPARE_INLINE_BASECLASS_DECL +#undef PHMAP_COMPARE_INLINE_SUBCLASS_DECL +#undef PHMAP_COMPARE_INLINE_INIT + + namespace compare_internal { + // We also provide these comparator adapter functions for internal phmap use. + + // Helper functions to do a boolean comparison of two keys given a boolean + // or three-way comparator. + // SFINAE prevents implicit conversions to bool (such as from int). + template ::value, int> = 0> + constexpr bool compare_result_as_less_than(const Bool r) { return r; } + constexpr bool compare_result_as_less_than(const phmap::weak_ordering r) { + return r < 0; + } + + template + constexpr bool do_less_than_comparison(const Compare &compare, const K &x, + const LK &y) { + return compare_result_as_less_than(compare(x, y)); + } + + // Helper functions to do a three-way comparison of two keys given a boolean or + // three-way comparator. + // SFINAE prevents implicit conversions to int (such as from bool). + template ::value, int> = 0> + constexpr phmap::weak_ordering compare_result_as_ordering(const Int c) { + return c < 0 ? phmap::weak_ordering::less + : c == 0 ? 
phmap::weak_ordering::equivalent + : phmap::weak_ordering::greater; + } + constexpr phmap::weak_ordering compare_result_as_ordering( + const phmap::weak_ordering c) { + return c; + } + + template < + typename Compare, typename K, typename LK, + phmap::enable_if_t>::value, + int> = 0> + constexpr phmap::weak_ordering do_three_way_comparison(const Compare &compare, + const K &x, const LK &y) { + return compare_result_as_ordering(compare(x, y)); + } + template < + typename Compare, typename K, typename LK, + phmap::enable_if_t>::value, + int> = 0> + constexpr phmap::weak_ordering do_three_way_comparison(const Compare &compare, + const K &x, const LK &y) { + return compare(x, y) ? phmap::weak_ordering::less + : compare(y, x) ? phmap::weak_ordering::greater + : phmap::weak_ordering::equivalent; + } + + } // namespace compare_internal +} + + +namespace phmap { + +namespace container_internal { + + // A helper class that indicates if the Compare parameter is a key-compare-to + // comparator. + template + using btree_is_key_compare_to = + std::is_convertible, + phmap::weak_ordering>; + + struct StringBtreeDefaultLess { + using is_transparent = void; + + StringBtreeDefaultLess() = default; + + // Compatibility constructor. + StringBtreeDefaultLess(std::less) {} // NOLINT +#if PHMAP_HAVE_STD_STRING_VIEW + StringBtreeDefaultLess(std::less) {} // NOLINT + StringBtreeDefaultLess(phmap::Less) {} // NOLINT + + phmap::weak_ordering operator()(std::string_view lhs, + std::string_view rhs) const { + return compare_internal::compare_result_as_ordering(lhs.compare(rhs)); + } +#else + phmap::weak_ordering operator()(std::string lhs, + std::string rhs) const { + return compare_internal::compare_result_as_ordering(lhs.compare(rhs)); + } +#endif + }; + + struct StringBtreeDefaultGreater { + using is_transparent = void; + + StringBtreeDefaultGreater() = default; + + StringBtreeDefaultGreater(std::greater) {} // NOLINT +#if PHMAP_HAVE_STD_STRING_VIEW + StringBtreeDefaultGreater(std::greater) {} // NOLINT + + phmap::weak_ordering operator()(std::string_view lhs, + std::string_view rhs) const { + return compare_internal::compare_result_as_ordering(rhs.compare(lhs)); + } +#else + phmap::weak_ordering operator()(std::string lhs, + std::string rhs) const { + return compare_internal::compare_result_as_ordering(rhs.compare(lhs)); + } +#endif + }; + + // A helper class to convert a boolean comparison into a three-way "compare-to" + // comparison that returns a negative value to indicate less-than, zero to + // indicate equality and a positive value to indicate greater-than. This helper + // class is specialized for less, greater, + // less, and greater. + // + // key_compare_to_adapter is provided so that btree users + // automatically get the more efficient compare-to code when using common + // google string types with common comparison functors. + // These string-like specializations also turn on heterogeneous lookup by + // default. 
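+ //
+ // A minimal sketch of such a comparator (illustrative only, not part of the
+ // upstream header): any functor whose call operator returns
+ // phmap::weak_ordering is detected by btree_is_key_compare_to above, letting
+ // btree answer less/equal/greater with one comparison per key instead of two
+ // boolean ones. Hypothetical user code:
+ //
+ //   struct ReverseStringCmp {
+ //     using is_transparent = void;  // also opts in to heterogeneous lookup
+ //     phmap::weak_ordering operator()(const std::string &a,
+ //                                     const std::string &b) const {
+ //       return compare_internal::compare_result_as_ordering(b.compare(a));
+ //     }
+ //   };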
+ template + struct key_compare_to_adapter { + using type = Compare; + }; + + template <> + struct key_compare_to_adapter> { + using type = StringBtreeDefaultLess; + }; + + template <> + struct key_compare_to_adapter> { + using type = StringBtreeDefaultLess; + }; + + template <> + struct key_compare_to_adapter> { + using type = StringBtreeDefaultGreater; + }; + +#if PHMAP_HAVE_STD_STRING_VIEW + template <> + struct key_compare_to_adapter> { + using type = StringBtreeDefaultLess; + }; + + template <> + struct key_compare_to_adapter> { + using type = StringBtreeDefaultLess; + }; + + template <> + struct key_compare_to_adapter> { + using type = StringBtreeDefaultGreater; + }; +#endif + + template + struct common_params { + // If Compare is a common comparator for a std::string-like type, then we adapt it + // to use heterogeneous lookup and to be a key-compare-to comparator. + using key_compare = typename key_compare_to_adapter::type; + // A type which indicates if we have a key-compare-to functor or a plain old + // key-compare functor. + using is_key_compare_to = btree_is_key_compare_to; + + using allocator_type = Alloc; + using key_type = Key; + using size_type = std::make_signed::type; + using difference_type = ptrdiff_t; + + // True if this is a multiset or multimap. + using is_multi_container = std::integral_constant; + + using slot_policy = SlotPolicy; + using slot_type = typename slot_policy::slot_type; + using value_type = typename slot_policy::value_type; + using init_type = typename slot_policy::mutable_value_type; + using pointer = value_type *; + using const_pointer = const value_type *; + using reference = value_type &; + using const_reference = const value_type &; + + enum { + kTargetNodeSize = TargetNodeSize, + + // Upper bound for the available space for values. This is largest for leaf + // nodes, which have overhead of at least a pointer + 4 bytes (for storing + // 3 field_types and an enum). + kNodeValueSpace = + TargetNodeSize - /*minimum overhead=*/(sizeof(void *) + 4), + }; + + // This is an integral type large enough to hold as many + // ValueSize-values as will fit a node of TargetNodeSize bytes. + using node_count_type = + phmap::conditional_t<(kNodeValueSpace / sizeof(value_type) > + (std::numeric_limits::max)()), + uint16_t, uint8_t>; // NOLINT + + // The following methods are necessary for passing this struct as PolicyTraits + // for node_handle and/or are used within btree. + static value_type &element(slot_type *slot) { + return slot_policy::element(slot); + } + static const value_type &element(const slot_type *slot) { + return slot_policy::element(slot); + } + template + static void construct(Alloc *alloc, slot_type *slot, Args &&... 
args) { + slot_policy::construct(alloc, slot, std::forward(args)...); + } + static void construct(Alloc *alloc, slot_type *slot, slot_type *other) { + slot_policy::construct(alloc, slot, other); + } + static void destroy(Alloc *alloc, slot_type *slot) { + slot_policy::destroy(alloc, slot); + } + static void transfer(Alloc *alloc, slot_type *new_slot, slot_type *old_slot) { + construct(alloc, new_slot, old_slot); + destroy(alloc, old_slot); + } + static void swap(Alloc *alloc, slot_type *a, slot_type *b) { + slot_policy::swap(alloc, a, b); + } + static void move(Alloc *alloc, slot_type *src, slot_type *dest) { + slot_policy::move(alloc, src, dest); + } + static void move(Alloc *alloc, slot_type *first, slot_type *last, + slot_type *result) { + slot_policy::move(alloc, first, last, result); + } + }; + + // A parameters structure for holding the type parameters for a btree_map. + // Compare and Alloc should be nothrow copy-constructible. + template + struct map_params : common_params> { + using super_type = typename map_params::common_params; + using mapped_type = Data; + // This type allows us to move keys when it is safe to do so. It is safe + // for maps in which value_type and mutable_value_type are layout compatible. + using slot_policy = typename super_type::slot_policy; + using slot_type = typename super_type::slot_type; + using value_type = typename super_type::value_type; + using init_type = typename super_type::init_type; + + using key_compare = typename super_type::key_compare; + // Inherit from key_compare for empty base class optimization. + struct value_compare : private key_compare { + value_compare() = default; + explicit value_compare(const key_compare &cmp) : key_compare(cmp) {} + + template + auto operator()(const T &left, const U &right) const + -> decltype(std::declval()(left.first, right.first)) { + return key_compare::operator()(left.first, right.first); + } + }; + using is_map_container = std::true_type; + + static const Key &key(const value_type &x) { return x.first; } + static const Key &key(const init_type &x) { return x.first; } + static const Key &key(const slot_type *x) { return slot_policy::key(x); } + static mapped_type &value(value_type *value) { return value->second; } + }; + + // This type implements the necessary functions from the + // btree::container_internal::slot_type interface. + template + struct set_slot_policy { + using slot_type = Key; + using value_type = Key; + using mutable_value_type = Key; + + static value_type &element(slot_type *slot) { return *slot; } + static const value_type &element(const slot_type *slot) { return *slot; } + + template + static void construct(Alloc *alloc, slot_type *slot, Args &&... 
args) { + phmap::allocator_traits::construct(*alloc, slot, + std::forward(args)...); + } + + template + static void construct(Alloc *alloc, slot_type *slot, slot_type *other) { + phmap::allocator_traits::construct(*alloc, slot, std::move(*other)); + } + + template + static void destroy(Alloc *alloc, slot_type *slot) { + phmap::allocator_traits::destroy(*alloc, slot); + } + + template + static void swap(Alloc * /*alloc*/, slot_type *a, slot_type *b) { + using std::swap; + swap(*a, *b); + } + + template + static void move(Alloc * /*alloc*/, slot_type *src, slot_type *dest) { + *dest = std::move(*src); + } + + template + static void move(Alloc *alloc, slot_type *first, slot_type *last, + slot_type *result) { + for (slot_type *src = first, *dest = result; src != last; ++src, ++dest) + move(alloc, src, dest); + } + }; + + // A parameters structure for holding the type parameters for a btree_set. + // Compare and Alloc should be nothrow copy-constructible. + template + struct set_params : common_params> { + using value_type = Key; + using slot_type = typename set_params::common_params::slot_type; + using value_compare = typename set_params::common_params::key_compare; + using is_map_container = std::false_type; + + static const Key &key(const value_type &x) { return x; } + static const Key &key(const slot_type *x) { return *x; } + }; + + // An adapter class that converts a lower-bound compare into an upper-bound + // compare. Note: there is no need to make a version of this adapter specialized + // for key-compare-to functors because the upper-bound (the first value greater + // than the input) is never an exact match. + template + struct upper_bound_adapter { + explicit upper_bound_adapter(const Compare &c) : comp(c) {} + template + bool operator()(const K &a, const LK &b) const { + // Returns true when a is not greater than b. + return !phmap::compare_internal::compare_result_as_less_than(comp(b, a)); + } + + private: + Compare comp; + }; + + enum class MatchKind : uint8_t { kEq, kNe }; + + template + struct SearchResult { + V value; + MatchKind match; + + static constexpr bool HasMatch() { return true; } + bool IsEq() const { return match == MatchKind::kEq; } + }; + + // When we don't use CompareTo, `match` is not present. + // This ensures that callers can't use it accidentally when it provides no + // useful information. + template + struct SearchResult { + V value; + + static constexpr bool HasMatch() { return false; } + static constexpr bool IsEq() { return false; } + }; + + // A node in the btree holding. The same node type is used for both internal + // and leaf nodes in the btree, though the nodes are allocated in such a way + // that the children array is only valid in internal nodes. 
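+ //
+ // Rough picture of the node memory layout described above (illustrative;
+ // the exact offsets come from phmap::container_internal::Layout below):
+ //
+ //   leaf node:     [parent*][position|start|count|max_count][values...]
+ //   internal node: [parent*][position|start|count|max_count][values...]
+ //                  [children: kNodeValues + 1 pointers]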
+ template + class btree_node { + using is_key_compare_to = typename Params::is_key_compare_to; + using is_multi_container = typename Params::is_multi_container; + using field_type = typename Params::node_count_type; + using allocator_type = typename Params::allocator_type; + using slot_type = typename Params::slot_type; + + public: + using params_type = Params; + using key_type = typename Params::key_type; + using value_type = typename Params::value_type; + using pointer = typename Params::pointer; + using const_pointer = typename Params::const_pointer; + using reference = typename Params::reference; + using const_reference = typename Params::const_reference; + using key_compare = typename Params::key_compare; + using size_type = typename Params::size_type; + using difference_type = typename Params::difference_type; + + // Btree decides whether to use linear node search as follows: + // - If the key is arithmetic and the comparator is std::less or + // std::greater, choose linear. + // - Otherwise, choose binary. + // TODO(ezb): Might make sense to add condition(s) based on node-size. + using use_linear_search = std::integral_constant< + bool, + std::is_arithmetic::value && + (std::is_same, key_compare>::value || + std::is_same, key_compare>::value || + std::is_same, key_compare>::value)>; + + + ~btree_node() = default; + btree_node(btree_node const &) = delete; + btree_node &operator=(btree_node const &) = delete; + + // Public for EmptyNodeType. + constexpr static size_type Alignment() { + static_assert(LeafLayout(1).Alignment() == InternalLayout().Alignment(), + "Alignment of all nodes must be equal."); + return (size_type)InternalLayout().Alignment(); + } + + protected: + btree_node() = default; + + private: + using layout_type = phmap::container_internal::Layout; + constexpr static size_type SizeWithNValues(size_type n) { + return (size_type)layout_type(/*parent*/ 1, + /*position, start, count, max_count*/ 4, + /*values*/ (size_t)n, + /*children*/ 0) + .AllocSize(); + } + // A lower bound for the overhead of fields other than values in a leaf node. + constexpr static size_type MinimumOverhead() { + return (size_type)(SizeWithNValues(1) - sizeof(value_type)); + } + + // Compute how many values we can fit onto a leaf node taking into account + // padding. + constexpr static size_type NodeTargetValues(const int begin, const int end) { + return begin == end ? begin + : SizeWithNValues((begin + end) / 2 + 1) > + params_type::kTargetNodeSize + ? NodeTargetValues(begin, (begin + end) / 2) + : NodeTargetValues((begin + end) / 2 + 1, end); + } + + enum { + kTargetNodeSize = params_type::kTargetNodeSize, + kNodeTargetValues = NodeTargetValues(0, params_type::kTargetNodeSize), + + // We need a minimum of 3 values per internal node in order to perform + // splitting (1 value for the two nodes involved in the split and 1 value + // propagated to the parent as the delimiter for the split). + kNodeValues = kNodeTargetValues >= 3 ? kNodeTargetValues : 3, + + // The node is internal (i.e. is not a leaf node) if and only if `max_count` + // has this value. + kInternalNodeMaxCount = 0, + }; + + // Leaves can have less than kNodeValues values. 
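+ //
+ // Worked example of NodeTargetValues above (illustrative, assuming 8-byte
+ // pointers): with kTargetNodeSize = 256 and sizeof(value_type) == 8, the
+ // per-leaf overhead is at least sizeof(void*) + 4 = 12 bytes, so the
+ // compile-time search settles near (256 - 12) / 8, about 30 values per node
+ // (clamped below by the kNodeValues >= 3 requirement for splitting).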
+ constexpr static layout_type LeafLayout(const int max_values = kNodeValues) { + return layout_type(/*parent*/ 1, + /*position, start, count, max_count*/ 4, + /*values*/ (size_t)max_values, + /*children*/ 0); + } + constexpr static layout_type InternalLayout() { + return layout_type(/*parent*/ 1, + /*position, start, count, max_count*/ 4, + /*values*/ kNodeValues, + /*children*/ kNodeValues + 1); + } + constexpr static size_type LeafSize(const int max_values = kNodeValues) { + return (size_type)LeafLayout(max_values).AllocSize(); + } + constexpr static size_type InternalSize() { + return (size_type)InternalLayout().AllocSize(); + } + + // N is the index of the type in the Layout definition. + // ElementType is the Nth type in the Layout definition. + template + inline typename layout_type::template ElementType *GetField() { + // We assert that we don't read from values that aren't there. + assert(N < 3 || !leaf()); + return InternalLayout().template Pointer(reinterpret_cast(this)); + } + + template + inline const typename layout_type::template ElementType *GetField() const { + assert(N < 3 || !leaf()); + return InternalLayout().template Pointer( + reinterpret_cast(this)); + } + + void set_parent(btree_node *p) { *GetField<0>() = p; } + field_type &mutable_count() { return GetField<1>()[2]; } + slot_type *slot(size_type i) { return &GetField<2>()[i]; } + const slot_type *slot(size_type i) const { return &GetField<2>()[i]; } + void set_position(field_type v) { GetField<1>()[0] = v; } + void set_start(field_type v) { GetField<1>()[1] = v; } + void set_count(field_type v) { GetField<1>()[2] = v; } + void set_max_count(field_type v) { GetField<1>()[3] = v; } + + public: + // Whether this is a leaf node or not. This value doesn't change after the + // node is created. + bool leaf() const { return GetField<1>()[3] != kInternalNodeMaxCount; } + + // Getter for the position of this node in its parent. + field_type position() const { return GetField<1>()[0]; } + + // Getter for the offset of the first value in the `values` array. + field_type start() const { return GetField<1>()[1]; } + + // Getters for the number of values stored in this node. + field_type count() const { return GetField<1>()[2]; } + field_type max_count() const { + // Internal nodes have max_count==kInternalNodeMaxCount. + // Leaf nodes have max_count in [1, kNodeValues]. + const field_type max_count = GetField<1>()[3]; + return max_count == field_type{kInternalNodeMaxCount} + ? field_type{kNodeValues} + : max_count; + } + + // Getter for the parent of this node. + btree_node *parent() const { return *GetField<0>(); } + // Getter for whether the node is the root of the tree. The parent of the + // root of the tree is the leftmost node in the tree which is guaranteed to + // be a leaf. + bool is_root() const { return parent()->leaf(); } + void make_root() { + assert(parent()->is_root()); + set_parent(parent()->parent()); + } + + // Getters for the key/value at position i in the node. + const key_type &key(size_type i) const { return params_type::key(slot(i)); } + reference value(size_type i) { return params_type::element(slot(i)); } + const_reference value(size_type i) const { return params_type::element(slot(i)); } + + // Getters/setter for the child at position i in the node. 
+ btree_node *child(size_type i) const { return GetField<3>()[i]; } + btree_node *&mutable_child(size_type i) { return GetField<3>()[i]; } + void clear_child(size_type i) { + phmap::container_internal::SanitizerPoisonObject(&mutable_child(i)); + } + void set_child(size_type i, btree_node *c) { + phmap::container_internal::SanitizerUnpoisonObject(&mutable_child(i)); + mutable_child(i) = c; + c->set_position((field_type)i); + } + void init_child(int i, btree_node *c) { + set_child(i, c); + c->set_parent(this); + } + + // Returns the position of the first value whose key is not less than k. + template + SearchResult lower_bound( + const K &k, const key_compare &comp) const { + return use_linear_search::value ? linear_search(k, comp) + : binary_search(k, comp); + } + // Returns the position of the first value whose key is greater than k. + template + int upper_bound(const K &k, const key_compare &comp) const { + auto upper_compare = upper_bound_adapter(comp); + return use_linear_search::value ? linear_search(k, upper_compare).value + : binary_search(k, upper_compare).value; + } + + template + SearchResult::value> + linear_search(const K &k, const Compare &comp) const { + return linear_search_impl(k, 0, count(), comp, + btree_is_key_compare_to()); + } + + template + SearchResult::value> + binary_search(const K &k, const Compare &comp) const { + return binary_search_impl(k, 0, count(), comp, + btree_is_key_compare_to()); + } + + // Returns the position of the first value whose key is not less than k using + // linear search performed using plain compare. + template + SearchResult linear_search_impl( + const K &k, int s, const int e, const Compare &comp, + std::false_type /* IsCompareTo */) const { + while (s < e) { + if (!comp(key(s), k)) { + break; + } + ++s; + } + return {s}; + } + + // Returns the position of the first value whose key is not less than k using + // linear search performed using compare-to. + template + SearchResult linear_search_impl( + const K &k, int s, const int e, const Compare &comp, + std::true_type /* IsCompareTo */) const { + while (s < e) { + const phmap::weak_ordering c = comp(key(s), k); + if (c == 0) { + return {s, MatchKind::kEq}; + } else if (c > 0) { + break; + } + ++s; + } + return {s, MatchKind::kNe}; + } + + // Returns the position of the first value whose key is not less than k using + // binary search performed using plain compare. + template + SearchResult binary_search_impl( + const K &k, int s, int e, const Compare &comp, + std::false_type /* IsCompareTo */) const { + while (s != e) { + const int mid = (s + e) >> 1; + if (comp(key(mid), k)) { + s = mid + 1; + } else { + e = mid; + } + } + return {s}; + } + + // Returns the position of the first value whose key is not less than k using + // binary search performed using compare-to. + template + SearchResult binary_search_impl( + const K &k, int s, int e, const CompareTo &comp, + std::true_type /* IsCompareTo */) const { + if (is_multi_container::value) { + MatchKind exact_match = MatchKind::kNe; + while (s != e) { + const int mid = (s + e) >> 1; + const phmap::weak_ordering c = comp(key(mid), k); + if (c < 0) { + s = mid + 1; + } else { + e = mid; + if (c == 0) { + // Need to return the first value whose key is not less than k, + // which requires continuing the binary search if this is a + // multi-container. + exact_match = MatchKind::kEq; + } + } + } + return {s, exact_match}; + } else { // Not a multi-container. 
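+ // Unique-key case: at most one equivalent key can exist, so the search
+ // can return as soon as the three-way comparison reports an exact match,
+ // rather than continuing to the first such position as above.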
+ while (s != e) { + const int mid = (s + e) >> 1; + const phmap::weak_ordering c = comp(key(mid), k); + if (c < 0) { + s = mid + 1; + } else if (c > 0) { + e = mid; + } else { + return {mid, MatchKind::kEq}; + } + } + return {s, MatchKind::kNe}; + } + } + + // Emplaces a value at position i, shifting all existing values and + // children at positions >= i to the right by 1. + template + void emplace_value(size_type i, allocator_type *alloc, Args &&... args); + + // Removes the value at position i, shifting all existing values and children + // at positions > i to the left by 1. + void remove_value(int i, allocator_type *alloc); + + // Removes the values at positions [i, i + to_erase), shifting all values + // after that range to the left by to_erase. Does not change children at all. + void remove_values_ignore_children(int i, size_type to_erase, + allocator_type *alloc); + + // Rebalances a node with its right sibling. + void rebalance_right_to_left(int to_move, btree_node *right, + allocator_type *alloc); + void rebalance_left_to_right(int to_move, btree_node *right, + allocator_type *alloc); + + // Splits a node, moving a portion of the node's values to its right sibling. + void split(int insert_position, btree_node *dest, allocator_type *alloc); + + // Merges a node with its right sibling, moving all of the values and the + // delimiting key in the parent node onto itself. + void merge(btree_node *sibling, allocator_type *alloc); + + // Swap the contents of "this" and "src". + void swap(btree_node *src, allocator_type *alloc); + + // Node allocation/deletion routines. + static btree_node *init_leaf(btree_node *n, btree_node *parent, + int max_count) { + n->set_parent(parent); + n->set_position(0); + n->set_start(0); + n->set_count(0); + n->set_max_count((field_type)max_count); + phmap::container_internal::SanitizerPoisonMemoryRegion( + n->slot(0), max_count * sizeof(slot_type)); + return n; + } + static btree_node *init_internal(btree_node *n, btree_node *parent) { + init_leaf(n, parent, kNodeValues); + // Set `max_count` to a sentinel value to indicate that this node is + // internal. + n->set_max_count(kInternalNodeMaxCount); + phmap::container_internal::SanitizerPoisonMemoryRegion( + &n->mutable_child(0), (kNodeValues + 1) * sizeof(btree_node *)); + return n; + } + void destroy(allocator_type *alloc) { + for (int i = 0; i < count(); ++i) { + value_destroy(i, alloc); + } + } + + public: + // Exposed only for tests. + static bool testonly_uses_linear_node_search() { + return use_linear_search::value; + } + + private: + template + void value_init(const size_type i, allocator_type *alloc, Args &&... args) { + phmap::container_internal::SanitizerUnpoisonObject(slot(i)); + params_type::construct(alloc, slot(i), std::forward(args)...); + } + void value_destroy(const size_type i, allocator_type *alloc) { + params_type::destroy(alloc, slot(i)); + phmap::container_internal::SanitizerPoisonObject(slot(i)); + } + + // Move n values starting at value i in this node into the values starting at + // value j in node x. + void uninitialized_move_n(const size_type n, const size_type i, + const size_type j, btree_node *x, + allocator_type *alloc) { + phmap::container_internal::SanitizerUnpoisonMemoryRegion( + x->slot(j), n * sizeof(slot_type)); + for (slot_type *src = slot(i), *end = src + n, *dest = x->slot(j); + src != end; ++src, ++dest) { + params_type::construct(alloc, dest, src); + } + } + + // Destroys a range of n values, starting at index i. 
+ void value_destroy_n(const size_type i, const size_type n, + allocator_type *alloc) { + for (int j = 0; j < n; ++j) { + value_destroy(i + j, alloc); + } + } + + template + friend class btree; + template + friend struct btree_iterator; + friend class BtreeNodePeer; + }; + + template + struct btree_iterator { + private: + using key_type = typename Node::key_type; + using size_type = typename Node::size_type; + using params_type = typename Node::params_type; + + using node_type = Node; + using normal_node = typename std::remove_const::type; + using const_node = const Node; + using normal_pointer = typename params_type::pointer; + using normal_reference = typename params_type::reference; + using const_pointer = typename params_type::const_pointer; + using const_reference = typename params_type::const_reference; + using slot_type = typename params_type::slot_type; + + using iterator = + btree_iterator; + using const_iterator = + btree_iterator; + + public: + // These aliases are public for std::iterator_traits. + using difference_type = typename Node::difference_type; + using value_type = typename params_type::value_type; + using pointer = Pointer; + using reference = Reference; + using iterator_category = std::bidirectional_iterator_tag; + + btree_iterator() : node(nullptr), position(-1) {} + btree_iterator(Node *n, int p) : node(n), position(p) {} + + // NOTE: this SFINAE allows for implicit conversions from iterator to + // const_iterator, but it specifically avoids defining copy constructors so + // that btree_iterator can be trivially copyable. This is for performance and + // binary size reasons. + template , iterator>::value && + std::is_same::value, + int> = 0> + btree_iterator(const btree_iterator &x) // NOLINT + : node(x.node), position(x.position) {} + + private: + // This SFINAE allows explicit conversions from const_iterator to + // iterator, but also avoids defining a copy constructor. + // NOTE: the const_cast is safe because this constructor is only called by + // non-const methods and the container owns the nodes. + template , const_iterator>::value && + std::is_same::value, + int> = 0> + explicit btree_iterator(const btree_iterator &x) + : node(const_cast(x.node)), position(x.position) {} + + // Increment/decrement the iterator. + void increment() { + if (node->leaf() && ++position < node->count()) { + return; + } + increment_slow(); + } + void increment_slow(); + + void decrement() { + if (node->leaf() && --position >= 0) { + return; + } + decrement_slow(); + } + void decrement_slow(); + + public: + bool operator==(const const_iterator &x) const { + return node == x.node && position == x.position; + } + bool operator!=(const const_iterator &x) const { + return node != x.node || position != x.position; + } + + // Accessors for the key/value the iterator is pointing at. 
+ reference operator*() const { + return node->value(position); + } + pointer operator->() const { + return &node->value(position); + } + + btree_iterator& operator++() { + increment(); + return *this; + } + btree_iterator& operator--() { + decrement(); + return *this; + } + btree_iterator operator++(int) { + btree_iterator tmp = *this; + ++*this; + return tmp; + } + btree_iterator operator--(int) { + btree_iterator tmp = *this; + --*this; + return tmp; + } + + private: + template + friend class btree; + template + friend class btree_container; + template + friend class btree_set_container; + template + friend class btree_map_container; + template + friend class btree_multiset_container; + template + friend struct btree_iterator; + template + friend class base_checker; + + const key_type &key() const { return node->key(position); } + slot_type *slot() { return node->slot(position); } + + // The node in the tree the iterator is pointing at. + Node *node; + // The position within the node of the tree the iterator is pointing at. + // TODO(ezb): make this a field_type + int position; + }; + + template + class btree { + using node_type = btree_node; + using is_key_compare_to = typename Params::is_key_compare_to; + + // We use a static empty node for the root/leftmost/rightmost of empty btrees + // in order to avoid branching in begin()/end(). + struct alignas(node_type::Alignment()) EmptyNodeType : node_type { + using field_type = typename node_type::field_type; + node_type *parent; + field_type position = 0; + field_type start = 0; + field_type count = 0; + // max_count must be != kInternalNodeMaxCount (so that this node is regarded + // as a leaf node). max_count() is never called when the tree is empty. + field_type max_count = node_type::kInternalNodeMaxCount + 1; + +#ifdef _MSC_VER + // MSVC has constexpr code generations bugs here. + EmptyNodeType() : parent(this) {} +#else + constexpr EmptyNodeType(node_type *p) : parent(p) {} +#endif + }; + + static node_type *EmptyNode() { +#ifdef _MSC_VER + static EmptyNodeType* empty_node = new EmptyNodeType; + // This assert fails on some other construction methods. 
+ assert(empty_node->parent == empty_node); + return empty_node; +#else + static constexpr EmptyNodeType empty_node( + const_cast(&empty_node)); + return const_cast(&empty_node); +#endif + } + + enum { + kNodeValues = node_type::kNodeValues, + kMinNodeValues = kNodeValues / 2, + }; + + struct node_stats { + using size_type = typename Params::size_type; + + node_stats(size_type l, size_type i) + : leaf_nodes(l), + internal_nodes(i) { + } + + node_stats& operator+=(const node_stats &x) { + leaf_nodes += x.leaf_nodes; + internal_nodes += x.internal_nodes; + return *this; + } + + size_type leaf_nodes; + size_type internal_nodes; + }; + + public: + using key_type = typename Params::key_type; + using value_type = typename Params::value_type; + using size_type = typename Params::size_type; + using difference_type = typename Params::difference_type; + using key_compare = typename Params::key_compare; + using value_compare = typename Params::value_compare; + using allocator_type = typename Params::allocator_type; + using reference = typename Params::reference; + using const_reference = typename Params::const_reference; + using pointer = typename Params::pointer; + using const_pointer = typename Params::const_pointer; + using iterator = btree_iterator; + using const_iterator = typename iterator::const_iterator; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + using node_handle_type = node_handle; + + // Internal types made public for use by btree_container types. + using params_type = Params; + using slot_type = typename Params::slot_type; + + private: + // For use in copy_or_move_values_in_order. + const value_type &maybe_move_from_iterator(const_iterator x) { return *x; } + value_type &&maybe_move_from_iterator(iterator x) { return std::move(*x); } + + // Copies or moves (depending on the template parameter) the values in + // x into this btree in their order in x. This btree must be empty before this + // method is called. This method is used in copy construction, copy + // assignment, and move assignment. + template + void copy_or_move_values_in_order(Btree *x); + + // Validates that various assumptions/requirements are true at compile time. + constexpr static bool static_assert_validation(); + + public: + btree(const key_compare &comp, const allocator_type &alloc); + + btree(const btree &x); + btree(btree &&x) noexcept + : root_(std::move(x.root_)), + rightmost_(phmap::exchange(x.rightmost_, EmptyNode())), + size_(phmap::exchange(x.size_, 0)) { + x.mutable_root() = EmptyNode(); + } + + ~btree() { + // Put static_asserts in destructor to avoid triggering them before the type + // is complete. + static_assert(static_assert_validation(), "This call must be elided."); + clear(); + } + + // Assign the contents of x to *this. 
+ btree &operator=(const btree &x); + btree &operator=(btree &&x) noexcept; + + iterator begin() { + return iterator(leftmost(), 0); + } + const_iterator begin() const { + return const_iterator(leftmost(), 0); + } + iterator end() { return iterator(rightmost_, rightmost_->count()); } + const_iterator end() const { + return const_iterator(rightmost_, rightmost_->count()); + } + reverse_iterator rbegin() { + return reverse_iterator(end()); + } + const_reverse_iterator rbegin() const { + return const_reverse_iterator(end()); + } + reverse_iterator rend() { + return reverse_iterator(begin()); + } + const_reverse_iterator rend() const { + return const_reverse_iterator(begin()); + } + + // Finds the first element whose key is not less than key. + template + iterator lower_bound(const K &key) { + return internal_end(internal_lower_bound(key)); + } + template + const_iterator lower_bound(const K &key) const { + return internal_end(internal_lower_bound(key)); + } + + // Finds the first element whose key is greater than key. + template + iterator upper_bound(const K &key) { + return internal_end(internal_upper_bound(key)); + } + template + const_iterator upper_bound(const K &key) const { + return internal_end(internal_upper_bound(key)); + } + + // Finds the range of values which compare equal to key. The first member of + // the returned pair is equal to lower_bound(key). The second member pair of + // the pair is equal to upper_bound(key). + template + std::pair equal_range(const K &key) { + return {lower_bound(key), upper_bound(key)}; + } + template + std::pair equal_range(const K &key) const { + return {lower_bound(key), upper_bound(key)}; + } + + // Inserts a value into the btree only if it does not already exist. The + // boolean return value indicates whether insertion succeeded or failed. + // Requirement: if `key` already exists in the btree, does not consume `args`. + // Requirement: `key` is never referenced after consuming `args`. + template + std::pair insert_unique(const key_type &key, Args &&... args); + + // Inserts with hint. Checks to see if the value should be placed immediately + // before `position` in the tree. If so, then the insertion will take + // amortized constant time. If not, the insertion will take amortized + // logarithmic time as if a call to insert_unique() were made. + // Requirement: if `key` already exists in the btree, does not consume `args`. + // Requirement: `key` is never referenced after consuming `args`. + template + std::pair insert_hint_unique(iterator position, + const key_type &key, + Args &&... args); + + // Insert a range of values into the btree. + template + void insert_iterator_unique(InputIterator b, InputIterator e); + + // Inserts a value into the btree. + template + iterator insert_multi(const key_type &key, ValueType &&v); + + // Inserts a value into the btree. + template + iterator insert_multi(ValueType &&v) { + return insert_multi(params_type::key(v), std::forward(v)); + } + + // Insert with hint. Check to see if the value should be placed immediately + // before position in the tree. If it does, then the insertion will take + // amortized constant time. If not, the insertion will take amortized + // logarithmic time as if a call to insert_multi(v) were made. + template + iterator insert_hint_multi(iterator position, ValueType &&v); + + // Insert a range of values into the btree. + template + void insert_iterator_multi(InputIterator b, InputIterator e); + + // Erase the specified iterator from the btree. 
The iterator must be valid + // (i.e. not equal to end()). Return an iterator pointing to the node after + // the one that was erased (or end() if none exists). + // Requirement: does not read the value at `*iter`. + iterator erase(iterator iter); + + // Erases range. Returns the number of keys erased and an iterator pointing + // to the element after the last erased element. + std::pair erase(iterator begin, iterator end); + + // Erases the specified key from the btree. Returns 1 if an element was + // erased and 0 otherwise. + template + size_type erase_unique(const K &key); + + // Erases all of the entries matching the specified key from the + // btree. Returns the number of elements erased. + template + size_type erase_multi(const K &key); + + // Finds the iterator corresponding to a key or returns end() if the key is + // not present. + template + iterator find(const K &key) { + return internal_end(internal_find(key)); + } + template + const_iterator find(const K &key) const { + return internal_end(internal_find(key)); + } + + // Returns a count of the number of times the key appears in the btree. + template + size_type count_unique(const K &key) const { + const iterator begin = internal_find(key); + if (begin.node == nullptr) { + // The key doesn't exist in the tree. + return 0; + } + return 1; + } + // Returns a count of the number of times the key appears in the btree. + template + size_type count_multi(const K &key) const { + const auto range = equal_range(key); + return std::distance(range.first, range.second); + } + + // Clear the btree, deleting all of the values it contains. + void clear(); + + // Swap the contents of *this and x. + void swap(btree &x); + + const key_compare &key_comp() const noexcept { + return root_.template get<0>(); + } + template + bool compare_keys(const K &x, const LK &y) const { + return compare_internal::compare_result_as_less_than(key_comp()(x, y)); + } + + value_compare value_comp() const { return value_compare(key_comp()); } + + // Verifies the structure of the btree. + void verify() const; + + // Size routines. + size_type size() const { return size_; } + size_type max_size() const { return (std::numeric_limits::max)(); } + bool empty() const { return size_ == 0; } + + // The height of the btree. An empty tree will have height 0. + size_type height() const { + size_type h = 0; + if (!empty()) { + // Count the length of the chain from the leftmost node up to the + // root. We actually count from the root back around to the level below + // the root, but the calculation is the same because of the circularity + // of that traversal. + const node_type *n = root(); + do { + ++h; + n = n->parent(); + } while (n != root()); + } + return h; + } + + // The number of internal, leaf and total nodes used by the btree. + size_type leaf_nodes() const { + return internal_stats(root()).leaf_nodes; + } + size_type internal_nodes() const { + return internal_stats(root()).internal_nodes; + } + size_type nodes() const { + node_stats stats = internal_stats(root()); + return stats.leaf_nodes + stats.internal_nodes; + } + + // The total number of bytes used by the btree. 
+ size_type bytes_used() const { + node_stats stats = internal_stats(root()); + if (stats.leaf_nodes == 1 && stats.internal_nodes == 0) { + return sizeof(*this) + + node_type::LeafSize(root()->max_count()); + } else { + return sizeof(*this) + + stats.leaf_nodes * node_type::LeafSize() + + stats.internal_nodes * node_type::InternalSize(); + } + } + + // The average number of bytes used per value stored in the btree. + static double average_bytes_per_value() { + // Returns the number of bytes per value on a leaf node that is 75% + // full. Experimentally, this matches up nicely with the computed number of + // bytes per value in trees that had their values inserted in random order. + return node_type::LeafSize() / (kNodeValues * 0.75); + } + + // The fullness of the btree. Computed as the number of elements in the btree + // divided by the maximum number of elements a tree with the current number + // of nodes could hold. A value of 1 indicates perfect space + // utilization. Smaller values indicate space wastage. + // Returns 0 for empty trees. + double fullness() const { + if (empty()) return 0.0; + return static_cast(size()) / (nodes() * kNodeValues); + } + // The overhead of the btree structure in bytes per node. Computed as the + // total number of bytes used by the btree minus the number of bytes used for + // storing elements divided by the number of elements. + // Returns 0 for empty trees. + double overhead() const { + if (empty()) return 0.0; + return (bytes_used() - size() * sizeof(value_type)) / + static_cast(size()); + } + + // The allocator used by the btree. + allocator_type get_allocator() const { + return allocator(); + } + + private: + // Internal accessor routines. + node_type *root() { return root_.template get<2>(); } + const node_type *root() const { return root_.template get<2>(); } + node_type *&mutable_root() noexcept { return root_.template get<2>(); } + key_compare *mutable_key_comp() noexcept { return &root_.template get<0>(); } + + // The leftmost node is stored as the parent of the root node. + node_type *leftmost() { return root()->parent(); } + const node_type *leftmost() const { return root()->parent(); } + + // Allocator routines. + allocator_type *mutable_allocator() noexcept { + return &root_.template get<1>(); + } + const allocator_type &allocator() const noexcept { + return root_.template get<1>(); + } + + // Allocates a correctly aligned node of at least size bytes using the + // allocator. + node_type *allocate(const size_type size) { + return reinterpret_cast( + phmap::container_internal::Allocate( + mutable_allocator(), (size_t)size)); + } + + // Node creation/deletion routines. + node_type* new_internal_node(node_type *parent) { + node_type *p = allocate(node_type::InternalSize()); + return node_type::init_internal(p, parent); + } + node_type* new_leaf_node(node_type *parent) { + node_type *p = allocate(node_type::LeafSize()); + return node_type::init_leaf(p, parent, kNodeValues); + } + node_type *new_leaf_root_node(const int max_count) { + node_type *p = allocate(node_type::LeafSize(max_count)); + return node_type::init_leaf(p, p, max_count); + } + + // Deletion helper routines. + void erase_same_node(iterator begin, iterator end); + iterator erase_from_leaf_node(iterator begin, size_type to_erase); + iterator rebalance_after_delete(iterator iter); + + // Deallocates a node of a certain size in bytes using the allocator. 
+ void deallocate(const size_type size, node_type *node) { + phmap::container_internal::Deallocate( + mutable_allocator(), node, (size_t)size); + } + + void delete_internal_node(node_type *node) { + node->destroy(mutable_allocator()); + deallocate(node_type::InternalSize(), node); + } + void delete_leaf_node(node_type *node) { + node->destroy(mutable_allocator()); + deallocate(node_type::LeafSize(node->max_count()), node); + } + + // Rebalances or splits the node iter points to. + void rebalance_or_split(iterator *iter); + + // Merges the values of left, right and the delimiting key on their parent + // onto left, removing the delimiting key and deleting right. + void merge_nodes(node_type *left, node_type *right); + + // Tries to merge node with its left or right sibling, and failing that, + // rebalance with its left or right sibling. Returns true if a merge + // occurred, at which point it is no longer valid to access node. Returns + // false if no merging took place. + bool try_merge_or_rebalance(iterator *iter); + + // Tries to shrink the height of the tree by 1. + void try_shrink(); + + iterator internal_end(iterator iter) { + return iter.node != nullptr ? iter : end(); + } + const_iterator internal_end(const_iterator iter) const { + return iter.node != nullptr ? iter : end(); + } + + // Emplaces a value into the btree immediately before iter. Requires that + // key(v) <= iter.key() and (--iter).key() <= key(v). + template + iterator internal_emplace(iterator iter, Args &&... args); + + // Returns an iterator pointing to the first value >= the value "iter" is + // pointing at. Note that "iter" might be pointing to an invalid location as + // iter.position == iter.node->count(). This routine simply moves iter up in + // the tree to a valid location. + // Requires: iter.node is non-null. + template + static IterType internal_last(IterType iter); + + // Returns an iterator pointing to the leaf position at which key would + // reside in the tree. We provide 2 versions of internal_locate. The first + // version uses a less-than comparator and is incapable of distinguishing when + // there is an exact match. The second version is for the key-compare-to + // specialization and distinguishes exact matches. The key-compare-to + // specialization allows the caller to avoid a subsequent comparison to + // determine if an exact match was made, which is important for keys with + // expensive comparison, such as strings. + template + SearchResult internal_locate( + const K &key) const; + + template + SearchResult internal_locate_impl( + const K &key, std::false_type /* IsCompareTo */) const; + + template + SearchResult internal_locate_impl( + const K &key, std::true_type /* IsCompareTo */) const; + + // Internal routine which implements lower_bound(). + template + iterator internal_lower_bound(const K &key) const; + + // Internal routine which implements upper_bound(). + template + iterator internal_upper_bound(const K &key) const; + + // Internal routine which implements find(). + template + iterator internal_find(const K &key) const; + + // Deletes a node and all of its children. + void internal_clear(node_type *node); + + // Verifies the tree structure of node. + int internal_verify(const node_type *node, + const key_type *lo, const key_type *hi) const; + + node_stats internal_stats(const node_type *node) const { + // The root can be a static empty node. 
+ if (node == nullptr || (node == root() && empty())) { + return node_stats(0, 0); + } + if (node->leaf()) { + return node_stats(1, 0); + } + node_stats res(0, 1); + for (int i = 0; i <= node->count(); ++i) { + res += internal_stats(node->child(i)); + } + return res; + } + + public: + // Exposed only for tests. + static bool testonly_uses_linear_node_search() { + return node_type::testonly_uses_linear_node_search(); + } + + private: + // We use compressed tuple in order to save space because key_compare and + // allocator_type are usually empty. + phmap::container_internal::CompressedTuple + root_; + + // A pointer to the rightmost node. Note that the leftmost node is stored as + // the root's parent. + node_type *rightmost_; + + // Number of values. + size_type size_; + }; + + //// + // btree_node methods + template + template + inline void btree_node
<P>
::emplace_value(const size_type i, + allocator_type *alloc, + Args &&... args) { + assert(i <= count()); + // Shift old values to create space for new value and then construct it in + // place. + if (i < count()) { + value_init(count(), alloc, slot(count() - 1)); + for (size_type j = count() - 1; j > i; --j) + params_type::move(alloc, slot(j - 1), slot(j)); + value_destroy(i, alloc); + } + value_init(i, alloc, std::forward(args)...); + set_count((field_type)(count() + 1)); + + if (!leaf() && count() > i + 1) { + for (int j = count(); j > i + 1; --j) { + set_child(j, child(j - 1)); + } + clear_child(i + 1); + } + } + + template + inline void btree_node
<P>
::remove_value(const int i, allocator_type *alloc) { + if (!leaf() && count() > i + 1) { + assert(child(i + 1)->count() == 0); + for (size_type j = i + 1; j < count(); ++j) { + set_child(j, child(j + 1)); + } + clear_child(count()); + } + + remove_values_ignore_children(i, /*to_erase=*/1, alloc); + } + + template + inline void btree_node
<P>
::remove_values_ignore_children( + int i, size_type to_erase, allocator_type *alloc) { + params_type::move(alloc, slot(i + to_erase), slot(count()), slot(i)); + value_destroy_n(count() - to_erase, to_erase, alloc); + set_count((field_type)(count() - to_erase)); + } + + template + void btree_node
<P>
::rebalance_right_to_left(const int to_move, + btree_node *right, + allocator_type *alloc) { + assert(parent() == right->parent()); + assert(position() + 1 == right->position()); + assert(right->count() >= count()); + assert(to_move >= 1); + assert(to_move <= right->count()); + + // 1) Move the delimiting value in the parent to the left node. + value_init(count(), alloc, parent()->slot(position())); + + // 2) Move the (to_move - 1) values from the right node to the left node. + right->uninitialized_move_n(to_move - 1, 0, count() + 1, this, alloc); + + // 3) Move the new delimiting value to the parent from the right node. + params_type::move(alloc, right->slot(to_move - 1), + parent()->slot(position())); + + // 4) Shift the values in the right node to their correct position. + params_type::move(alloc, right->slot(to_move), right->slot(right->count()), + right->slot(0)); + + // 5) Destroy the now-empty to_move entries in the right node. + right->value_destroy_n(right->count() - to_move, to_move, alloc); + + if (!leaf()) { + // Move the child pointers from the right to the left node. + for (int i = 0; i < to_move; ++i) { + init_child(count() + i + 1, right->child(i)); + } + for (int i = 0; i <= right->count() - to_move; ++i) { + assert(i + to_move <= right->max_count()); + right->init_child(i, right->child(i + to_move)); + right->clear_child(i + to_move); + } + } + + // Fixup the counts on the left and right nodes. + set_count((field_type)(count() + to_move)); + right->set_count((field_type)(right->count() - to_move)); + } + + template + void btree_node
<P>
::rebalance_left_to_right(const int to_move, + btree_node *right, + allocator_type *alloc) { + assert(parent() == right->parent()); + assert(position() + 1 == right->position()); + assert(count() >= right->count()); + assert(to_move >= 1); + assert(to_move <= count()); + + // Values in the right node are shifted to the right to make room for the + // new to_move values. Then, the delimiting value in the parent and the + // other (to_move - 1) values in the left node are moved into the right node. + // Lastly, a new delimiting value is moved from the left node into the + // parent, and the remaining empty left node entries are destroyed. + + if (right->count() >= to_move) { + // The original location of the right->count() values are sufficient to hold + // the new to_move entries from the parent and left node. + + // 1) Shift existing values in the right node to their correct positions. + right->uninitialized_move_n(to_move, right->count() - to_move, + right->count(), right, alloc); + for (slot_type *src = right->slot(right->count() - to_move - 1), + *dest = right->slot(right->count() - 1), + *end = right->slot(0); + src >= end; --src, --dest) { + params_type::move(alloc, src, dest); + } + + // 2) Move the delimiting value in the parent to the right node. + params_type::move(alloc, parent()->slot(position()), + right->slot(to_move - 1)); + + // 3) Move the (to_move - 1) values from the left node to the right node. + params_type::move(alloc, slot(count() - (to_move - 1)), slot(count()), + right->slot(0)); + } else { + // The right node does not have enough initialized space to hold the new + // to_move entries, so part of them will move to uninitialized space. + + // 1) Shift existing values in the right node to their correct positions. + right->uninitialized_move_n(right->count(), 0, to_move, right, alloc); + + // 2) Move the delimiting value in the parent to the right node. + right->value_init(to_move - 1, alloc, parent()->slot(position())); + + // 3) Move the (to_move - 1) values from the left node to the right node. + const size_type uninitialized_remaining = to_move - right->count() - 1; + uninitialized_move_n(uninitialized_remaining, + count() - uninitialized_remaining, right->count(), + right, alloc); + params_type::move(alloc, slot(count() - (to_move - 1)), + slot(count() - uninitialized_remaining), right->slot(0)); + } + + // 4) Move the new delimiting value to the parent from the left node. + params_type::move(alloc, slot(count() - to_move), parent()->slot(position())); + + // 5) Destroy the now-empty to_move entries in the left node. + value_destroy_n(count() - to_move, to_move, alloc); + + if (!leaf()) { + // Move the child pointers from the left to the right node. + for (int i = right->count(); i >= 0; --i) { + right->init_child(i + to_move, right->child(i)); + right->clear_child(i); + } + for (int i = 1; i <= to_move; ++i) { + right->init_child(i - 1, child(count() - to_move + i)); + clear_child(count() - to_move + i); + } + } + + // Fixup the counts on the left and right nodes. + set_count((field_type)(count() - to_move)); + right->set_count((field_type)(right->count() + to_move)); + } + + template + void btree_node
<P>
::split(const int insert_position, btree_node *dest, + allocator_type *alloc) { + assert(dest->count() == 0); + assert(max_count() == kNodeValues); + + // We bias the split based on the position being inserted. If we're + // inserting at the beginning of the left node then bias the split to put + // more values on the right node. If we're inserting at the end of the + // right node then bias the split to put more values on the left node. + if (insert_position == 0) { + dest->set_count((field_type)(count() - 1)); + } else if (insert_position == kNodeValues) { + dest->set_count(0); + } else { + dest->set_count((field_type)(count() / 2)); + } + set_count((field_type)(count() - dest->count())); + assert(count() >= 1); + + // Move values from the left sibling to the right sibling. + uninitialized_move_n(dest->count(), count(), 0, dest, alloc); + + // Destroy the now-empty entries in the left node. + value_destroy_n(count(), dest->count(), alloc); + + // The split key is the largest value in the left sibling. + set_count((field_type)(count() - 1)); + parent()->emplace_value(position(), alloc, slot(count())); + value_destroy(count(), alloc); + parent()->init_child(position() + 1, dest); + + if (!leaf()) { + for (int i = 0; i <= dest->count(); ++i) { + assert(child(count() + i + 1) != nullptr); + dest->init_child(i, child(count() + i + 1)); + clear_child(count() + i + 1); + } + } + } + + template + void btree_node
<P>
::merge(btree_node *src, allocator_type *alloc) { + assert(parent() == src->parent()); + assert(position() + 1 == src->position()); + + // Move the delimiting value to the left node. + value_init(count(), alloc, parent()->slot(position())); + + // Move the values from the right to the left node. + src->uninitialized_move_n(src->count(), 0, count() + 1, this, alloc); + + // Destroy the now-empty entries in the right node. + src->value_destroy_n(0, src->count(), alloc); + + if (!leaf()) { + // Move the child pointers from the right to the left node. + for (int i = 0; i <= src->count(); ++i) { + init_child(count() + i + 1, src->child(i)); + src->clear_child(i); + } + } + + // Fixup the counts on the src and dest nodes. + set_count((field_type)(1 + count() + src->count())); + src->set_count(0); + + // Remove the value on the parent node. + parent()->remove_value(position(), alloc); + } + + template + void btree_node
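+ // Illustrative example (not from the original source): merging left = [a b]
+ // and right = [x y] under parent delimiter m pulls m down and appends the
+ // right node's values, yielding left = [a b m x y]; m is removed from the
+ // parent, and the emptied right node is freed by the caller (see
+ // merge_nodes further below).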
<P>
::swap(btree_node *x, allocator_type *alloc) { + using std::swap; + assert(leaf() == x->leaf()); + + // Determine which is the smaller/larger node. + btree_node *smaller = this, *larger = x; + if (smaller->count() > larger->count()) { + swap(smaller, larger); + } + + // Swap the values. + for (slot_type *a = smaller->slot(0), *b = larger->slot(0), + *end = a + smaller->count(); + a != end; ++a, ++b) { + params_type::swap(alloc, a, b); + } + + // Move values that can't be swapped. + const size_type to_move = larger->count() - smaller->count(); + larger->uninitialized_move_n(to_move, smaller->count(), smaller->count(), + smaller, alloc); + larger->value_destroy_n(smaller->count(), to_move, alloc); + + if (!leaf()) { + // Swap the child pointers. + std::swap_ranges(&smaller->mutable_child(0), + &smaller->mutable_child(smaller->count() + 1), + &larger->mutable_child(0)); + // Update swapped children's parent pointers. + int i = 0; + for (; i <= smaller->count(); ++i) { + smaller->child(i)->set_parent(smaller); + larger->child(i)->set_parent(larger); + } + // Move the child pointers that couldn't be swapped. + for (; i <= larger->count(); ++i) { + smaller->init_child(i, larger->child(i)); + larger->clear_child(i); + } + } + + // Swap the counts. + swap(mutable_count(), x->mutable_count()); + } + + //// + // btree_iterator methods + template + void btree_iterator::increment_slow() { + if (node->leaf()) { + assert(position >= node->count()); + btree_iterator save(*this); + while (position == node->count() && !node->is_root()) { + assert(node->parent()->child(node->position()) == node); + position = node->position(); + node = node->parent(); + } + if (position == node->count()) { + *this = save; + } + } else { + assert(position < node->count()); + node = node->child(position + 1); + while (!node->leaf()) { + node = node->child(0); + } + position = 0; + } + } + + template + void btree_iterator::decrement_slow() { + if (node->leaf()) { + assert(position <= -1); + btree_iterator save(*this); + while (position < 0 && !node->is_root()) { + assert(node->parent()->child(node->position()) == node); + position = node->position() - 1; + node = node->parent(); + } + if (position < 0) { + *this = save; + } + } else { + assert(position >= 0); + node = node->child(position); + while (!node->leaf()) { + node = node->child(node->count()); + } + position = node->count() - 1; + } + } + + //// + // btree methods + template + template + void btree
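+ // Note on the iterator helpers above (inferred from the code):
+ // increment_slow() walks up from the end of a leaf until it finds an
+ // ancestor with a right neighbor, then descends to the leftmost leaf of
+ // that subtree; if no such ancestor exists, the saved iterator is restored
+ // so the result stays past-the-end. decrement_slow() is the mirror image.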
<P>
::copy_or_move_values_in_order(Btree *x) { + static_assert(std::is_same::value || + std::is_same::value, + "Btree type must be same or const."); + assert(empty()); + + // We can avoid key comparisons because we know the order of the + // values is the same order we'll store them in. + auto iter = x->begin(); + if (iter == x->end()) return; + insert_multi(maybe_move_from_iterator(iter)); + ++iter; + for (; iter != x->end(); ++iter) { + // If the btree is not empty, we can just insert the new value at the end + // of the tree. + internal_emplace(end(), maybe_move_from_iterator(iter)); + } + } + + template + constexpr bool btree
<P>
::static_assert_validation() { + static_assert(std::is_nothrow_copy_constructible::value, + "Key comparison must be nothrow copy constructible"); + static_assert(std::is_nothrow_copy_constructible::value, + "Allocator must be nothrow copy constructible"); + static_assert(type_traits_internal::is_trivially_copyable::value, + "iterator not trivially copyable."); + + // Note: We assert that kTargetValues, which is computed from + // Params::kTargetNodeSize, must fit the node_type::field_type. + static_assert( + kNodeValues < (1 << (8 * sizeof(typename node_type::field_type))), + "target node size too large"); + + // Verify that key_compare returns an phmap::{weak,strong}_ordering or bool. + using compare_result_type = + phmap::result_of_t; + static_assert( + std::is_same::value || + std::is_convertible::value, + "key comparison function must return phmap::{weak,strong}_ordering or " + "bool."); + + // Test the assumption made in setting kNodeValueSpace. + static_assert(node_type::MinimumOverhead() >= sizeof(void *) + 4, + "node space assumption incorrect"); + + return true; + } + + template + btree
<P>
::btree(const key_compare &comp, const allocator_type &alloc) + : root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {} + + template + btree
<P>
::btree(const btree &x) : btree(x.key_comp(), x.allocator()) { + copy_or_move_values_in_order(&x); + } + + template + template + auto btree
<P>
::insert_unique(const key_type &key, Args &&... args) + -> std::pair { + if (empty()) { + mutable_root() = rightmost_ = new_leaf_root_node(1); + } + + auto res = internal_locate(key); + iterator &iter = res.value; + + if (res.HasMatch()) { + if (res.IsEq()) { + // The key already exists in the tree, do nothing. + return {iter, false}; + } + } else { + iterator last = internal_last(iter); + if (last.node && !compare_keys(key, last.key())) { + // The key already exists in the tree, do nothing. + return {last, false}; + } + } + return {internal_emplace(iter, std::forward(args)...), true}; + } + + template + template + inline auto btree
<P>
::insert_hint_unique(iterator position, const key_type &key, + Args &&... args) + -> std::pair { + if (!empty()) { + if (position == end() || compare_keys(key, position.key())) { + iterator prev = position; + if (position == begin() || compare_keys((--prev).key(), key)) { + // prev.key() < key < position.key() + return {internal_emplace(position, std::forward(args)...), true}; + } + } else if (compare_keys(position.key(), key)) { + ++position; + if (position == end() || compare_keys(key, position.key())) { + // {original `position`}.key() < key < {current `position`}.key() + return {internal_emplace(position, std::forward(args)...), true}; + } + } else { + // position.key() == key + return {position, false}; + } + } + return insert_unique(key, std::forward(args)...); + } + + template + template + void btree
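+ // Usage note (inferred from the code above): insert_hint_unique costs only
+ // one or two key comparisons when the hint is correct and otherwise falls
+ // back to a full insert_unique descent. insert_iterator_unique below always
+ // passes end() as the hint, which makes bulk-inserting already-sorted input
+ // cheap.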
<P>
::insert_iterator_unique(InputIterator b, InputIterator e) { + for (; b != e; ++b) { + insert_hint_unique(end(), params_type::key(*b), *b); + } + } + + template + template + auto btree
<P>
::insert_multi(const key_type &key, ValueType &&v) -> iterator { + if (empty()) { + mutable_root() = rightmost_ = new_leaf_root_node(1); + } + + iterator iter = internal_upper_bound(key); + if (iter.node == nullptr) { + iter = end(); + } + return internal_emplace(iter, std::forward(v)); + } + + template + template + auto btree
<P>
::insert_hint_multi(iterator position, ValueType &&v) -> iterator { + if (!empty()) { + const key_type &key = params_type::key(v); + if (position == end() || !compare_keys(position.key(), key)) { + iterator prev = position; + if (position == begin() || !compare_keys(key, (--prev).key())) { + // prev.key() <= key <= position.key() + return internal_emplace(position, std::forward(v)); + } + } else { + iterator next = position; + ++next; + if (next == end() || !compare_keys(next.key(), key)) { + // position.key() < key <= next.key() + return internal_emplace(next, std::forward(v)); + } + } + } + return insert_multi(std::forward(v)); + } + + template + template + void btree
<P>
::insert_iterator_multi(InputIterator b, InputIterator e) { + for (; b != e; ++b) { + insert_hint_multi(end(), *b); + } + } + + template + auto btree
<P>
::operator=(const btree &x) -> btree & { + if (this != &x) { + clear(); + + *mutable_key_comp() = x.key_comp(); + if (phmap::allocator_traits< + allocator_type>::propagate_on_container_copy_assignment::value) { + *mutable_allocator() = x.allocator(); + } + + copy_or_move_values_in_order(&x); + } + return *this; + } + + template + auto btree
<P>
::operator=(btree &&x) noexcept -> btree & { + if (this != &x) { + clear(); + + using std::swap; + if (phmap::allocator_traits< + allocator_type>::propagate_on_container_copy_assignment::value) { + // Note: `root_` also contains the allocator and the key comparator. + swap(root_, x.root_); + swap(rightmost_, x.rightmost_); + swap(size_, x.size_); + } else { + if (allocator() == x.allocator()) { + swap(mutable_root(), x.mutable_root()); + swap(*mutable_key_comp(), *x.mutable_key_comp()); + swap(rightmost_, x.rightmost_); + swap(size_, x.size_); + } else { + // We aren't allowed to propagate the allocator and the allocator is + // different so we can't take over its memory. We must move each element + // individually. We need both `x` and `this` to have `x`s key comparator + // while moving the values so we can't swap the key comparators. + *mutable_key_comp() = x.key_comp(); + copy_or_move_values_in_order(&x); + } + } + } + return *this; + } + + template + auto btree
<P>
::erase(iterator iter) -> iterator { + bool internal_delete = false; + if (!iter.node->leaf()) { + // Deletion of a value on an internal node. First, move the largest value + // from our left child here, then delete that position (in remove_value() + // below). We can get to the largest value from our left child by + // decrementing iter. + iterator internal_iter(iter); + --iter; + assert(iter.node->leaf()); + params_type::move(mutable_allocator(), iter.node->slot(iter.position), + internal_iter.node->slot(internal_iter.position)); + internal_delete = true; + } + + // Delete the key from the leaf. + iter.node->remove_value(iter.position, mutable_allocator()); + --size_; + + // We want to return the next value after the one we just erased. If we + // erased from an internal node (internal_delete == true), then the next + // value is ++(++iter). If we erased from a leaf node (internal_delete == + // false) then the next value is ++iter. Note that ++iter may point to an + // internal node and the value in the internal node may move to a leaf node + // (iter.node) when rebalancing is performed at the leaf level. + + iterator res = rebalance_after_delete(iter); + + // If we erased from an internal node, advance the iterator. + if (internal_delete) { + ++res; + } + return res; + } + + template + auto btree
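+ // Illustrative example (not from the original source): erasing a key that
+ // lives on an internal node first moves its in-order predecessor (the
+ // largest value of the left subtree, always on a leaf) into the internal
+ // slot, then deletes the vacated leaf position; the extra ++res at the end
+ // compensates for returning past the moved value.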
<P>
::rebalance_after_delete(iterator iter) -> iterator { + // Merge/rebalance as we walk back up the tree. + iterator res(iter); + bool first_iteration = true; + for (;;) { + if (iter.node == root()) { + try_shrink(); + if (empty()) { + return end(); + } + break; + } + if (iter.node->count() >= kMinNodeValues) { + break; + } + bool merged = try_merge_or_rebalance(&iter); + // On the first iteration, we should update `res` with `iter` because `res` + // may have been invalidated. + if (first_iteration) { + res = iter; + first_iteration = false; + } + if (!merged) { + break; + } + iter.position = iter.node->position(); + iter.node = iter.node->parent(); + } + + // Adjust our return value. If we're pointing at the end of a node, advance + // the iterator. + if (res.position == res.node->count()) { + res.position = res.node->count() - 1; + ++res; + } + + return res; + } + + template + auto btree
<P>
::erase(iterator begin, iterator end) + -> std::pair { + difference_type count = std::distance(begin, end); + assert(count >= 0); + + if (count == 0) { + return {0, begin}; + } + + if (count == size_) { + clear(); + return {count, this->end()}; + } + + if (begin.node == end.node) { + erase_same_node(begin, end); + size_ -= count; + return {count, rebalance_after_delete(begin)}; + } + + const size_type target_size = size_ - count; + while (size_ > target_size) { + if (begin.node->leaf()) { + const size_type remaining_to_erase = size_ - target_size; + const size_type remaining_in_node = begin.node->count() - begin.position; + begin = erase_from_leaf_node( + begin, (std::min)(remaining_to_erase, remaining_in_node)); + } else { + begin = erase(begin); + } + } + return {count, begin}; + } + + template + void btree
<P>
::erase_same_node(iterator begin, iterator end) { + assert(begin.node == end.node); + assert(end.position > begin.position); + + node_type *node = begin.node; + size_type to_erase = end.position - begin.position; + if (!node->leaf()) { + // Delete all children between begin and end. + for (size_type i = 0; i < to_erase; ++i) { + internal_clear(node->child(begin.position + i + 1)); + } + // Rotate children after end into new positions. + for (size_type i = begin.position + to_erase + 1; i <= node->count(); ++i) { + node->set_child(i - to_erase, node->child(i)); + node->clear_child(i); + } + } + node->remove_values_ignore_children(begin.position, to_erase, + mutable_allocator()); + + // Do not need to update rightmost_, because + // * either end == this->end(), and therefore node == rightmost_, and still + // exists + // * or end != this->end(), and therefore rightmost_ hasn't been erased, since + // it wasn't covered in [begin, end) + } + + template + auto btree
<P>
::erase_from_leaf_node(iterator begin, size_type to_erase) + -> iterator { + node_type *node = begin.node; + assert(node->leaf()); + assert(node->count() > begin.position); + assert(begin.position + to_erase <= node->count()); + + node->remove_values_ignore_children(begin.position, to_erase, + mutable_allocator()); + + size_ -= to_erase; + + return rebalance_after_delete(begin); + } + + template + template + auto btree
<P>
::erase_unique(const K &key) -> size_type { + const iterator iter = internal_find(key); + if (iter.node == nullptr) { + // The key doesn't exist in the tree, return nothing done. + return 0; + } + erase(iter); + return 1; + } + + template + template + auto btree
<P>
::erase_multi(const K &key) -> size_type { + const iterator begin = internal_lower_bound(key); + if (begin.node == nullptr) { + // The key doesn't exist in the tree, return nothing done. + return 0; + } + // Delete all of the keys between begin and upper_bound(key). + const iterator end = internal_end(internal_upper_bound(key)); + return erase(begin, end).first; + } + + template + void btree
<P>
::clear() { + if (!empty()) { + internal_clear(root()); + } + mutable_root() = EmptyNode(); + rightmost_ = EmptyNode(); + size_ = 0; + } + + template + void btree
<P>
::swap(btree &x) { + using std::swap; + if (phmap::allocator_traits< + allocator_type>::propagate_on_container_swap::value) { + // Note: `root_` also contains the allocator and the key comparator. + swap(root_, x.root_); + } else { + // It's undefined behavior if the allocators are unequal here. + assert(allocator() == x.allocator()); + swap(mutable_root(), x.mutable_root()); + swap(*mutable_key_comp(), *x.mutable_key_comp()); + } + swap(rightmost_, x.rightmost_); + swap(size_, x.size_); + } + + template + void btree
<P>
::verify() const { + assert(root() != nullptr); + assert(leftmost() != nullptr); + assert(rightmost_ != nullptr); + assert(empty() || size() == internal_verify(root(), nullptr, nullptr)); + assert(leftmost() == (++const_iterator(root(), -1)).node); + assert(rightmost_ == (--const_iterator(root(), root()->count())).node); + assert(leftmost()->leaf()); + assert(rightmost_->leaf()); + } + + template + void btree
<P>
::rebalance_or_split(iterator *iter) { + node_type *&node = iter->node; + int &insert_position = iter->position; + assert(node->count() == node->max_count()); + assert(kNodeValues == node->max_count()); + + // First try to make room on the node by rebalancing. + node_type *parent = node->parent(); + if (node != root()) { + if (node->position() > 0) { + // Try rebalancing with our left sibling. + node_type *left = parent->child(node->position() - 1); + assert(left->max_count() == kNodeValues); + if (left->count() < kNodeValues) { + // We bias rebalancing based on the position being inserted. If we're + // inserting at the end of the right node then we bias rebalancing to + // fill up the left node. + int to_move = (kNodeValues - left->count()) / + (1 + (insert_position < kNodeValues)); + to_move = (std::max)(1, to_move); + + if (((insert_position - to_move) >= 0) || + ((left->count() + to_move) < kNodeValues)) { + left->rebalance_right_to_left(to_move, node, mutable_allocator()); + + assert(node->max_count() - node->count() == to_move); + insert_position = insert_position - to_move; + if (insert_position < 0) { + insert_position = insert_position + left->count() + 1; + node = left; + } + + assert(node->count() < node->max_count()); + return; + } + } + } + + if (node->position() < parent->count()) { + // Try rebalancing with our right sibling. + node_type *right = parent->child(node->position() + 1); + assert(right->max_count() == kNodeValues); + if (right->count() < kNodeValues) { + // We bias rebalancing based on the position being inserted. If we're + // inserting at the beginning of the left node then we bias rebalancing + // to fill up the right node. + int to_move = + (kNodeValues - right->count()) / (1 + (insert_position > 0)); + to_move = (std::max)(1, to_move); + + if ((insert_position <= (node->count() - to_move)) || + ((right->count() + to_move) < kNodeValues)) { + node->rebalance_left_to_right(to_move, right, mutable_allocator()); + + if (insert_position > node->count()) { + insert_position = insert_position - node->count() - 1; + node = right; + } + + assert(node->count() < node->max_count()); + return; + } + } + } + + // Rebalancing failed, make sure there is room on the parent node for a new + // value. + assert(parent->max_count() == kNodeValues); + if (parent->count() == kNodeValues) { + iterator parent_iter(node->parent(), node->position()); + rebalance_or_split(&parent_iter); + } + } else { + // Rebalancing not possible because this is the root node. + // Create a new root node and set the current root node as the child of the + // new root. + parent = new_internal_node(parent); + parent->init_child(0, root()); + mutable_root() = parent; + // If the former root was a leaf node, then it's now the rightmost node. + assert(!parent->child(0)->leaf() || parent->child(0) == rightmost_); + } + + // Split the node. + node_type *split_node; + if (node->leaf()) { + split_node = new_leaf_node(parent); + node->split(insert_position, split_node, mutable_allocator()); + if (rightmost_ == node) rightmost_ = split_node; + } else { + split_node = new_internal_node(parent); + node->split(insert_position, split_node, mutable_allocator()); + } + + if (insert_position > node->count()) { + insert_position = insert_position - node->count() - 1; + node = split_node; + } + } + + template + void btree
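+ // Note (inferred from the code above): a split is the only way the tree
+ // gains height. When rebalancing with both siblings fails and the parent is
+ // itself full, rebalance_or_split first recurses on the parent; at the
+ // root, a fresh internal node is allocated and becomes the new root before
+ // the node is split.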
<P>
::merge_nodes(node_type *left, node_type *right) { + left->merge(right, mutable_allocator()); + if (right->leaf()) { + if (rightmost_ == right) rightmost_ = left; + delete_leaf_node(right); + } else { + delete_internal_node(right); + } + } + + template + bool btree
<P>
::try_merge_or_rebalance(iterator *iter) { + node_type *parent = iter->node->parent(); + if (iter->node->position() > 0) { + // Try merging with our left sibling. + node_type *left = parent->child(iter->node->position() - 1); + assert(left->max_count() == kNodeValues); + if ((1 + left->count() + iter->node->count()) <= kNodeValues) { + iter->position += 1 + left->count(); + merge_nodes(left, iter->node); + iter->node = left; + return true; + } + } + if (iter->node->position() < parent->count()) { + // Try merging with our right sibling. + node_type *right = parent->child(iter->node->position() + 1); + assert(right->max_count() == kNodeValues); + if ((1 + iter->node->count() + right->count()) <= kNodeValues) { + merge_nodes(iter->node, right); + return true; + } + // Try rebalancing with our right sibling. We don't perform rebalancing if + // we deleted the first element from iter->node and the node is not + // empty. This is a small optimization for the common pattern of deleting + // from the front of the tree. + if ((right->count() > kMinNodeValues) && + ((iter->node->count() == 0) || + (iter->position > 0))) { + int to_move = (right->count() - iter->node->count()) / 2; + to_move = (std::min)(to_move, right->count() - 1); + iter->node->rebalance_right_to_left(to_move, right, mutable_allocator()); + return false; + } + } + if (iter->node->position() > 0) { + // Try rebalancing with our left sibling. We don't perform rebalancing if + // we deleted the last element from iter->node and the node is not + // empty. This is a small optimization for the common pattern of deleting + // from the back of the tree. + node_type *left = parent->child(iter->node->position() - 1); + if ((left->count() > kMinNodeValues) && + ((iter->node->count() == 0) || + (iter->position < iter->node->count()))) { + int to_move = (left->count() - iter->node->count()) / 2; + to_move = (std::min)(to_move, left->count() - 1); + left->rebalance_left_to_right(to_move, iter->node, mutable_allocator()); + iter->position += to_move; + return false; + } + } + return false; + } + + template + void btree
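+ // Note (inferred from the code above): try_merge_or_rebalance returns true
+ // only when two nodes were actually merged, which is what lets
+ // rebalance_after_delete continue walking up the tree; a successful
+ // rebalance returns false, since the node keeps enough values and no parent
+ // entry was removed.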
<P>
::try_shrink() { + if (root()->count() > 0) { + return; + } + // Deleted the last item on the root node, shrink the height of the tree. + if (root()->leaf()) { + assert(size() == 0); + delete_leaf_node(root()); + mutable_root() = EmptyNode(); + rightmost_ = EmptyNode(); + } else { + node_type *child = root()->child(0); + child->make_root(); + delete_internal_node(root()); + mutable_root() = child; + } + } + + template + template + inline IterType btree
<P>
::internal_last(IterType iter) { + assert(iter.node != nullptr); + while (iter.position == iter.node->count()) { + iter.position = iter.node->position(); + iter.node = iter.node->parent(); + if (iter.node->leaf()) { + iter.node = nullptr; + break; + } + } + return iter; + } + + template + template + inline auto btree
<P>
::internal_emplace(iterator iter, Args &&... args) + -> iterator { + if (!iter.node->leaf()) { + // We can't insert on an internal node. Instead, we'll insert after the + // previous value which is guaranteed to be on a leaf node. + --iter; + ++iter.position; + } + const int max_count = iter.node->max_count(); + if (iter.node->count() == max_count) { + // Make room in the leaf for the new item. + if (max_count < kNodeValues) { + // Insertion into the root where the root is smaller than the full node + // size. Simply grow the size of the root node. + assert(iter.node == root()); + iter.node = + new_leaf_root_node((std::min)(kNodeValues, 2 * max_count)); + iter.node->swap(root(), mutable_allocator()); + delete_leaf_node(root()); + mutable_root() = iter.node; + rightmost_ = iter.node; + } else { + rebalance_or_split(&iter); + } + } + iter.node->emplace_value(iter.position, mutable_allocator(), + std::forward(args)...); + ++size_; + return iter; + } + + template + template + inline auto btree
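+ // Note (inferred from internal_emplace above and insert_unique/insert_multi
+ // earlier): a new tree starts with a leaf root of capacity 1
+ // (new_leaf_root_node(1)), and internal_emplace doubles that capacity, up
+ // to kNodeValues, before any real split is needed, so small trees stay in a
+ // single right-sized node.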
<P>
::internal_locate(const K &key) const + -> SearchResult { + return internal_locate_impl(key, is_key_compare_to()); + } + + template + template + inline auto btree
<P>
::internal_locate_impl( + const K &key, std::false_type /* IsCompareTo */) const + -> SearchResult { + iterator iter(const_cast(root()), 0); + for (;;) { + iter.position = iter.node->lower_bound(key, key_comp()).value; + // NOTE: we don't need to walk all the way down the tree if the keys are + // equal, but determining equality would require doing an extra comparison + // on each node on the way down, and we will need to go all the way to the + // leaf node in the expected case. + if (iter.node->leaf()) { + break; + } + iter.node = iter.node->child(iter.position); + } + return {iter}; + } + + template + template + inline auto btree
<P>
::internal_locate_impl( + const K &key, std::true_type /* IsCompareTo */) const + -> SearchResult { + iterator iter(const_cast(root()), 0); + for (;;) { + SearchResult res = iter.node->lower_bound(key, key_comp()); + iter.position = res.value; + if (res.match == MatchKind::kEq) { + return {iter, MatchKind::kEq}; + } + if (iter.node->leaf()) { + break; + } + iter.node = iter.node->child(iter.position); + } + return {iter, MatchKind::kNe}; + } + + template + template + auto btree
<P>
::internal_lower_bound(const K &key) const -> iterator { + iterator iter(const_cast(root()), 0); + for (;;) { + iter.position = iter.node->lower_bound(key, key_comp()).value; + if (iter.node->leaf()) { + break; + } + iter.node = iter.node->child(iter.position); + } + return internal_last(iter); + } + + template + template + auto btree
<P>
::internal_upper_bound(const K &key) const -> iterator { + iterator iter(const_cast(root()), 0); + for (;;) { + iter.position = iter.node->upper_bound(key, key_comp()); + if (iter.node->leaf()) { + break; + } + iter.node = iter.node->child(iter.position); + } + return internal_last(iter); + } + + template + template + auto btree
<P>
::internal_find(const K &key) const -> iterator { + auto res = internal_locate(key); + if (res.HasMatch()) { + if (res.IsEq()) { + return res.value; + } + } else { + const iterator iter = internal_last(res.value); + if (iter.node != nullptr && !compare_keys(key, iter.key())) { + return iter; + } + } + return {nullptr, 0}; + } + + template + void btree
<P>
::internal_clear(node_type *node) { + if (!node->leaf()) { + for (int i = 0; i <= node->count(); ++i) { + internal_clear(node->child(i)); + } + delete_internal_node(node); + } else { + delete_leaf_node(node); + } + } + + template + int btree
<P>
::internal_verify( + const node_type *node, const key_type *lo, const key_type *hi) const { + assert(node->count() > 0); + assert(node->count() <= node->max_count()); + if (lo) { + assert(!compare_keys(node->key(0), *lo)); + } + if (hi) { + assert(!compare_keys(*hi, node->key(node->count() - 1))); + } + for (int i = 1; i < node->count(); ++i) { + assert(!compare_keys(node->key(i), node->key(i - 1))); + } + int count = node->count(); + if (!node->leaf()) { + for (int i = 0; i <= node->count(); ++i) { + assert(node->child(i) != nullptr); + assert(node->child(i)->parent() == node); + assert(node->child(i)->position() == i); + count += internal_verify( + node->child(i), + (i == 0) ? lo : &node->key(i - 1), + (i == node->count()) ? hi : &node->key(i)); + } + } + return count; + } + + // A common base class for btree_set, btree_map, btree_multiset, and btree_multimap. + // --------------------------------------------------------------------------------- + template + class btree_container { + using params_type = typename Tree::params_type; + + protected: + // Alias used for heterogeneous lookup functions. + // `key_arg` evaluates to `K` when the functors are transparent and to + // `key_type` otherwise. It permits template argument deduction on `K` for the + // transparent case. + template + using key_arg = + typename KeyArg::value>:: + template type; + + public: + using key_type = typename Tree::key_type; + using value_type = typename Tree::value_type; + using size_type = typename Tree::size_type; + using difference_type = typename Tree::difference_type; + using key_compare = typename Tree::key_compare; + using value_compare = typename Tree::value_compare; + using allocator_type = typename Tree::allocator_type; + using reference = typename Tree::reference; + using const_reference = typename Tree::const_reference; + using pointer = typename Tree::pointer; + using const_pointer = typename Tree::const_pointer; + using iterator = typename Tree::iterator; + using const_iterator = typename Tree::const_iterator; + using reverse_iterator = typename Tree::reverse_iterator; + using const_reverse_iterator = typename Tree::const_reverse_iterator; + using node_type = typename Tree::node_handle_type; + + // Constructors/assignments. + btree_container() : tree_(key_compare(), allocator_type()) {} + explicit btree_container(const key_compare &comp, + const allocator_type &alloc = allocator_type()) + : tree_(comp, alloc) {} + btree_container(const btree_container &x) = default; + btree_container(btree_container &&x) noexcept = default; + btree_container &operator=(const btree_container &x) = default; + btree_container &operator=(btree_container &&x) noexcept( + std::is_nothrow_move_assignable::value) = default; + + // Iterator routines. + iterator begin() { return tree_.begin(); } + const_iterator begin() const { return tree_.begin(); } + const_iterator cbegin() const { return tree_.begin(); } + iterator end() { return tree_.end(); } + const_iterator end() const { return tree_.end(); } + const_iterator cend() const { return tree_.end(); } + reverse_iterator rbegin() { return tree_.rbegin(); } + const_reverse_iterator rbegin() const { return tree_.rbegin(); } + const_reverse_iterator crbegin() const { return tree_.rbegin(); } + reverse_iterator rend() { return tree_.rend(); } + const_reverse_iterator rend() const { return tree_.rend(); } + const_reverse_iterator crend() const { return tree_.rend(); } + + // Lookup routines. 
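+ // Note (inferred from the key_arg alias above): when the comparator defines
+ // is_transparent, the lookup routines below deduce K from the argument, so,
+ // for example, a tree keyed on std::string can be probed with a
+ // std::string_view or a string literal without constructing a temporary
+ // std::string.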
+ template + iterator find(const key_arg &key) { + return tree_.find(key); + } + template + const_iterator find(const key_arg &key) const { return tree_.find(key); } + + template + bool contains(const key_arg &key) const { return find(key) != end(); } + + template + iterator lower_bound(const key_arg &key) { return tree_.lower_bound(key); } + + template + const_iterator lower_bound(const key_arg &key) const { return tree_.lower_bound(key); } + + template + iterator upper_bound(const key_arg &key) { return tree_.upper_bound(key); } + + template + const_iterator upper_bound(const key_arg &key) const { return tree_.upper_bound(key); } + + template + std::pair equal_range(const key_arg &key) { return tree_.equal_range(key); } + + template + std::pair equal_range( + const key_arg &key) const { + return tree_.equal_range(key); + } + + iterator erase(const_iterator iter) { return tree_.erase(iterator(iter)); } + iterator erase(iterator iter) { return tree_.erase(iter); } + iterator erase(const_iterator first, const_iterator last) { + return tree_.erase(iterator(first), iterator(last)).second; + } + + node_type extract(iterator position) { + // Use Move instead of Transfer, because the rebalancing code expects to + // have a valid object to scribble metadata bits on top of. + auto node = CommonAccess::Move(get_allocator(), position.slot()); + erase(position); + return node; + } + + node_type extract(const_iterator position) { + return extract(iterator(position)); + } + + public: + void clear() { tree_.clear(); } + void swap(btree_container &x) { tree_.swap(x.tree_); } + void verify() const { tree_.verify(); } + + size_type size() const { return tree_.size(); } + size_type max_size() const { return tree_.max_size(); } + bool empty() const { return tree_.empty(); } + + friend bool operator==(const btree_container &x, const btree_container &y) { + if (x.size() != y.size()) return false; + return std::equal(x.begin(), x.end(), y.begin()); + } + + friend bool operator!=(const btree_container &x, const btree_container &y) { return !(x == y); } + + friend bool operator<(const btree_container &x, const btree_container &y) { + return std::lexicographical_compare(x.begin(), x.end(), y.begin(), y.end()); + } + + friend bool operator>(const btree_container &x, const btree_container &y) { return y < x; } + + friend bool operator<=(const btree_container &x, const btree_container &y) { return !(y < x); } + + friend bool operator>=(const btree_container &x, const btree_container &y) { return !(x < y); } + + // The allocator used by the btree. + allocator_type get_allocator() const { return tree_.get_allocator(); } + + // The key comparator used by the btree. + key_compare key_comp() const { return tree_.key_comp(); } + value_compare value_comp() const { return tree_.value_comp(); } + + // Support absl::Hash. + template + friend State AbslHashValue(State h, const btree_container &b) { + for (const auto &v : b) { + h = State::combine(std::move(h), v); + } + return State::combine(std::move(h), b.size()); + } + + protected: + Tree tree_; + }; + + // A common base class for btree_set and btree_map. 
+ // ----------------------------------------------- + template + class btree_set_container : public btree_container { + using super_type = btree_container; + using params_type = typename Tree::params_type; + using init_type = typename params_type::init_type; + using is_key_compare_to = typename params_type::is_key_compare_to; + friend class BtreeNodePeer; + + protected: + template + using key_arg = typename super_type::template key_arg; + + public: + using key_type = typename Tree::key_type; + using value_type = typename Tree::value_type; + using size_type = typename Tree::size_type; + using key_compare = typename Tree::key_compare; + using allocator_type = typename Tree::allocator_type; + using iterator = typename Tree::iterator; + using const_iterator = typename Tree::const_iterator; + using node_type = typename super_type::node_type; + using insert_return_type = InsertReturnType; + using super_type::super_type; + btree_set_container() {} + + template + btree_set_container(InputIterator b, InputIterator e, + const key_compare &comp = key_compare(), + const allocator_type &alloc = allocator_type()) + : super_type(comp, alloc) { + insert(b, e); + } + + btree_set_container(std::initializer_list init, + const key_compare &comp = key_compare(), + const allocator_type &alloc = allocator_type()) + : btree_set_container(init.begin(), init.end(), comp, alloc) {} + + // Lookup routines. + template + size_type count(const key_arg &key) const { + return this->tree_.count_unique(key); + } + + // Insertion routines. + std::pair insert(const value_type &x) { + return this->tree_.insert_unique(params_type::key(x), x); + } + std::pair insert(value_type &&x) { + return this->tree_.insert_unique(params_type::key(x), std::move(x)); + } + template + std::pair emplace(Args &&... args) { + init_type v(std::forward(args)...); + return this->tree_.insert_unique(params_type::key(v), std::move(v)); + } + iterator insert(const_iterator position, const value_type &x) { + return this->tree_ + .insert_hint_unique(iterator(position), params_type::key(x), x) + .first; + } + iterator insert(const_iterator position, value_type &&x) { + return this->tree_ + .insert_hint_unique(iterator(position), params_type::key(x), + std::move(x)) + .first; + } + + template + iterator emplace_hint(const_iterator position, Args &&... 
args) { + init_type v(std::forward(args)...); + return this->tree_ + .insert_hint_unique(iterator(position), params_type::key(v), + std::move(v)) + .first; + } + + template + void insert(InputIterator b, InputIterator e) { + this->tree_.insert_iterator_unique(b, e); + } + + void insert(std::initializer_list init) { + this->tree_.insert_iterator_unique(init.begin(), init.end()); + } + + insert_return_type insert(node_type &&node) { + if (!node) return {this->end(), false, node_type()}; + std::pair res = + this->tree_.insert_unique(params_type::key(CommonAccess::GetSlot(node)), + CommonAccess::GetSlot(node)); + if (res.second) { + CommonAccess::Destroy(&node); + return {res.first, true, node_type()}; + } else { + return {res.first, false, std::move(node)}; + } + } + + iterator insert(const_iterator hint, node_type &&node) { + if (!node) return this->end(); + std::pair res = this->tree_.insert_hint_unique( + iterator(hint), params_type::key(CommonAccess::GetSlot(node)), + CommonAccess::GetSlot(node)); + if (res.second) CommonAccess::Destroy(&node); + return res.first; + } + + template + size_type erase(const key_arg &key) { return this->tree_.erase_unique(key); } + using super_type::erase; + + template + node_type extract(const key_arg &key) { + auto it = this->find(key); + return it == this->end() ? node_type() : extract(it); + } + + using super_type::extract; + + // Merge routines. + // Moves elements from `src` into `this`. If the element already exists in + // `this`, it is left unmodified in `src`. + template < + typename T, + typename phmap::enable_if_t< + phmap::conjunction< + std::is_same, + std::is_same, + std::is_same>::value, + int> = 0> + void merge(btree_container &src) { // NOLINT + for (auto src_it = src.begin(); src_it != src.end();) { + if (insert(std::move(*src_it)).second) { + src_it = src.erase(src_it); + } else { + ++src_it; + } + } + } + + template < + typename T, + typename phmap::enable_if_t< + phmap::conjunction< + std::is_same, + std::is_same, + std::is_same>::value, + int> = 0> + void merge(btree_container &&src) { + merge(src); + } + }; + + // Base class for btree_map. + // ------------------------- + template + class btree_map_container : public btree_set_container { + using super_type = btree_set_container; + using params_type = typename Tree::params_type; + + protected: + template + using key_arg = typename super_type::template key_arg; + + public: + using key_type = typename Tree::key_type; + using mapped_type = typename params_type::mapped_type; + using value_type = typename Tree::value_type; + using key_compare = typename Tree::key_compare; + using allocator_type = typename Tree::allocator_type; + using iterator = typename Tree::iterator; + using const_iterator = typename Tree::const_iterator; + + // Inherit constructors. + using super_type::super_type; + btree_map_container() {} + + // Insertion routines. + template + std::pair try_emplace(const key_type &k, Args &&... args) { + return this->tree_.insert_unique( + k, std::piecewise_construct, std::forward_as_tuple(k), + std::forward_as_tuple(std::forward(args)...)); + } + template + std::pair try_emplace(key_type &&k, Args &&... args) { + // Note: `key_ref` exists to avoid a ClangTidy warning about moving from `k` + // and then using `k` unsequenced. This is safe because the move is into a + // forwarding reference and insert_unique guarantees that `key` is never + // referenced after consuming `args`. 
+ const key_type& key_ref = k; + return this->tree_.insert_unique( + key_ref, std::piecewise_construct, std::forward_as_tuple(std::move(k)), + std::forward_as_tuple(std::forward(args)...)); + } + template + iterator try_emplace(const_iterator hint, const key_type &k, + Args &&... args) { + return this->tree_ + .insert_hint_unique(iterator(hint), k, std::piecewise_construct, + std::forward_as_tuple(k), + std::forward_as_tuple(std::forward(args)...)) + .first; + } + template + iterator try_emplace(const_iterator hint, key_type &&k, Args &&... args) { + // Note: `key_ref` exists to avoid a ClangTidy warning about moving from `k` + // and then using `k` unsequenced. This is safe because the move is into a + // forwarding reference and insert_hint_unique guarantees that `key` is + // never referenced after consuming `args`. + const key_type& key_ref = k; + return this->tree_ + .insert_hint_unique(iterator(hint), key_ref, std::piecewise_construct, + std::forward_as_tuple(std::move(k)), + std::forward_as_tuple(std::forward(args)...)) + .first; + } + mapped_type &operator[](const key_type &k) { + return try_emplace(k).first->second; + } + mapped_type &operator[](key_type &&k) { + return try_emplace(std::move(k)).first->second; + } + + template + mapped_type &at(const key_arg &key) { + auto it = this->find(key); + if (it == this->end()) + base_internal::ThrowStdOutOfRange("phmap::btree_map::at"); + return it->second; + } + template + const mapped_type &at(const key_arg &key) const { + auto it = this->find(key); + if (it == this->end()) + base_internal::ThrowStdOutOfRange("phmap::btree_map::at"); + return it->second; + } + }; + + // A common base class for btree_multiset and btree_multimap. + template + class btree_multiset_container : public btree_container { + using super_type = btree_container; + using params_type = typename Tree::params_type; + using init_type = typename params_type::init_type; + using is_key_compare_to = typename params_type::is_key_compare_to; + + template + using key_arg = typename super_type::template key_arg; + + public: + using key_type = typename Tree::key_type; + using value_type = typename Tree::value_type; + using size_type = typename Tree::size_type; + using key_compare = typename Tree::key_compare; + using allocator_type = typename Tree::allocator_type; + using iterator = typename Tree::iterator; + using const_iterator = typename Tree::const_iterator; + using node_type = typename super_type::node_type; + + // Inherit constructors. + using super_type::super_type; + btree_multiset_container() {} + + // Range constructor. + template + btree_multiset_container(InputIterator b, InputIterator e, + const key_compare &comp = key_compare(), + const allocator_type &alloc = allocator_type()) + : super_type(comp, alloc) { + insert(b, e); + } + + // Initializer list constructor. + btree_multiset_container(std::initializer_list init, + const key_compare &comp = key_compare(), + const allocator_type &alloc = allocator_type()) + : btree_multiset_container(init.begin(), init.end(), comp, alloc) {} + + // Lookup routines. + template + size_type count(const key_arg &key) const { + return this->tree_.count_multi(key); + } + + // Insertion routines. 
+ iterator insert(const value_type &x) { return this->tree_.insert_multi(x); } + iterator insert(value_type &&x) { + return this->tree_.insert_multi(std::move(x)); + } + iterator insert(const_iterator position, const value_type &x) { + return this->tree_.insert_hint_multi(iterator(position), x); + } + iterator insert(const_iterator position, value_type &&x) { + return this->tree_.insert_hint_multi(iterator(position), std::move(x)); + } + template + void insert(InputIterator b, InputIterator e) { + this->tree_.insert_iterator_multi(b, e); + } + void insert(std::initializer_list init) { + this->tree_.insert_iterator_multi(init.begin(), init.end()); + } + template + iterator emplace(Args &&... args) { + return this->tree_.insert_multi(init_type(std::forward(args)...)); + } + template + iterator emplace_hint(const_iterator position, Args &&... args) { + return this->tree_.insert_hint_multi( + iterator(position), init_type(std::forward(args)...)); + } + iterator insert(node_type &&node) { + if (!node) return this->end(); + iterator res = + this->tree_.insert_multi(params_type::key(CommonAccess::GetSlot(node)), + CommonAccess::GetSlot(node)); + CommonAccess::Destroy(&node); + return res; + } + iterator insert(const_iterator hint, node_type &&node) { + if (!node) return this->end(); + iterator res = this->tree_.insert_hint_multi( + iterator(hint), + std::move(params_type::element(CommonAccess::GetSlot(node)))); + CommonAccess::Destroy(&node); + return res; + } + + // Deletion routines. + template + size_type erase(const key_arg &key) { + return this->tree_.erase_multi(key); + } + using super_type::erase; + + // Node extraction routines. + template + node_type extract(const key_arg &key) { + auto it = this->find(key); + return it == this->end() ? node_type() : extract(it); + } + using super_type::extract; + + // Merge routines. + // Moves all elements from `src` into `this`. + template < + typename T, + typename phmap::enable_if_t< + phmap::conjunction< + std::is_same, + std::is_same, + std::is_same>::value, + int> = 0> + void merge(btree_container &src) { // NOLINT + insert(std::make_move_iterator(src.begin()), + std::make_move_iterator(src.end())); + src.clear(); + } + + template < + typename T, + typename phmap::enable_if_t< + phmap::conjunction< + std::is_same, + std::is_same, + std::is_same>::value, + int> = 0> + void merge(btree_container &&src) { + merge(src); + } + }; + + // A base class for btree_multimap. + template + class btree_multimap_container : public btree_multiset_container { + using super_type = btree_multiset_container; + using params_type = typename Tree::params_type; + + public: + using mapped_type = typename params_type::mapped_type; + + // Inherit constructors. 
+ using super_type::super_type; + btree_multimap_container() {} + }; + +} // namespace container_internal + + + + // ---------------------------------------------------------------------- + // btree_set - default values in phmap_fwd_decl.h + // ---------------------------------------------------------------------- + template + class btree_set : public container_internal::btree_set_container< + container_internal::btree>> + { + using Base = typename btree_set::btree_set_container; + + public: + btree_set() {} + using Base::Base; + using Base::begin; + using Base::cbegin; + using Base::end; + using Base::cend; + using Base::empty; + using Base::max_size; + using Base::size; + using Base::clear; + using Base::erase; + using Base::insert; + using Base::emplace; + using Base::emplace_hint; + using Base::extract; + using Base::merge; + using Base::swap; + using Base::contains; + using Base::count; + using Base::equal_range; + using Base::find; + using Base::get_allocator; + using Base::key_comp; + using Base::value_comp; + }; + + // Swaps the contents of two `phmap::btree_set` containers. + // ------------------------------------------------------- + template + void swap(btree_set &x, btree_set &y) { + return x.swap(y); + } + + // Erases all elements that satisfy the predicate pred from the container. + // ---------------------------------------------------------------------- + template + void erase_if(btree_set &set, Pred pred) { + for (auto it = set.begin(); it != set.end();) { + if (pred(*it)) { + it = set.erase(it); + } else { + ++it; + } + } + } + + // ---------------------------------------------------------------------- + // btree_multiset - default values in phmap_fwd_decl.h + // ---------------------------------------------------------------------- + template + class btree_multiset : public container_internal::btree_multiset_container< + container_internal::btree>> + { + using Base = typename btree_multiset::btree_multiset_container; + + public: + btree_multiset() {} + using Base::Base; + using Base::begin; + using Base::cbegin; + using Base::end; + using Base::cend; + using Base::empty; + using Base::max_size; + using Base::size; + using Base::clear; + using Base::erase; + using Base::insert; + using Base::emplace; + using Base::emplace_hint; + using Base::extract; + using Base::merge; + using Base::swap; + using Base::contains; + using Base::count; + using Base::equal_range; + using Base::find; + using Base::get_allocator; + using Base::key_comp; + using Base::value_comp; + }; + + // Swaps the contents of two `phmap::btree_multiset` containers. + // ------------------------------------------------------------ + template + void swap(btree_multiset &x, btree_multiset &y) { + return x.swap(y); + } + + // Erases all elements that satisfy the predicate pred from the container. 
+ // ---------------------------------------------------------------------- + template + void erase_if(btree_multiset &set, Pred pred) { + for (auto it = set.begin(); it != set.end();) { + if (pred(*it)) { + it = set.erase(it); + } else { + ++it; + } + } + } + + + // ---------------------------------------------------------------------- + // btree_map - default values in phmap_fwd_decl.h + // ---------------------------------------------------------------------- + template + class btree_map : public container_internal::btree_map_container< + container_internal::btree>> + { + using Base = typename btree_map::btree_map_container; + + public: + btree_map() {} + using Base::Base; + using Base::begin; + using Base::cbegin; + using Base::end; + using Base::cend; + using Base::empty; + using Base::max_size; + using Base::size; + using Base::clear; + using Base::erase; + using Base::insert; + using Base::emplace; + using Base::emplace_hint; + using Base::try_emplace; + using Base::extract; + using Base::merge; + using Base::swap; + using Base::at; + using Base::contains; + using Base::count; + using Base::equal_range; + using Base::find; + using Base::operator[]; + using Base::get_allocator; + using Base::key_comp; + using Base::value_comp; + }; + + // Swaps the contents of two `phmap::btree_map` containers. + // ------------------------------------------------------- + template + void swap(btree_map &x, btree_map &y) { + return x.swap(y); + } + + // ---------------------------------------------------------------------- + template + void erase_if(btree_map &map, Pred pred) { + for (auto it = map.begin(); it != map.end();) { + if (pred(*it)) { + it = map.erase(it); + } else { + ++it; + } + } + } + + // ---------------------------------------------------------------------- + // btree_multimap - default values in phmap_fwd_decl.h + // ---------------------------------------------------------------------- + template + class btree_multimap : public container_internal::btree_multimap_container< + container_internal::btree>> + { + using Base = typename btree_multimap::btree_multimap_container; + + public: + btree_multimap() {} + using Base::Base; + using Base::begin; + using Base::cbegin; + using Base::end; + using Base::cend; + using Base::empty; + using Base::max_size; + using Base::size; + using Base::clear; + using Base::erase; + using Base::insert; + using Base::emplace; + using Base::emplace_hint; + using Base::extract; + using Base::merge; + using Base::swap; + using Base::contains; + using Base::count; + using Base::equal_range; + using Base::find; + using Base::get_allocator; + using Base::key_comp; + using Base::value_comp; + }; + + // Swaps the contents of two `phmap::btree_multimap` containers. + // ------------------------------------------------------------ + template + void swap(btree_multimap &x, btree_multimap &y) { + return x.swap(y); + } + + // Erases all elements that satisfy the predicate pred from the container. 
+ // ---------------------------------------------------------------------- + template + void erase_if(btree_multimap &map, Pred pred) { + for (auto it = map.begin(); it != map.end();) { + if (pred(*it)) { + it = map.erase(it); + } else { + ++it; + } + } + } + + +} // namespace btree + +#ifdef _MSC_VER + #pragma warning(pop) +#endif + + +#endif // PHMAP_BTREE_BTREE_CONTAINER_H_ diff --git a/src/includes/3thparty/parallel_hashmap/conanfile.py b/src/includes/3thparty/parallel_hashmap/conanfile.py new file mode 100644 index 0000000..c046377 --- /dev/null +++ b/src/includes/3thparty/parallel_hashmap/conanfile.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from conans import ConanFile, tools +import os + +class SparseppConan(ConanFile): + name = "parallel_hashmap" + version = "1.27" + description = "A header-only, very fast and memory-friendly hash map" + + # Indicates License type of the packaged library + license = "https://github.com/greg7mdp/parallel-hashmap/blob/master/LICENSE" + + # Packages the license for the conanfile.py + exports = ["LICENSE"] + + # Custom attributes for Bincrafters recipe conventions + source_subfolder = "source_subfolder" + + def source(self): + source_url = "https://github.com/greg7mdp/parallel-hashmap" + tools.get("{0}/archive/{1}.tar.gz".format(source_url, self.version)) + extracted_dir = self.name + "-" + self.version + + #Rename to "source_folder" is a convention to simplify later steps + os.rename(extracted_dir, self.source_subfolder) + + + def package(self): + include_folder = os.path.join(self.source_subfolder, "parallel_hashmap") + self.copy(pattern="LICENSE") + self.copy(pattern="*", dst="include/parallel_hashmap", src=include_folder) + + def package_id(self): + self.info.header_only() diff --git a/src/includes/3thparty/parallel_hashmap/meminfo.h b/src/includes/3thparty/parallel_hashmap/meminfo.h new file mode 100644 index 0000000..872f3c6 --- /dev/null +++ b/src/includes/3thparty/parallel_hashmap/meminfo.h @@ -0,0 +1,195 @@ +#if !defined(spp_memory_h_guard) +#define spp_memory_h_guard + +#include +#include +#include + +#if defined(_WIN32) || defined( __CYGWIN__) + #define SPP_WIN +#endif + +#ifdef SPP_WIN + #include + #include + #undef min + #undef max +#elif defined(__linux__) + #include + #include +#elif defined(__FreeBSD__) + #include + #include + #include + #include + #include + #include +#endif + +namespace spp +{ + uint64_t GetSystemMemory(); + uint64_t GetTotalMemoryUsed(); + uint64_t GetProcessMemoryUsed(); + uint64_t GetPhysicalMemory(); + + uint64_t GetSystemMemory() + { +#ifdef SPP_WIN + MEMORYSTATUSEX memInfo; + memInfo.dwLength = sizeof(MEMORYSTATUSEX); + GlobalMemoryStatusEx(&memInfo); + return static_cast(memInfo.ullTotalPageFile); +#elif defined(__linux__) + struct sysinfo memInfo; + sysinfo (&memInfo); + auto totalVirtualMem = memInfo.totalram; + + totalVirtualMem += memInfo.totalswap; + totalVirtualMem *= memInfo.mem_unit; + return static_cast(totalVirtualMem); +#elif defined(__FreeBSD__) + kvm_t *kd; + u_int pageCnt; + size_t pageCntLen = sizeof(pageCnt); + u_int pageSize; + struct kvm_swap kswap; + uint64_t totalVirtualMem; + + pageSize = static_cast(getpagesize()); + + sysctlbyname("vm.stats.vm.v_page_count", &pageCnt, &pageCntLen, NULL, 0); + totalVirtualMem = pageCnt * pageSize; + + kd = kvm_open(NULL, _PATH_DEVNULL, NULL, O_RDONLY, "kvm_open"); + kvm_getswapinfo(kd, &kswap, 1, 0); + kvm_close(kd); + totalVirtualMem += kswap.ksw_total * pageSize; + + return totalVirtualMem; +#else + return 0; +#endif + } + + 
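+ // Illustrative usage sketch (not part of the library): how a benchmark
+ // harness might sample process memory around a large allocation to estimate
+ // a container's footprint. Only the functions declared above are assumed;
+ // the 4096-byte page size used by the touch loop is an assumption.
+ inline uint64_t ExampleMeasureAllocation(uint64_t bytes)
+ {
+     const uint64_t before = GetProcessMemoryUsed();
+     char *block = new char[bytes];
+     // Touch each page so the memory is actually committed by the OS.
+     for (uint64_t i = 0; i < bytes; i += 4096)
+         block[i] = 1;
+     const uint64_t used = GetProcessMemoryUsed() - before;
+     delete[] block;
+     return used;
+ }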
uint64_t GetTotalMemoryUsed() + { +#ifdef SPP_WIN + MEMORYSTATUSEX memInfo; + memInfo.dwLength = sizeof(MEMORYSTATUSEX); + GlobalMemoryStatusEx(&memInfo); + return static_cast(memInfo.ullTotalPageFile - memInfo.ullAvailPageFile); +#elif defined(__linux__) + struct sysinfo memInfo; + sysinfo(&memInfo); + auto virtualMemUsed = memInfo.totalram - memInfo.freeram; + + virtualMemUsed += memInfo.totalswap - memInfo.freeswap; + virtualMemUsed *= memInfo.mem_unit; + + return static_cast(virtualMemUsed); +#elif defined(__FreeBSD__) + kvm_t *kd; + u_int pageSize; + u_int pageCnt, freeCnt; + size_t pageCntLen = sizeof(pageCnt); + size_t freeCntLen = sizeof(freeCnt); + struct kvm_swap kswap; + uint64_t virtualMemUsed; + + pageSize = static_cast(getpagesize()); + + sysctlbyname("vm.stats.vm.v_page_count", &pageCnt, &pageCntLen, NULL, 0); + sysctlbyname("vm.stats.vm.v_free_count", &freeCnt, &freeCntLen, NULL, 0); + virtualMemUsed = (pageCnt - freeCnt) * pageSize; + + kd = kvm_open(NULL, _PATH_DEVNULL, NULL, O_RDONLY, "kvm_open"); + kvm_getswapinfo(kd, &kswap, 1, 0); + kvm_close(kd); + virtualMemUsed += kswap.ksw_used * pageSize; + + return virtualMemUsed; +#else + return 0; +#endif + } + + uint64_t GetProcessMemoryUsed() + { +#ifdef SPP_WIN + PROCESS_MEMORY_COUNTERS_EX pmc; + GetProcessMemoryInfo(GetCurrentProcess(), reinterpret_cast(&pmc), sizeof(pmc)); + return static_cast(pmc.PrivateUsage); +#elif defined(__linux__) + auto parseLine = + [](char* line)->int + { + auto i = strlen(line); + + while(*line < '0' || *line > '9') + { + line++; + } + + line[i-3] = '\0'; + i = atoi(line); + return i; + }; + + auto file = fopen("/proc/self/status", "r"); + auto result = -1; + char line[128]; + + while(fgets(line, 128, file) != nullptr) + { + if(strncmp(line, "VmSize:", 7) == 0) + { + result = parseLine(line); + break; + } + } + + fclose(file); + return static_cast(result) * 1024; +#elif defined(__FreeBSD__) + struct kinfo_proc info; + size_t infoLen = sizeof(info); + int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, getpid() }; + + sysctl(mib, sizeof(mib) / sizeof(*mib), &info, &infoLen, NULL, 0); + return static_cast(info.ki_rssize * getpagesize()); +#else + return 0; +#endif + } + + uint64_t GetPhysicalMemory() + { +#ifdef SPP_WIN + MEMORYSTATUSEX memInfo; + memInfo.dwLength = sizeof(MEMORYSTATUSEX); + GlobalMemoryStatusEx(&memInfo); + return static_cast(memInfo.ullTotalPhys); +#elif defined(__linux__) + struct sysinfo memInfo; + sysinfo(&memInfo); + + auto totalPhysMem = memInfo.totalram; + + totalPhysMem *= memInfo.mem_unit; + return static_cast(totalPhysMem); +#elif defined(__FreeBSD__) + u_long physMem; + size_t physMemLen = sizeof(physMem); + int mib[] = { CTL_HW, HW_PHYSMEM }; + + sysctl(mib, sizeof(mib) / sizeof(*mib), &physMem, &physMemLen, NULL, 0); + return physMem; +#else + return 0; +#endif + } + +} + +#endif // spp_memory_h_guard diff --git a/src/includes/3thparty/parallel_hashmap/phmap.h b/src/includes/3thparty/parallel_hashmap/phmap.h new file mode 100644 index 0000000..3e3e66a --- /dev/null +++ b/src/includes/3thparty/parallel_hashmap/phmap.h @@ -0,0 +1,4382 @@ +#if !defined(phmap_h_guard_) +#define phmap_h_guard_ + +// --------------------------------------------------------------------------- +// Copyright (c) 2019, Gregory Popovitch - greg7mdp@gmail.com +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Includes work from abseil-cpp (https://github.com/abseil/abseil-cpp) +// with modifications. +// +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// --------------------------------------------------------------------------- + +#ifdef _MSC_VER + #pragma warning(push) + + #pragma warning(disable : 4127) // conditional expression is constant + #pragma warning(disable : 4324) // structure was padded due to alignment specifier + #pragma warning(disable : 4514) // unreferenced inline function has been removed + #pragma warning(disable : 4623) // default constructor was implicitly defined as deleted + #pragma warning(disable : 4625) // copy constructor was implicitly defined as deleted + #pragma warning(disable : 4626) // assignment operator was implicitly defined as deleted + #pragma warning(disable : 4710) // function not inlined + #pragma warning(disable : 4711) // selected for automatic inline expansion + #pragma warning(disable : 4820) // '6' bytes padding added after data member + #pragma warning(disable : 4868) // compiler may not enforce left-to-right evaluation order in braced initializer list + #pragma warning(disable : 5027) // move assignment operator was implicitly defined as deleted + #pragma warning(disable : 5045) // Compiler will insert Spectre mitigation for memory load if /Qspectre switch specified +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "phmap_fwd_decl.h" +#include "phmap_utils.h" +#include "phmap_base.h" + +#if PHMAP_HAVE_STD_STRING_VIEW + #include +#endif + +namespace phmap { + +namespace container_internal { + +// -------------------------------------------------------------------------- +template +class probe_seq +{ +public: + probe_seq(size_t hash, size_t mask) { + assert(((mask + 1) & mask) == 0 && "not a mask"); + mask_ = mask; + offset_ = hash & mask_; + } + size_t offset() const { return offset_; } + size_t offset(size_t i) const { return (offset_ + i) & mask_; } + + void next() { + index_ += Width; + offset_ += index_; + offset_ &= mask_; + } + // 0-based probe index. The i-th probe in the probe sequence. 
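+    // (Illustrative aside, not in the original source: with Width == 16 and
+    // mask_ == 63, an initial offset_ of 10 is followed by next() offsets
+    // 26, 58, 42, ... — each step adds one more Width to the stride, i.e.
+    // the triangular-number sequence that makes the probing quadratic.)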
+    size_t index() const { return index_; }
+
+private:
+    size_t mask_;
+    size_t offset_;
+    size_t index_ = 0;
+};
+
+// --------------------------------------------------------------------------
+template
+struct RequireUsableKey
+{
+    template
+    std::pair<
+        decltype(std::declval()(std::declval())),
+        decltype(std::declval()(std::declval(),
+                                std::declval()))>*
+    operator()(const PassedKey&, const Args&...) const;
+};
+
+// --------------------------------------------------------------------------
+template
+struct IsDecomposable : std::false_type {};
+
+template
+struct IsDecomposable<
+    phmap::void_t(),
+                  std::declval()...))>,
+    Policy, Hash, Eq, Ts...> : std::true_type {};
+
+// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
+// --------------------------------------------------------------------------
+template
+constexpr bool IsNoThrowSwappable() {
+    using std::swap;
+    return noexcept(swap(std::declval(), std::declval()));
+}
+
+// --------------------------------------------------------------------------
+template
+int TrailingZeros(T x) {
+    return sizeof(T) == 8 ? base_internal::CountTrailingZerosNonZero64(
+                                static_cast(x))
+                          : base_internal::CountTrailingZerosNonZero32(
+                                static_cast(x));
+}
+
+// --------------------------------------------------------------------------
+template
+int LeadingZeros(T x) {
+    return sizeof(T) == 8
+               ? base_internal::CountLeadingZeros64(static_cast(x))
+               : base_internal::CountLeadingZeros32(static_cast(x));
+}
+
+// --------------------------------------------------------------------------
+// An abstraction over a bitmask. It provides an easy way to iterate through
+// the indexes of the set bits of a bitmask. When Shift=0 (platforms with SSE),
+// this is a true bitmask. On non-SSE platforms, the arithmetic used to
+// emulate the SSE behavior works in bytes (Shift=3) and leaves each byte as
+// either 0x00 or 0x80.
+//
+// For example:
+//   for (int i : BitMask(0x5)) -> yields 0, 2
+//   for (int i : BitMask(0x0000000080800000)) -> yields 2, 3
+// --------------------------------------------------------------------------
+template
+class BitMask
+{
+    static_assert(std::is_unsigned::value, "");
+    static_assert(Shift == 0 || Shift == 3, "");
+
+public:
+    // These are useful for unit tests (gunit).
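+    // (Illustrative aside, not in the original source: in the Shift == 3
+    // example above, 0x0000000080800000 has its 0x80 markers in bytes 2 and
+    // 3, so LowestBitSet() == TrailingZeros(mask) >> 3 == 23 >> 3 == 2;
+    // operator++ clears that bit and the next pass yields 31 >> 3 == 3.)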
+ using value_type = int; + using iterator = BitMask; + using const_iterator = BitMask; + + explicit BitMask(T mask) : mask_(mask) {} + BitMask& operator++() { + mask_ &= (mask_ - 1); + return *this; + } + explicit operator bool() const { return mask_ != 0; } + int operator*() const { return LowestBitSet(); } + int LowestBitSet() const { + return container_internal::TrailingZeros(mask_) >> Shift; + } + int HighestBitSet() const { + return (sizeof(T) * CHAR_BIT - container_internal::LeadingZeros(mask_) - + 1) >> + Shift; + } + + BitMask begin() const { return *this; } + BitMask end() const { return BitMask(0); } + + int TrailingZeros() const { + return container_internal::TrailingZeros(mask_) >> Shift; + } + + int LeadingZeros() const { + constexpr int total_significant_bits = SignificantBits << Shift; + constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits; + return container_internal::LeadingZeros(mask_ << extra_bits) >> Shift; + } + +private: + friend bool operator==(const BitMask& a, const BitMask& b) { + return a.mask_ == b.mask_; + } + friend bool operator!=(const BitMask& a, const BitMask& b) { + return a.mask_ != b.mask_; + } + + T mask_; +}; + +// -------------------------------------------------------------------------- +using ctrl_t = signed char; +using h2_t = uint8_t; + +// -------------------------------------------------------------------------- +// The values here are selected for maximum performance. See the static asserts +// below for details. +// -------------------------------------------------------------------------- +enum Ctrl : ctrl_t +{ + kEmpty = -128, // 0b10000000 + kDeleted = -2, // 0b11111110 + kSentinel = -1, // 0b11111111 +}; + +static_assert( + kEmpty & kDeleted & kSentinel & 0x80, + "Special markers need to have the MSB to make checking for them efficient"); +static_assert(kEmpty < kSentinel && kDeleted < kSentinel, + "kEmpty and kDeleted must be smaller than kSentinel to make the " + "SIMD test of IsEmptyOrDeleted() efficient"); +static_assert(kSentinel == -1, + "kSentinel must be -1 to elide loading it from memory into SIMD " + "registers (pcmpeqd xmm, xmm)"); +static_assert(kEmpty == -128, + "kEmpty must be -128 to make the SIMD check for its " + "existence efficient (psignb xmm, xmm)"); +static_assert(~kEmpty & ~kDeleted & kSentinel & 0x7F, + "kEmpty and kDeleted must share an unset bit that is not shared " + "by kSentinel to make the scalar test for MatchEmptyOrDeleted() " + "efficient"); +static_assert(kDeleted == -2, + "kDeleted must be -2 to make the implementation of " + "ConvertSpecialToEmptyAndFullToDeleted efficient"); + +// -------------------------------------------------------------------------- +// A single block of empty control bytes for tables without any slots allocated. +// This enables removing a branch in the hot path of find(). +// -------------------------------------------------------------------------- +inline ctrl_t* EmptyGroup() { + alignas(16) static constexpr ctrl_t empty_group[] = { + kSentinel, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, + kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty}; + return const_cast(empty_group); +} + +// -------------------------------------------------------------------------- +inline size_t HashSeed(const ctrl_t* ctrl) { + // The low bits of the pointer have little or no entropy because of + // alignment. We shift the pointer to try to use higher entropy bits. A + // good number seems to be 12 bits, because that aligns with page size. 
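+    // (Illustrative note, not in the original source: with 4 KiB pages,
+    // shifting by 12 discards the low bits that alignment makes predictable,
+    // so two tables whose ctrl arrays land on different pages mix different
+    // seeds into H1 when PHMAP_NON_DETERMINISTIC is defined below, giving
+    // them different iteration orders.)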
+ return reinterpret_cast(ctrl) >> 12; +} + +#ifdef PHMAP_NON_DETERMINISTIC + +inline size_t H1(size_t hash, const ctrl_t* ctrl) { + // use ctrl_ pointer to add entropy to ensure + // non-deterministic iteration order. + return (hash >> 7) ^ HashSeed(ctrl); +} + +#else + +inline size_t H1(size_t hash, const ctrl_t* ) { + return (hash >> 7); +} + +#endif + + +inline ctrl_t H2(size_t hash) { return (ctrl_t)(hash & 0x7F); } + +inline bool IsEmpty(ctrl_t c) { return c == kEmpty; } +inline bool IsFull(ctrl_t c) { return c >= 0; } +inline bool IsDeleted(ctrl_t c) { return c == kDeleted; } +inline bool IsEmptyOrDeleted(ctrl_t c) { return c < kSentinel; } + +#if PHMAP_HAVE_SSE2 + +#ifdef _MSC_VER + #pragma warning(push) + #pragma warning(disable : 4365) // conversion from 'int' to 'T', signed/unsigned mismatch +#endif + +// -------------------------------------------------------------------------- +// https://github.com/abseil/abseil-cpp/issues/209 +// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853 +// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char +// Work around this by using the portable implementation of Group +// when using -funsigned-char under GCC. +// -------------------------------------------------------------------------- +inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) { +#if defined(__GNUC__) && !defined(__clang__) + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Woverflow" + + if (std::is_unsigned::value) { + const __m128i mask = _mm_set1_epi8(static_cast(0x80)); + const __m128i diff = _mm_subs_epi8(b, a); + return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask); + } + + #pragma GCC diagnostic pop +#endif + return _mm_cmpgt_epi8(a, b); +} + +// -------------------------------------------------------------------------- +// -------------------------------------------------------------------------- +struct GroupSse2Impl +{ + enum { kWidth = 16 }; // the number of slots per group + + explicit GroupSse2Impl(const ctrl_t* pos) { + ctrl = _mm_loadu_si128(reinterpret_cast(pos)); + } + + // Returns a bitmask representing the positions of slots that match hash. + // ---------------------------------------------------------------------- + BitMask Match(h2_t hash) const { + auto match = _mm_set1_epi8((char)hash); + return BitMask( + _mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))); + } + + // Returns a bitmask representing the positions of empty slots. + // ------------------------------------------------------------ + BitMask MatchEmpty() const { +#if PHMAP_HAVE_SSSE3 + // This only works because kEmpty is -128. + return BitMask( + _mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))); +#else + return Match(static_cast(kEmpty)); +#endif + } + + // Returns a bitmask representing the positions of empty or deleted slots. + // ----------------------------------------------------------------------- + BitMask MatchEmptyOrDeleted() const { + auto special = _mm_set1_epi8(kSentinel); + return BitMask( + _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))); + } + + // Returns the number of trailing empty or deleted elements in the group. 
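+    // (Illustrative aside, not in the original source: for Match() above,
+    // if the 16 ctrl bytes were {0x12, kEmpty, 0x12, kEmpty, ...}, the
+    // single _mm_cmpeq_epi8 plus _mm_movemask_epi8 would produce the mask
+    // 0b0101, and the returned BitMask iterates slot indexes 0 and 2.)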
+ // ---------------------------------------------------------------------- + uint32_t CountLeadingEmptyOrDeleted() const { + auto special = _mm_set1_epi8(kSentinel); + return TrailingZeros( + _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1); + } + + // ---------------------------------------------------------------------- + void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { + auto msbs = _mm_set1_epi8(static_cast(-128)); + auto x126 = _mm_set1_epi8(126); +#if PHMAP_HAVE_SSSE3 + auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs); +#else + auto zero = _mm_setzero_si128(); + auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl); + auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126)); +#endif + _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res); + } + + __m128i ctrl; +}; + +#ifdef _MSC_VER + #pragma warning(pop) +#endif + +#endif // PHMAP_HAVE_SSE2 + +// -------------------------------------------------------------------------- +// -------------------------------------------------------------------------- +struct GroupPortableImpl +{ + enum { kWidth = 8 }; + + explicit GroupPortableImpl(const ctrl_t* pos) + : ctrl(little_endian::Load64(pos)) {} + + BitMask Match(h2_t hash) const { + // For the technique, see: + // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord + // (Determine if a word has a byte equal to n). + // + // Caveat: there are false positives but: + // - they only occur if there is a real match + // - they never occur on kEmpty, kDeleted, kSentinel + // - they will be handled gracefully by subsequent checks in code + // + // Example: + // v = 0x1716151413121110 + // hash = 0x12 + // retval = (v - lsbs) & ~v & msbs = 0x0000000080800000 + constexpr uint64_t msbs = 0x8080808080808080ULL; + constexpr uint64_t lsbs = 0x0101010101010101ULL; + auto x = ctrl ^ (lsbs * hash); + return BitMask((x - lsbs) & ~x & msbs); + } + + BitMask MatchEmpty() const { + constexpr uint64_t msbs = 0x8080808080808080ULL; + return BitMask((ctrl & (~ctrl << 6)) & msbs); + } + + BitMask MatchEmptyOrDeleted() const { + constexpr uint64_t msbs = 0x8080808080808080ULL; + return BitMask((ctrl & (~ctrl << 7)) & msbs); + } + + uint32_t CountLeadingEmptyOrDeleted() const { + constexpr uint64_t gaps = 0x00FEFEFEFEFEFEFEULL; + return (uint32_t)((TrailingZeros(((~ctrl & (ctrl >> 7)) | gaps) + 1) + 7) >> 3); + } + + void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { + constexpr uint64_t msbs = 0x8080808080808080ULL; + constexpr uint64_t lsbs = 0x0101010101010101ULL; + auto x = ctrl & msbs; + auto res = (~x + (x >> 7)) & ~lsbs; + little_endian::Store64(dst, res); + } + + uint64_t ctrl; +}; + +#if PHMAP_HAVE_SSE2 + using Group = GroupSse2Impl; +#else + using Group = GroupPortableImpl; +#endif + +template +class raw_hash_set; + +inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; } + +// -------------------------------------------------------------------------- +// PRECONDITION: +// IsValidCapacity(capacity) +// ctrl[capacity] == kSentinel +// ctrl[i] != kSentinel for all i < capacity +// Applies mapping for every byte in ctrl: +// DELETED -> EMPTY +// EMPTY -> EMPTY +// FULL -> DELETED +// -------------------------------------------------------------------------- +inline void ConvertDeletedToEmptyAndFullToDeleted( + ctrl_t* ctrl, size_t capacity) +{ + assert(ctrl[capacity] == kSentinel); + assert(IsValidCapacity(capacity)); + for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) { + 
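+        // (Illustrative note, not in the original source: each pass rewrites
+        // one whole group in place — every special byte (kEmpty, kDeleted,
+        // kSentinel) becomes kEmpty and every full byte becomes kDeleted;
+        // the sentinel byte is restored right after the loop.)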
Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos); + } + // Copy the cloned ctrl bytes. + std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth); + ctrl[capacity] = kSentinel; +} + +// -------------------------------------------------------------------------- +// Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1. +// -------------------------------------------------------------------------- +inline size_t NormalizeCapacity(size_t n) +{ + return n ? ~size_t{} >> LeadingZeros(n) : 1; +} + +// -------------------------------------------------------------------------- +// We use 7/8th as maximum load factor. +// For 16-wide groups, that gives an average of two empty slots per group. +// -------------------------------------------------------------------------- +inline size_t CapacityToGrowth(size_t capacity) +{ + assert(IsValidCapacity(capacity)); + // `capacity*7/8` + PHMAP_IF_CONSTEXPR (Group::kWidth == 8 && capacity == 7) { + // x-x/8 does not work when x==7. + return 6; + } + return capacity - capacity / 8; +} + +// -------------------------------------------------------------------------- +// From desired "growth" to a lowerbound of the necessary capacity. +// Might not be a valid one and required NormalizeCapacity(). +// -------------------------------------------------------------------------- +inline size_t GrowthToLowerboundCapacity(size_t growth) +{ + // `growth*8/7` + PHMAP_IF_CONSTEXPR (Group::kWidth == 8 && growth == 7) { + // x+(x-1)/7 does not work when x==7. + return 8; + } + return growth + static_cast((static_cast(growth) - 1) / 7); +} + +namespace hashtable_debug_internal { + +// If it is a map, call get<0>(). +using std::get; +template +auto GetKey(const typename T::value_type& pair, int) -> decltype(get<0>(pair)) { + return get<0>(pair); +} + +// If it is not a map, return the value directly. +template +const typename T::key_type& GetKey(const typename T::key_type& key, char) { + return key; +} + +// -------------------------------------------------------------------------- +// Containers should specialize this to provide debug information for that +// container. +// -------------------------------------------------------------------------- +template +struct HashtableDebugAccess +{ + // Returns the number of probes required to find `key` in `c`. The "number of + // probes" is a concept that can vary by container. Implementations should + // return 0 when `key` was found in the minimum number of operations and + // should increment the result for each non-trivial operation required to find + // `key`. + // + // The default implementation uses the bucket api from the standard and thus + // works for `std::unordered_*` containers. 
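+// (Illustrative arithmetic, not in the original source, for the two growth
+// helpers above: with capacity 15, CapacityToGrowth(15) = 15 - 15/8 = 14
+// usable slots — the 7/8 max load factor — and GrowthToLowerboundCapacity(14)
+// = 14 + 13/7 = 15 round-trips back, so reserve(14) normalizes to a
+// 15-slot (2^4 - 1) table.)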
+ // -------------------------------------------------------------------------- + static size_t GetNumProbes(const Container& c, + const typename Container::key_type& key) { + if (!c.bucket_count()) return {}; + size_t num_probes = 0; + size_t bucket = c.bucket(key); + for (auto it = c.begin(bucket), e = c.end(bucket);; ++it, ++num_probes) { + if (it == e) return num_probes; + if (c.key_eq()(key, GetKey(*it, 0))) return num_probes; + } + } +}; + +} // namespace hashtable_debug_internal + +// ---------------------------------------------------------------------------- +// I N F O Z S T U B S +// ---------------------------------------------------------------------------- +struct HashtablezInfo +{ + void PrepareForSampling() {} +}; + +inline void RecordRehashSlow(HashtablezInfo*, size_t ) {} + +static inline void RecordInsertSlow(HashtablezInfo* , size_t, size_t ) {} + +static inline void RecordEraseSlow(HashtablezInfo*) {} + +static inline HashtablezInfo* SampleSlow(int64_t*) { return nullptr; } +static inline void UnsampleSlow(HashtablezInfo* ) {} + +class HashtablezInfoHandle +{ +public: + inline void RecordStorageChanged(size_t , size_t ) {} + inline void RecordRehash(size_t ) {} + inline void RecordInsert(size_t , size_t ) {} + inline void RecordErase() {} + friend inline void swap(HashtablezInfoHandle& , + HashtablezInfoHandle& ) noexcept {} +}; + +static inline HashtablezInfoHandle Sample() { return HashtablezInfoHandle(); } + +class HashtablezSampler +{ +public: + // Returns a global Sampler. + static HashtablezSampler& Global() { static HashtablezSampler hzs; return hzs; } + HashtablezInfo* Register() { static HashtablezInfo info; return &info; } + void Unregister(HashtablezInfo* ) {} + + using DisposeCallback = void (*)(const HashtablezInfo&); + DisposeCallback SetDisposeCallback(DisposeCallback ) { return nullptr; } + int64_t Iterate(const std::function& ) { return 0; } +}; + +static inline void SetHashtablezEnabled(bool ) {} +static inline void SetHashtablezSampleParameter(int32_t ) {} +static inline void SetHashtablezMaxSamples(int32_t ) {} + + +namespace memory_internal { + +// Constructs T into uninitialized storage pointed by `ptr` using the args +// specified in the tuple. +// ---------------------------------------------------------------------------- +template +void ConstructFromTupleImpl(Alloc* alloc, T* ptr, Tuple&& t, + phmap::index_sequence) { + phmap::allocator_traits::construct( + *alloc, ptr, std::get(std::forward(t))...); +} + +template +struct WithConstructedImplF { + template + decltype(std::declval()(std::declval())) operator()( + Args&&... args) const { + return std::forward(f)(T(std::forward(args)...)); + } + F&& f; +}; + +template +decltype(std::declval()(std::declval())) WithConstructedImpl( + Tuple&& t, phmap::index_sequence, F&& f) { + return WithConstructedImplF{std::forward(f)}( + std::get(std::forward(t))...); +} + +template +auto TupleRefImpl(T&& t, phmap::index_sequence) + -> decltype(std::forward_as_tuple(std::get(std::forward(t))...)) { + return std::forward_as_tuple(std::get(std::forward(t))...); +} + +// Returns a tuple of references to the elements of the input tuple. T must be a +// tuple. 
+// ---------------------------------------------------------------------------- +template +auto TupleRef(T&& t) -> decltype( + TupleRefImpl(std::forward(t), + phmap::make_index_sequence< + std::tuple_size::type>::value>())) { + return TupleRefImpl( + std::forward(t), + phmap::make_index_sequence< + std::tuple_size::type>::value>()); +} + +template +decltype(std::declval()(std::declval(), std::piecewise_construct, + std::declval>(), std::declval())) +DecomposePairImpl(F&& f, std::pair, V> p) { + const auto& key = std::get<0>(p.first); + return std::forward(f)(key, std::piecewise_construct, std::move(p.first), + std::move(p.second)); +} + +} // namespace memory_internal + + +// ---------------------------------------------------------------------------- +// R A W _ H A S H _ S E T +// ---------------------------------------------------------------------------- +// An open-addressing +// hashtable with quadratic probing. +// +// This is a low level hashtable on top of which different interfaces can be +// implemented, like flat_hash_set, node_hash_set, string_hash_set, etc. +// +// The table interface is similar to that of std::unordered_set. Notable +// differences are that most member functions support heterogeneous keys when +// BOTH the hash and eq functions are marked as transparent. They do so by +// providing a typedef called `is_transparent`. +// +// When heterogeneous lookup is enabled, functions that take key_type act as if +// they have an overload set like: +// +// iterator find(const key_type& key); +// template +// iterator find(const K& key); +// +// size_type erase(const key_type& key); +// template +// size_type erase(const K& key); +// +// std::pair equal_range(const key_type& key); +// template +// std::pair equal_range(const K& key); +// +// When heterogeneous lookup is disabled, only the explicit `key_type` overloads +// exist. +// +// find() also supports passing the hash explicitly: +// +// iterator find(const key_type& key, size_t hash); +// template +// iterator find(const U& key, size_t hash); +// +// In addition the pointer to element and iterator stability guarantees are +// weaker: all iterators and pointers are invalidated after a new element is +// inserted. +// +// IMPLEMENTATION DETAILS +// +// The table stores elements inline in a slot array. In addition to the slot +// array the table maintains some control state per slot. The extra state is one +// byte per slot and stores empty or deleted marks, or alternatively 7 bits from +// the hash of an occupied slot. The table is split into logical groups of +// slots, like so: +// +// Group 1 Group 2 Group 3 +// +---------------+---------------+---------------+ +// | | | | | | | | | | | | | | | | | | | | | | | | | +// +---------------+---------------+---------------+ +// +// On lookup the hash is split into two parts: +// - H2: 7 bits (those stored in the control bytes) +// - H1: the rest of the bits +// The groups are probed using H1. For each group the slots are matched to H2 in +// parallel. Because H2 is 7 bits (128 states) and the number of slots per group +// is low (8 or 16) in almost all cases a match in H2 is also a lookup hit. +// +// On insert, once the right group is found (as in lookup), its slots are +// filled in order. +// +// On erase a slot is cleared. In case the group did not have any empty slots +// before the erase, the erased slot is marked as deleted. +// +// Groups without empty slots (but maybe with deleted slots) extend the probe +// sequence. The probing algorithm is quadratic. 
Given N the number of groups, +// the probing function for the i'th probe is: +// +// P(0) = H1 % N +// +// P(i) = (P(i - 1) + i) % N +// +// This probing function guarantees that after N probes, all the groups of the +// table will be probed exactly once. +// ---------------------------------------------------------------------------- +template +class raw_hash_set +{ + using PolicyTraits = hash_policy_traits; + using KeyArgImpl = + KeyArg::value && IsTransparent::value>; + +public: + using init_type = typename PolicyTraits::init_type; + using key_type = typename PolicyTraits::key_type; + // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user + // code fixes! + using slot_type = typename PolicyTraits::slot_type; + using allocator_type = Alloc; + using size_type = size_t; + using difference_type = ptrdiff_t; + using hasher = Hash; + using key_equal = Eq; + using policy_type = Policy; + using value_type = typename PolicyTraits::value_type; + using reference = value_type&; + using const_reference = const value_type&; + using pointer = typename phmap::allocator_traits< + allocator_type>::template rebind_traits::pointer; + using const_pointer = typename phmap::allocator_traits< + allocator_type>::template rebind_traits::const_pointer; + + // Alias used for heterogeneous lookup functions. + // `key_arg` evaluates to `K` when the functors are transparent and to + // `key_type` otherwise. It permits template argument deduction on `K` for the + // transparent case. + template + using key_arg = typename KeyArgImpl::template type; + +private: + // Give an early error when key_type is not hashable/eq. + auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k)); + auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k)); + + using Layout = phmap::container_internal::Layout; + + static Layout MakeLayout(size_t capacity) { + assert(IsValidCapacity(capacity)); + return Layout(capacity + Group::kWidth + 1, capacity); + } + + using AllocTraits = phmap::allocator_traits; + using SlotAlloc = typename phmap::allocator_traits< + allocator_type>::template rebind_alloc; + using SlotAllocTraits = typename phmap::allocator_traits< + allocator_type>::template rebind_traits; + + static_assert(std::is_lvalue_reference::value, + "Policy::element() must return a reference"); + + template + struct SameAsElementReference + : std::is_same::type>::type, + typename std::remove_cv< + typename std::remove_reference::type>::type> {}; + + // An enabler for insert(T&&): T must be convertible to init_type or be the + // same as [cv] value_type [ref]. + // Note: we separate SameAsElementReference into its own type to avoid using + // reference unless we need to. MSVC doesn't seem to like it in some + // cases. + template + using RequiresInsertable = typename std::enable_if< + phmap::disjunction, + SameAsElementReference>::value, + int>::type; + + // RequiresNotInit is a workaround for gcc prior to 7.1. + // See https://godbolt.org/g/Y4xsUh. 
+    template
+    using RequiresNotInit =
+        typename std::enable_if::value, int>::type;
+
+    template
+    using IsDecomposable = IsDecomposable;
+
+public:
+    static_assert(std::is_same::value,
+                  "Allocators with custom pointer types are not supported");
+    static_assert(std::is_same::value,
+                  "Allocators with custom pointer types are not supported");
+
+    class iterator
+    {
+        friend class raw_hash_set;
+
+    public:
+        using iterator_category = std::forward_iterator_tag;
+        using value_type = typename raw_hash_set::value_type;
+        using reference =
+            phmap::conditional_t;
+        using pointer = phmap::remove_reference_t*;
+        using difference_type = typename raw_hash_set::difference_type;
+
+        iterator() {}
+
+        // PRECONDITION: not an end() iterator.
+        reference operator*() const { return PolicyTraits::element(slot_); }
+
+        // PRECONDITION: not an end() iterator.
+        pointer operator->() const { return &operator*(); }
+
+        // PRECONDITION: not an end() iterator.
+        iterator& operator++() {
+            ++ctrl_;
+            ++slot_;
+            skip_empty_or_deleted();
+            return *this;
+        }
+        // PRECONDITION: not an end() iterator.
+        iterator operator++(int) {
+            auto tmp = *this;
+            ++*this;
+            return tmp;
+        }
+
+        friend bool operator==(const iterator& a, const iterator& b) {
+            return a.ctrl_ == b.ctrl_;
+        }
+        friend bool operator!=(const iterator& a, const iterator& b) {
+            return !(a == b);
+        }
+
+    private:
+        iterator(ctrl_t* ctrl) : ctrl_(ctrl) {}  // for end()
+        iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {}
+
+        void skip_empty_or_deleted() {
+            while (IsEmptyOrDeleted(*ctrl_)) {
+                // ctrl is not necessarily aligned to Group::kWidth. It is also likely
+                // to read past the space for ctrl bytes and into slots. This is ok
+                // because ctrl has sizeof() == 1 and slot has sizeof() >= 1 so there
+                // is no way to read outside the combined slot array.
+                uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
+                ctrl_ += shift;
+                slot_ += shift;
+            }
+        }
+
+        ctrl_t* ctrl_ = nullptr;
+        // To avoid uninitialized member warnings, put slot_ in an anonymous union.
+        // The member is not initialized on singleton and end iterators.
+        union {
+            slot_type* slot_;
+        };
+    };
+
+    class const_iterator
+    {
+        friend class raw_hash_set;
+
+    public:
+        using iterator_category = typename iterator::iterator_category;
+        using value_type = typename raw_hash_set::value_type;
+        using reference = typename raw_hash_set::const_reference;
+        using pointer = typename raw_hash_set::const_pointer;
+        using difference_type = typename raw_hash_set::difference_type;
+
+        const_iterator() {}
+        // Implicit construction from iterator.
+ const_iterator(iterator i) : inner_(std::move(i)) {} + + reference operator*() const { return *inner_; } + pointer operator->() const { return inner_.operator->(); } + + const_iterator& operator++() { + ++inner_; + return *this; + } + const_iterator operator++(int) { return inner_++; } + + friend bool operator==(const const_iterator& a, const const_iterator& b) { + return a.inner_ == b.inner_; + } + friend bool operator!=(const const_iterator& a, const const_iterator& b) { + return !(a == b); + } + + private: + const_iterator(const ctrl_t* ctrl, const slot_type* slot) + : inner_(const_cast(ctrl), const_cast(slot)) {} + + iterator inner_; + }; + + using node_type = node_handle, Alloc>; + using insert_return_type = InsertReturnType; + + raw_hash_set() noexcept( + std::is_nothrow_default_constructible::value&& + std::is_nothrow_default_constructible::value&& + std::is_nothrow_default_constructible::value) {} + + explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(), + const key_equal& eq = key_equal(), + const allocator_type& alloc = allocator_type()) + : ctrl_(EmptyGroup()), settings_(0, hash, eq, alloc) { + if (bucket_count) { + capacity_ = NormalizeCapacity(bucket_count); + reset_growth_left(); + initialize_slots(); + } + } + + raw_hash_set(size_t bucket_count, const hasher& hash, + const allocator_type& alloc) + : raw_hash_set(bucket_count, hash, key_equal(), alloc) {} + + raw_hash_set(size_t bucket_count, const allocator_type& alloc) + : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {} + + explicit raw_hash_set(const allocator_type& alloc) + : raw_hash_set(0, hasher(), key_equal(), alloc) {} + + template + raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0, + const hasher& hash = hasher(), const key_equal& eq = key_equal(), + const allocator_type& alloc = allocator_type()) + : raw_hash_set(bucket_count, hash, eq, alloc) { + insert(first, last); + } + + template + raw_hash_set(InputIter first, InputIter last, size_t bucket_count, + const hasher& hash, const allocator_type& alloc) + : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {} + + template + raw_hash_set(InputIter first, InputIter last, size_t bucket_count, + const allocator_type& alloc) + : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {} + + template + raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc) + : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {} + + // Instead of accepting std::initializer_list as the first + // argument like std::unordered_set does, we have two overloads + // that accept std::initializer_list and std::initializer_list. + // This is advantageous for performance. + // + // // Turns {"abc", "def"} into std::initializer_list, then + // // copies the strings into the set. + // std::unordered_set s = {"abc", "def"}; + // + // // Turns {"abc", "def"} into std::initializer_list, then + // // copies the strings into the set. + // phmap::flat_hash_set s = {"abc", "def"}; + // + // The same trick is used in insert(). + // + // The enabler is necessary to prevent this constructor from triggering where + // the copy constructor is meant to be called. + // + // phmap::flat_hash_set a, b{a}; + // + // RequiresNotInit is a workaround for gcc prior to 7.1. 
+ template = 0, RequiresInsertable = 0> + raw_hash_set(std::initializer_list init, size_t bucket_count = 0, + const hasher& hash = hasher(), const key_equal& eq = key_equal(), + const allocator_type& alloc = allocator_type()) + : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {} + + raw_hash_set(std::initializer_list init, size_t bucket_count = 0, + const hasher& hash = hasher(), const key_equal& eq = key_equal(), + const allocator_type& alloc = allocator_type()) + : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {} + + template = 0, RequiresInsertable = 0> + raw_hash_set(std::initializer_list init, size_t bucket_count, + const hasher& hash, const allocator_type& alloc) + : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {} + + raw_hash_set(std::initializer_list init, size_t bucket_count, + const hasher& hash, const allocator_type& alloc) + : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {} + + template = 0, RequiresInsertable = 0> + raw_hash_set(std::initializer_list init, size_t bucket_count, + const allocator_type& alloc) + : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {} + + raw_hash_set(std::initializer_list init, size_t bucket_count, + const allocator_type& alloc) + : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {} + + template = 0, RequiresInsertable = 0> + raw_hash_set(std::initializer_list init, const allocator_type& alloc) + : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {} + + raw_hash_set(std::initializer_list init, + const allocator_type& alloc) + : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {} + + raw_hash_set(const raw_hash_set& that) + : raw_hash_set(that, AllocTraits::select_on_container_copy_construction( + that.alloc_ref())) {} + + raw_hash_set(const raw_hash_set& that, const allocator_type& a) + : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) { + reserve(that.size()); + // Because the table is guaranteed to be empty, we can do something faster + // than a full `insert`. + for (const auto& v : that) { + const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v); + auto target = find_first_non_full(hash); + set_ctrl(target.offset, H2(hash)); + emplace_at(target.offset, v); + infoz_.RecordInsert(hash, target.probe_length); + } + size_ = that.size(); + growth_left() -= that.size(); + } + + raw_hash_set(raw_hash_set&& that) noexcept( + std::is_nothrow_copy_constructible::value&& + std::is_nothrow_copy_constructible::value&& + std::is_nothrow_copy_constructible::value) + : ctrl_(phmap::exchange(that.ctrl_, EmptyGroup())), + slots_(phmap::exchange(that.slots_, nullptr)), + size_(phmap::exchange(that.size_, 0)), + capacity_(phmap::exchange(that.capacity_, 0)), + infoz_(phmap::exchange(that.infoz_, HashtablezInfoHandle())), + // Hash, equality and allocator are copied instead of moved because + // `that` must be left valid. If Hash is std::function, moving it + // would create a nullptr functor that cannot be called. + settings_(that.settings_) { + // growth_left was copied above, reset the one from `that`. 
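+        // (Illustrative note, not in the original source: settings_ packs the
+        // growth counter together with the hash, eq and allocator functors,
+        // so copying settings_ above also copied growth_left; zeroing it on
+        // `that` keeps the moved-from table from accepting fast-path inserts
+        // into storage it no longer owns.)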
+ that.growth_left() = 0; + } + + raw_hash_set(raw_hash_set&& that, const allocator_type& a) + : ctrl_(EmptyGroup()), + slots_(nullptr), + size_(0), + capacity_(0), + settings_(0, that.hash_ref(), that.eq_ref(), a) { + if (a == that.alloc_ref()) { + std::swap(ctrl_, that.ctrl_); + std::swap(slots_, that.slots_); + std::swap(size_, that.size_); + std::swap(capacity_, that.capacity_); + std::swap(growth_left(), that.growth_left()); + std::swap(infoz_, that.infoz_); + } else { + reserve(that.size()); + // Note: this will copy elements of dense_set and unordered_set instead of + // moving them. This can be fixed if it ever becomes an issue. + for (auto& elem : that) insert(std::move(elem)); + } + } + + raw_hash_set& operator=(const raw_hash_set& that) { + raw_hash_set tmp(that, + AllocTraits::propagate_on_container_copy_assignment::value + ? that.alloc_ref() + : alloc_ref()); + swap(tmp); + return *this; + } + + raw_hash_set& operator=(raw_hash_set&& that) noexcept( + phmap::allocator_traits::is_always_equal::value&& + std::is_nothrow_move_assignable::value&& + std::is_nothrow_move_assignable::value) { + // TODO(sbenza): We should only use the operations from the noexcept clause + // to make sure we actually adhere to that contract. + return move_assign( + std::move(that), + typename AllocTraits::propagate_on_container_move_assignment()); + } + + ~raw_hash_set() { destroy_slots(); } + + iterator begin() { + auto it = iterator_at(0); + it.skip_empty_or_deleted(); + return it; + } + iterator end() { return {ctrl_ + capacity_}; } + + const_iterator begin() const { + return const_cast(this)->begin(); + } + const_iterator end() const { return const_cast(this)->end(); } + const_iterator cbegin() const { return begin(); } + const_iterator cend() const { return end(); } + + bool empty() const { return !size(); } + size_t size() const { return size_; } + size_t capacity() const { return capacity_; } + size_t max_size() const { return (std::numeric_limits::max)(); } + + PHMAP_ATTRIBUTE_REINITIALIZES void clear() { + // Iterating over this container is O(bucket_count()). When bucket_count() + // is much greater than size(), iteration becomes prohibitively expensive. + // For clear() it is more important to reuse the allocated array when the + // container is small because allocation takes comparatively long time + // compared to destruction of the elements of the container. So we pick the + // largest bucket_count() threshold for which iteration is still fast and + // past that we simply deallocate the array. + if (empty()) + return; + if (capacity_ > 127) { + destroy_slots(); + } else if (capacity_) { + for (size_t i = 0; i != capacity_; ++i) { + if (IsFull(ctrl_[i])) { + PolicyTraits::destroy(&alloc_ref(), slots_ + i); + } + } + size_ = 0; + reset_ctrl(); + reset_growth_left(); + } + assert(empty()); + infoz_.RecordStorageChanged(0, capacity_); + } + + // This overload kicks in when the argument is an rvalue of insertable and + // decomposable type other than init_type. + // + // flat_hash_map m; + // m.insert(std::make_pair("abc", 42)); + template = 0, + typename std::enable_if::value, int>::type = 0, + T* = nullptr> + std::pair insert(T&& value) { + return emplace(std::forward(value)); + } + + // This overload kicks in when the argument is a bitfield or an lvalue of + // insertable and decomposable type. 
+ // + // union { int n : 1; }; + // flat_hash_set s; + // s.insert(n); + // + // flat_hash_set s; + // const char* p = "hello"; + // s.insert(p); + // + // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace + // RequiresInsertable with RequiresInsertable. + // We are hitting this bug: https://godbolt.org/g/1Vht4f. + template < + class T, RequiresInsertable = 0, + typename std::enable_if::value, int>::type = 0> + std::pair insert(const T& value) { + return emplace(value); + } + + // This overload kicks in when the argument is an rvalue of init_type. Its + // purpose is to handle brace-init-list arguments. + // + // flat_hash_set s; + // s.insert({"abc", 42}); + std::pair insert(init_type&& value) { + return emplace(std::move(value)); + } + + template = 0, + typename std::enable_if::value, int>::type = 0, + T* = nullptr> + iterator insert(const_iterator, T&& value) { + return insert(std::forward(value)).first; + } + + // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace + // RequiresInsertable with RequiresInsertable. + // We are hitting this bug: https://godbolt.org/g/1Vht4f. + template < + class T, RequiresInsertable = 0, + typename std::enable_if::value, int>::type = 0> + iterator insert(const_iterator, const T& value) { + return insert(value).first; + } + + iterator insert(const_iterator, init_type&& value) { + return insert(std::move(value)).first; + } + + template + void insert(InputIt first, InputIt last) { + for (; first != last; ++first) insert(*first); + } + + template = 0, RequiresInsertable = 0> + void insert(std::initializer_list ilist) { + insert(ilist.begin(), ilist.end()); + } + + void insert(std::initializer_list ilist) { + insert(ilist.begin(), ilist.end()); + } + + insert_return_type insert(node_type&& node) { + if (!node) return {end(), false, node_type()}; + const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node)); + auto res = PolicyTraits::apply( + InsertSlot{*this, std::move(*CommonAccess::GetSlot(node))}, + elem); + if (res.second) { + CommonAccess::Reset(&node); + return {res.first, true, node_type()}; + } else { + return {res.first, false, std::move(node)}; + } + } + + insert_return_type insert(node_type&& node, size_t hash) { + if (!node) return {end(), false, node_type()}; + const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node)); + auto res = PolicyTraits::apply( + InsertSlotWithHash{*this, std::move(*CommonAccess::GetSlot(node)), hash}, + elem); + if (res.second) { + CommonAccess::Reset(&node); + return {res.first, true, node_type()}; + } else { + return {res.first, false, std::move(node)}; + } + } + + iterator insert(const_iterator, node_type&& node) { + return insert(std::move(node)).first; + } + + // This overload kicks in if we can deduce the key from args. This enables us + // to avoid constructing value_type if an entry with the same key already + // exists. + // + // For example: + // + // flat_hash_map m = {{"abc", "def"}}; + // // Creates no std::string copies and makes no heap allocations. + // m.emplace("abc", "xyz"); + template ::value, int>::type = 0> + std::pair emplace(Args&&... args) { + return PolicyTraits::apply(EmplaceDecomposable{*this}, + std::forward(args)...); + } + + // This overload kicks in if we cannot deduce the key from args. It constructs + // value_type unconditionally and then either moves it into the table or + // destroys. + template ::value, int>::type = 0> + std::pair emplace(Args&&... 
args) { + typename std::aligned_storage::type + raw; + slot_type* slot = reinterpret_cast(&raw); + + PolicyTraits::construct(&alloc_ref(), slot, std::forward(args)...); + const auto& elem = PolicyTraits::element(slot); + return PolicyTraits::apply(InsertSlot{*this, std::move(*slot)}, elem); + } + + template + iterator emplace_hint(const_iterator, Args&&... args) { + return emplace(std::forward(args)...).first; + } + + // Extension API: support for lazy emplace. + // + // Looks up key in the table. If found, returns the iterator to the element. + // Otherwise calls f with one argument of type raw_hash_set::constructor. f + // MUST call raw_hash_set::constructor with arguments as if a + // raw_hash_set::value_type is constructed, otherwise the behavior is + // undefined. + // + // For example: + // + // std::unordered_set s; + // // Makes ArenaStr even if "abc" is in the map. + // s.insert(ArenaString(&arena, "abc")); + // + // flat_hash_set s; + // // Makes ArenaStr only if "abc" is not in the map. + // s.lazy_emplace("abc", [&](const constructor& ctor) { + // ctor(&arena, "abc"); + // }); + // + // WARNING: This API is currently experimental. If there is a way to implement + // the same thing with the rest of the API, prefer that. + class constructor + { + friend class raw_hash_set; + + public: + template + void operator()(Args&&... args) const { + assert(*slot_); + PolicyTraits::construct(alloc_, *slot_, std::forward(args)...); + *slot_ = nullptr; + } + + private: + constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {} + + allocator_type* alloc_; + slot_type** slot_; + }; + + template + iterator lazy_emplace(const key_arg& key, F&& f) { + auto res = find_or_prepare_insert(key); + if (res.second) { + slot_type* slot = slots_ + res.first; + std::forward(f)(constructor(&alloc_ref(), &slot)); + assert(!slot); + } + return iterator_at(res.first); + } + + template + iterator lazy_emplace_with_hash(const key_arg& key, size_t &hash, F&& f) { + auto res = find_or_prepare_insert(key, hash); + if (res.second) { + slot_type* slot = slots_ + res.first; + std::forward(f)(constructor(&alloc_ref(), &slot)); + assert(!slot); + } + return iterator_at(res.first); + } + + + // Extension API: support for heterogeneous keys. + // + // std::unordered_set s; + // // Turns "abc" into std::string. + // s.erase("abc"); + // + // flat_hash_set s; + // // Uses "abc" directly without copying it into std::string. + // s.erase("abc"); + template + size_type erase(const key_arg& key) { + auto it = find(key); + if (it == end()) return 0; + _erase(it); + return 1; + } + + + iterator erase(const_iterator cit) { return erase(cit.inner_); } + + // Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`, + // this method returns void to reduce algorithmic complexity to O(1). In + // order to erase while iterating across a map, use the following idiom (which + // also works for standard containers): + // + // for (auto it = m.begin(), end = m.end(); it != end;) { + // if () { + // m._erase(it++); + // } else { + // ++it; + // } + // } + void _erase(iterator it) { + assert(it != end()); + PolicyTraits::destroy(&alloc_ref(), it.slot_); + erase_meta_only(it); + } + void _erase(const_iterator cit) { _erase(cit.inner_); } + + // This overload is necessary because otherwise erase(const K&) would be + // a better match if non-const iterator is passed as an argument. 
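+    // (Illustrative note, not in the original source: `_erase(it++)` below
+    // advances the iterator past the victim before its slot is marked
+    // empty/deleted, so the returned iterator already points at the next
+    // live element, matching std::unordered_set::erase(iterator) semantics.)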
+ iterator erase(iterator it) { _erase(it++); return it; } + + iterator erase(const_iterator first, const_iterator last) { + while (first != last) { + _erase(first++); + } + return last.inner_; + } + + // Moves elements from `src` into `this`. + // If the element already exists in `this`, it is left unmodified in `src`. + template + void merge(raw_hash_set& src) { // NOLINT + assert(this != &src); + for (auto it = src.begin(), e = src.end(); it != e; ++it) { + if (PolicyTraits::apply(InsertSlot{*this, std::move(*it.slot_)}, + PolicyTraits::element(it.slot_)) + .second) { + src.erase_meta_only(it); + } + } + } + + template + void merge(raw_hash_set&& src) { + merge(src); + } + + node_type extract(const_iterator position) { + auto node = + CommonAccess::Make(alloc_ref(), position.inner_.slot_); + erase_meta_only(position); + return node; + } + + template < + class K = key_type, + typename std::enable_if::value, int>::type = 0> + node_type extract(const key_arg& key) { + auto it = find(key); + return it == end() ? node_type() : extract(const_iterator{it}); + } + + void swap(raw_hash_set& that) noexcept( + IsNoThrowSwappable() && IsNoThrowSwappable() && + (!AllocTraits::propagate_on_container_swap::value || + IsNoThrowSwappable())) { + using std::swap; + swap(ctrl_, that.ctrl_); + swap(slots_, that.slots_); + swap(size_, that.size_); + swap(capacity_, that.capacity_); + swap(growth_left(), that.growth_left()); + swap(hash_ref(), that.hash_ref()); + swap(eq_ref(), that.eq_ref()); + swap(infoz_, that.infoz_); + if (AllocTraits::propagate_on_container_swap::value) { + swap(alloc_ref(), that.alloc_ref()); + } else { + // If the allocators do not compare equal it is officially undefined + // behavior. We choose to do nothing. + } + } + +#ifndef PHMAP_NON_DETERMINISTIC + template + bool dump(OutputArchive&); + + template + bool load(InputArchive&); +#endif + + void rehash(size_t n) { + if (n == 0 && capacity_ == 0) return; + if (n == 0 && size_ == 0) { + destroy_slots(); + infoz_.RecordStorageChanged(0, 0); + return; + } + // bitor is a faster way of doing `max` here. We will round up to the next + // power-of-2-minus-1, so bitor is good enough. + auto m = NormalizeCapacity((std::max)(n, size())); + // n == 0 unconditionally rehashes as per the standard. + if (n == 0 || m > capacity_) { + resize(m); + } + } + + void reserve(size_t n) { rehash(GrowthToLowerboundCapacity(n)); } + + // Extension API: support for heterogeneous keys. + // + // std::unordered_set s; + // // Turns "abc" into std::string. + // s.count("abc"); + // + // ch_set s; + // // Uses "abc" directly without copying it into std::string. + // s.count("abc"); + template + size_t count(const key_arg& key) const { + return find(key) == end() ? size_t(0) : size_t(1); + } + + // Issues CPU prefetch instructions for the memory needed to find or insert + // a key. Like all lookup functions, this support heterogeneous keys. + // + // NOTE: This is a very low level operation and should not be used without + // specific benchmarks indicating its importance. + void prefetch_hash(size_t hash) const { + (void)hash; +#if defined(__GNUC__) + auto seq = probe(hash); + __builtin_prefetch(static_cast(ctrl_ + seq.offset())); + __builtin_prefetch(static_cast(slots_ + seq.offset())); +#endif // __GNUC__ + } + + template + void prefetch(const key_arg& key) const { + prefetch_hash(HashElement{hash_ref()}(key)); + } + + // The API of find() has two extensions. + // + // 1. The hash can be passed by the user. It must be equal to the hash of the + // key. 
+ // + // 2. The type of the key argument doesn't have to be key_type. This is so + // called heterogeneous key support. + template + iterator find(const key_arg& key, size_t hash) { + auto seq = probe(hash); + while (true) { + Group g{ctrl_ + seq.offset()}; + for (int i : g.Match((h2_t)H2(hash))) { + if (PHMAP_PREDICT_TRUE(PolicyTraits::apply( + EqualElement{key, eq_ref()}, + PolicyTraits::element(slots_ + seq.offset((size_t)i))))) + return iterator_at(seq.offset((size_t)i)); + } + if (PHMAP_PREDICT_TRUE(g.MatchEmpty())) + return end(); + seq.next(); + } + } + template + iterator find(const key_arg& key) { + return find(key, HashElement{hash_ref()}(key)); + } + + template + const_iterator find(const key_arg& key, size_t hash) const { + return const_cast(this)->find(key, hash); + } + template + const_iterator find(const key_arg& key) const { + return find(key, HashElement{hash_ref()}(key)); + } + + template + bool contains(const key_arg& key) const { + return find(key) != end(); + } + + template + std::pair equal_range(const key_arg& key) { + auto it = find(key); + if (it != end()) return {it, std::next(it)}; + return {it, it}; + } + template + std::pair equal_range( + const key_arg& key) const { + auto it = find(key); + if (it != end()) return {it, std::next(it)}; + return {it, it}; + } + + size_t bucket_count() const { return capacity_; } + float load_factor() const { + return capacity_ ? static_cast(size()) / capacity_ : 0.0; + } + float max_load_factor() const { return 1.0f; } + void max_load_factor(float) { + // Does nothing. + } + + hasher hash_function() const { return hash_ref(); } + key_equal key_eq() const { return eq_ref(); } + allocator_type get_allocator() const { return alloc_ref(); } + + friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) { + if (a.size() != b.size()) return false; + const raw_hash_set* outer = &a; + const raw_hash_set* inner = &b; + if (outer->capacity() > inner->capacity()) + std::swap(outer, inner); + for (const value_type& elem : *outer) + if (!inner->has_element(elem)) return false; + return true; + } + + friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) { + return !(a == b); + } + + friend void swap(raw_hash_set& a, + raw_hash_set& b) noexcept(noexcept(a.swap(b))) { + a.swap(b); + } + +private: + template + friend struct phmap::container_internal::hashtable_debug_internal::HashtableDebugAccess; + + struct FindElement + { + template + const_iterator operator()(const K& key, Args&&...) const { + return s.find(key); + } + const raw_hash_set& s; + }; + + struct HashElement + { + template + size_t operator()(const K& key, Args&&...) const { + return phmap_mix()(h(key)); + } + const hasher& h; + }; + + template + struct EqualElement + { + template + bool operator()(const K2& lhs, Args&&...) const { + return eq(lhs, rhs); + } + const K1& rhs; + const key_equal& eq; + }; + + template + std::pair emplace_decomposable(const K& key, size_t hash, + Args&&... args) + { + auto res = find_or_prepare_insert(key, hash); + if (res.second) { + emplace_at(res.first, std::forward(args)...); + } + return {iterator_at(res.first), res.second}; + } + + struct EmplaceDecomposable + { + template + std::pair operator()(const K& key, Args&&... args) const { + return s.emplace_decomposable(key, typename raw_hash_set::HashElement{s.hash_ref()}(key), + std::forward(args)...); + } + raw_hash_set& s; + }; + + template + struct InsertSlot + { + template + std::pair operator()(const K& key, Args&&...) 
&& { + auto res = s.find_or_prepare_insert(key); + if (res.second) { + PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot); + } else if (do_destroy) { + PolicyTraits::destroy(&s.alloc_ref(), &slot); + } + return {s.iterator_at(res.first), res.second}; + } + raw_hash_set& s; + // Constructed slot. Either moved into place or destroyed. + slot_type&& slot; + }; + + template + struct InsertSlotWithHash + { + template + std::pair operator()(const K& key, Args&&...) && { + auto res = s.find_or_prepare_insert(key, hash); + if (res.second) { + PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot); + } else if (do_destroy) { + PolicyTraits::destroy(&s.alloc_ref(), &slot); + } + return {s.iterator_at(res.first), res.second}; + } + raw_hash_set& s; + // Constructed slot. Either moved into place or destroyed. + slot_type&& slot; + size_t &hash; + }; + + // "erases" the object from the container, except that it doesn't actually + // destroy the object. It only updates all the metadata of the class. + // This can be used in conjunction with Policy::transfer to move the object to + // another place. + void erase_meta_only(const_iterator it) { + assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator"); + --size_; + const size_t index = (size_t)(it.inner_.ctrl_ - ctrl_); + const size_t index_before = (index - Group::kWidth) & capacity_; + const auto empty_after = Group(it.inner_.ctrl_).MatchEmpty(); + const auto empty_before = Group(ctrl_ + index_before).MatchEmpty(); + + // We count how many consecutive non empties we have to the right and to the + // left of `it`. If the sum is >= kWidth then there is at least one probe + // window that might have seen a full group. + bool was_never_full = + empty_before && empty_after && + static_cast(empty_after.TrailingZeros() + + empty_before.LeadingZeros()) < Group::kWidth; + + set_ctrl(index, was_never_full ? kEmpty : kDeleted); + growth_left() += was_never_full; + infoz_.RecordErase(); + } + + void initialize_slots() { + assert(capacity_); + if (std::is_same>::value && + slots_ == nullptr) { + infoz_ = Sample(); + } + + auto layout = MakeLayout(capacity_); + char* mem = static_cast( + Allocate(&alloc_ref(), layout.AllocSize())); + ctrl_ = reinterpret_cast(layout.template Pointer<0>(mem)); + slots_ = layout.template Pointer<1>(mem); + reset_ctrl(); + reset_growth_left(); + infoz_.RecordStorageChanged(size_, capacity_); + } + + void destroy_slots() { + if (!capacity_) return; + for (size_t i = 0; i != capacity_; ++i) { + if (IsFull(ctrl_[i])) { + PolicyTraits::destroy(&alloc_ref(), slots_ + i); + } + } + auto layout = MakeLayout(capacity_); + // Unpoison before returning the memory to the allocator. 
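+        // (Illustrative note, not in the original source: under
+        // AddressSanitizer the table poisons vacated slots so stray reads of
+        // erased elements trap; the region must be unpoisoned here or the
+        // allocator's own bookkeeping would fault. Without a sanitizer these
+        // helpers compile to no-ops.)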
+ SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_); + Deallocate(&alloc_ref(), ctrl_, layout.AllocSize()); + ctrl_ = EmptyGroup(); + slots_ = nullptr; + size_ = 0; + capacity_ = 0; + growth_left() = 0; + } + + void resize(size_t new_capacity) { + assert(IsValidCapacity(new_capacity)); + auto* old_ctrl = ctrl_; + auto* old_slots = slots_; + const size_t old_capacity = capacity_; + capacity_ = new_capacity; + initialize_slots(); + + for (size_t i = 0; i != old_capacity; ++i) { + if (IsFull(old_ctrl[i])) { + size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, + PolicyTraits::element(old_slots + i)); + auto target = find_first_non_full(hash); + size_t new_i = target.offset; + set_ctrl(new_i, H2(hash)); + PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i); + } + } + if (old_capacity) { + SanitizerUnpoisonMemoryRegion(old_slots, + sizeof(slot_type) * old_capacity); + auto layout = MakeLayout(old_capacity); + Deallocate(&alloc_ref(), old_ctrl, + layout.AllocSize()); + } + } + + void drop_deletes_without_resize() PHMAP_ATTRIBUTE_NOINLINE { + assert(IsValidCapacity(capacity_)); + assert(!is_small()); + // Algorithm: + // - mark all DELETED slots as EMPTY + // - mark all FULL slots as DELETED + // - for each slot marked as DELETED + // hash = Hash(element) + // target = find_first_non_full(hash) + // if target is in the same group + // mark slot as FULL + // else if target is EMPTY + // transfer element to target + // mark slot as EMPTY + // mark target as FULL + // else if target is DELETED + // swap current element with target element + // mark target as FULL + // repeat procedure for current slot with moved from element (target) + ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_); + typename std::aligned_storage::type + raw; + slot_type* slot = reinterpret_cast(&raw); + for (size_t i = 0; i != capacity_; ++i) { + if (!IsDeleted(ctrl_[i])) continue; + size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, + PolicyTraits::element(slots_ + i)); + auto target = find_first_non_full(hash); + size_t new_i = target.offset; + + // Verify if the old and new i fall within the same group wrt the hash. + // If they do, we don't need to move the object as it falls already in the + // best probe we can. + const auto probe_index = [&](size_t pos) { + return ((pos - probe(hash).offset()) & capacity_) / Group::kWidth; + }; + + // Element doesn't move. + if (PHMAP_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) { + set_ctrl(i, H2(hash)); + continue; + } + if (IsEmpty(ctrl_[new_i])) { + // Transfer element to the empty spot. + // set_ctrl poisons/unpoisons the slots so we have to call it at the + // right time. + set_ctrl(new_i, H2(hash)); + PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i); + set_ctrl(i, kEmpty); + } else { + assert(IsDeleted(ctrl_[new_i])); + set_ctrl(new_i, H2(hash)); + // Until we are done rehashing, DELETED marks previously FULL slots. + // Swap i and new_i elements. + PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i); + PolicyTraits::transfer(&alloc_ref(), slots_ + i, slots_ + new_i); + PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slot); + --i; // repeat + } + } + reset_growth_left(); + } + + void rehash_and_grow_if_necessary() { + if (capacity_ == 0) { + resize(1); + } else if (size() <= CapacityToGrowth(capacity()) / 2) { + // Squash DELETED without growing if there is enough capacity. + drop_deletes_without_resize(); + } else { + // Otherwise grow the container. 
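+            // Valid capacities are of the form 2^k - 1, so doubling-plus-one
+            // yields the next one: 2 * (2^k - 1) + 1 = 2^(k+1) - 1.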
+ resize(capacity_ * 2 + 1); + } + } + + bool has_element(const value_type& elem, size_t hash) const { + auto seq = probe(hash); + while (true) { + Group g{ctrl_ + seq.offset()}; + for (int i : g.Match((h2_t)H2(hash))) { + if (PHMAP_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset((size_t)i)) == + elem)) + return true; + } + if (PHMAP_PREDICT_TRUE(g.MatchEmpty())) return false; + seq.next(); + assert(seq.index() < capacity_ && "full table!"); + } + return false; + } + + bool has_element(const value_type& elem) const { + size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem); + return has_element(elem, hash); + } + + // Probes the raw_hash_set with the probe sequence for hash and returns the + // pointer to the first empty or deleted slot. + // NOTE: this function must work with tables having both kEmpty and kDelete + // in one group. Such tables appears during drop_deletes_without_resize. + // + // This function is very useful when insertions happen and: + // - the input is already a set + // - there are enough slots + // - the element with the hash is not in the table + struct FindInfo + { + size_t offset; + size_t probe_length; + }; + FindInfo find_first_non_full(size_t hash) { + auto seq = probe(hash); + while (true) { + Group g{ctrl_ + seq.offset()}; + auto mask = g.MatchEmptyOrDeleted(); + if (mask) { + return {seq.offset((size_t)mask.LowestBitSet()), seq.index()}; + } + assert(seq.index() < capacity_ && "full table!"); + seq.next(); + } + } + + // TODO(alkis): Optimize this assuming *this and that don't overlap. + raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) { + raw_hash_set tmp(std::move(that)); + swap(tmp); + return *this; + } + raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) { + raw_hash_set tmp(std::move(that), alloc_ref()); + swap(tmp); + return *this; + } + +protected: + template + std::pair find_or_prepare_insert(const K& key, size_t hash) { + auto seq = probe(hash); + while (true) { + Group g{ctrl_ + seq.offset()}; + for (int i : g.Match((h2_t)H2(hash))) { + if (PHMAP_PREDICT_TRUE(PolicyTraits::apply( + EqualElement{key, eq_ref()}, + PolicyTraits::element(slots_ + seq.offset((size_t)i))))) + return {seq.offset((size_t)i), false}; + } + if (PHMAP_PREDICT_TRUE(g.MatchEmpty())) break; + seq.next(); + } + return {prepare_insert(hash), true}; + } + + template + std::pair find_or_prepare_insert(const K& key) { + return find_or_prepare_insert(key, HashElement{hash_ref()}(key)); + } + + size_t prepare_insert(size_t hash) PHMAP_ATTRIBUTE_NOINLINE { + auto target = find_first_non_full(hash); + if (PHMAP_PREDICT_FALSE(growth_left() == 0 && + !IsDeleted(ctrl_[target.offset]))) { + rehash_and_grow_if_necessary(); + target = find_first_non_full(hash); + } + ++size_; + growth_left() -= IsEmpty(ctrl_[target.offset]); + set_ctrl(target.offset, H2(hash)); + infoz_.RecordInsert(hash, target.probe_length); + return target.offset; + } + + // Constructs the value in the space pointed by the iterator. This only works + // after an unsuccessful find_or_prepare_insert() and before any other + // modifications happen in the raw_hash_set. + // + // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where + // k is the key decomposed from `forward(args)...`, and the bool + // returned by find_or_prepare_insert(k) was true. + // POSTCONDITION: *m.iterator_at(i) == value_type(forward(args)...). + template + void emplace_at(size_t i, Args&&... 
args) { + PolicyTraits::construct(&alloc_ref(), slots_ + i, + std::forward(args)...); + + assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) == + iterator_at(i) && + "constructed value does not match the lookup key"); + } + + iterator iterator_at(size_t i) { return {ctrl_ + i, slots_ + i}; } + const_iterator iterator_at(size_t i) const { return {ctrl_ + i, slots_ + i}; } + +private: + friend struct RawHashSetTestOnlyAccess; + + probe_seq probe(size_t hash) const { + return probe_seq(H1(hash, ctrl_), capacity_); + } + + // Reset all ctrl bytes back to kEmpty, except the sentinel. + void reset_ctrl() { + std::memset(ctrl_, kEmpty, capacity_ + Group::kWidth); + ctrl_[capacity_] = kSentinel; + SanitizerPoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_); + } + + void reset_growth_left() { + growth_left() = CapacityToGrowth(capacity()) - size_; + } + + // Sets the control byte, and if `i < Group::kWidth`, set the cloned byte at + // the end too. + void set_ctrl(size_t i, ctrl_t h) { + assert(i < capacity_); + + if (IsFull(h)) { + SanitizerUnpoisonObject(slots_ + i); + } else { + SanitizerPoisonObject(slots_ + i); + } + + ctrl_[i] = h; + ctrl_[((i - Group::kWidth) & capacity_) + 1 + + ((Group::kWidth - 1) & capacity_)] = h; + } + + size_t& growth_left() { return settings_.template get<0>(); } + + template class RefSet, + class M, class P, class H, class E, class A> + friend class parallel_hash_set; + + template class RefSet, + class M, class P, class H, class E, class A> + friend class parallel_hash_map; + + // The representation of the object has two modes: + // - small: For capacities < kWidth-1 + // - large: For the rest. + // + // Differences: + // - In small mode we are able to use the whole capacity. The extra control + // bytes give us at least one "empty" control byte to stop the iteration. + // This is important to make 1 a valid capacity. + // + // - In small mode only the first `capacity()` control bytes after the + // sentinel are valid. The rest contain dummy kEmpty values that do not + // represent a real slot. This is important to take into account on + // find_first_non_full(), where we never try ShouldInsertBackwards() for + // small tables. + bool is_small() const { return capacity_ < Group::kWidth - 1; } + + hasher& hash_ref() { return settings_.template get<1>(); } + const hasher& hash_ref() const { return settings_.template get<1>(); } + key_equal& eq_ref() { return settings_.template get<2>(); } + const key_equal& eq_ref() const { return settings_.template get<2>(); } + allocator_type& alloc_ref() { return settings_.template get<3>(); } + const allocator_type& alloc_ref() const { + return settings_.template get<3>(); + } + + // TODO(alkis): Investigate removing some of these fields: + // - ctrl/slots can be derived from each other + // - size can be moved into the slot array + ctrl_t* ctrl_ = EmptyGroup(); // [(capacity + 1) * ctrl_t] + slot_type* slots_ = nullptr; // [capacity * slot_type] + size_t size_ = 0; // number of full slots + size_t capacity_ = 0; // total number of slots + HashtablezInfoHandle infoz_; + phmap::container_internal::CompressedTuple + settings_{0, hasher{}, key_equal{}, allocator_type{}}; +}; + + +// -------------------------------------------------------------------------- +// -------------------------------------------------------------------------- +template +class raw_hash_map : public raw_hash_set +{ + // P is Policy. 
It's passed as a template argument to support maps that have + // incomplete types as values, as in unordered_map. + // MappedReference<> may be a non-reference type. + template + using MappedReference = decltype(P::value( + std::addressof(std::declval()))); + + // MappedConstReference<> may be a non-reference type. + template + using MappedConstReference = decltype(P::value( + std::addressof(std::declval()))); + + using KeyArgImpl = + KeyArg::value && IsTransparent::value>; + + using Base = raw_hash_set; + +public: + using key_type = typename Policy::key_type; + using mapped_type = typename Policy::mapped_type; + template + using key_arg = typename KeyArgImpl::template type; + + static_assert(!std::is_reference::value, ""); + // TODO(alkis): remove this assertion and verify that reference mapped_type is + // supported. + static_assert(!std::is_reference::value, ""); + + using iterator = typename raw_hash_map::raw_hash_set::iterator; + using const_iterator = typename raw_hash_map::raw_hash_set::const_iterator; + + raw_hash_map() {} + using Base::raw_hash_set; // use raw_hash_set constructor + + // The last two template parameters ensure that both arguments are rvalues + // (lvalue arguments are handled by the overloads below). This is necessary + // for supporting bitfield arguments. + // + // union { int n : 1; }; + // flat_hash_map m; + // m.insert_or_assign(n, n); + template + std::pair insert_or_assign(key_arg&& k, V&& v) { + return insert_or_assign_impl(std::forward(k), std::forward(v)); + } + + template + std::pair insert_or_assign(key_arg&& k, const V& v) { + return insert_or_assign_impl(std::forward(k), v); + } + + template + std::pair insert_or_assign(const key_arg& k, V&& v) { + return insert_or_assign_impl(k, std::forward(v)); + } + + template + std::pair insert_or_assign(const key_arg& k, const V& v) { + return insert_or_assign_impl(k, v); + } + + template + iterator insert_or_assign(const_iterator, key_arg&& k, V&& v) { + return insert_or_assign(std::forward(k), std::forward(v)).first; + } + + template + iterator insert_or_assign(const_iterator, key_arg&& k, const V& v) { + return insert_or_assign(std::forward(k), v).first; + } + + template + iterator insert_or_assign(const_iterator, const key_arg& k, V&& v) { + return insert_or_assign(k, std::forward(v)).first; + } + + template + iterator insert_or_assign(const_iterator, const key_arg& k, const V& v) { + return insert_or_assign(k, v).first; + } + + template ::value, int>::type = 0, + K* = nullptr> + std::pair try_emplace(key_arg&& k, Args&&... args) { + return try_emplace_impl(std::forward(k), std::forward(args)...); + } + + template ::value, int>::type = 0> + std::pair try_emplace(const key_arg& k, Args&&... args) { + return try_emplace_impl(k, std::forward(args)...); + } + + template + iterator try_emplace(const_iterator, key_arg&& k, Args&&... args) { + return try_emplace(std::forward(k), std::forward(args)...).first; + } + + template + iterator try_emplace(const_iterator, const key_arg& k, Args&&... args) { + return try_emplace(k, std::forward(args)...).first; + } + + template + MappedReference
<P>
at(const key_arg& key) { + auto it = this->find(key); + if (it == this->end()) + phmap::base_internal::ThrowStdOutOfRange("phmap at(): lookup non-existent key"); + return Policy::value(&*it); + } + + template + MappedConstReference
<P>
at(const key_arg& key) const { + auto it = this->find(key); + if (it == this->end()) + phmap::base_internal::ThrowStdOutOfRange("phmap at(): lookup non-existent key"); + return Policy::value(&*it); + } + + template + MappedReference
<P>
operator[](key_arg&& key) { + return Policy::value(&*try_emplace(std::forward(key)).first); + } + + template + MappedReference
<P>
operator[](const key_arg& key) { + return Policy::value(&*try_emplace(key).first); + } + +private: + template + std::pair insert_or_assign_impl(K&& k, V&& v) { + auto res = this->find_or_prepare_insert(k); + if (res.second) + this->emplace_at(res.first, std::forward(k), std::forward(v)); + else + Policy::value(&*this->iterator_at(res.first)) = std::forward(v); + return {this->iterator_at(res.first), res.second}; + } + + template + std::pair try_emplace_impl(K&& k, Args&&... args) { + auto res = this->find_or_prepare_insert(k); + if (res.second) + this->emplace_at(res.first, std::piecewise_construct, + std::forward_as_tuple(std::forward(k)), + std::forward_as_tuple(std::forward(args)...)); + return {this->iterator_at(res.first), res.second}; + } +}; + +// ---------------------------------------------------------------------------- +// ---------------------------------------------------------------------------- +// Returns "random" seed. +inline size_t RandomSeed() +{ +#if PHMAP_HAVE_THREAD_LOCAL + static thread_local size_t counter = 0; + size_t value = ++counter; +#else // PHMAP_HAVE_THREAD_LOCAL + static std::atomic counter(0); + size_t value = counter.fetch_add(1, std::memory_order_relaxed); +#endif // PHMAP_HAVE_THREAD_LOCAL + return value ^ static_cast(reinterpret_cast(&counter)); +} + +// ---------------------------------------------------------------------------- +// ---------------------------------------------------------------------------- +template class RefSet, + class Mtx_, + class Policy, class Hash, class Eq, class Alloc> +class parallel_hash_set +{ + using PolicyTraits = hash_policy_traits; + using KeyArgImpl = + KeyArg::value && IsTransparent::value>; + + static_assert(N <= 12, "N = 12 means 4096 hash tables!"); + constexpr static size_t num_tables = 1 << N; + constexpr static size_t mask = num_tables - 1; + +public: + using EmbeddedSet = RefSet; + using EmbeddedIterator= typename EmbeddedSet::iterator; + using EmbeddedConstIterator= typename EmbeddedSet::const_iterator; + using init_type = typename PolicyTraits::init_type; + using key_type = typename PolicyTraits::key_type; + using slot_type = typename PolicyTraits::slot_type; + using allocator_type = Alloc; + using size_type = size_t; + using difference_type = ptrdiff_t; + using hasher = Hash; + using key_equal = Eq; + using policy_type = Policy; + using value_type = typename PolicyTraits::value_type; + using reference = value_type&; + using const_reference = const value_type&; + using pointer = typename phmap::allocator_traits< + allocator_type>::template rebind_traits::pointer; + using const_pointer = typename phmap::allocator_traits< + allocator_type>::template rebind_traits::const_pointer; + + // Alias used for heterogeneous lookup functions. + // `key_arg` evaluates to `K` when the functors are transparent and to + // `key_type` otherwise. It permits template argument deduction on `K` for the + // transparent case. + // -------------------------------------------------------------------- + template + using key_arg = typename KeyArgImpl::template type; + +protected: + using Lockable = phmap::LockableImpl; + + // -------------------------------------------------------------------- + struct alignas(64) Inner : public Lockable + { + bool operator==(const Inner& o) const + { + typename Lockable::SharedLocks l(const_cast(*this), const_cast(o)); + return set_ == o.set_; + } + + EmbeddedSet set_; + }; + +private: + // Give an early error when key_type is not hashable/eq. 
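+    // (The two function declarations below are never defined; their decltype
+    // return types just force h(k) and eq(k, k) to be well-formed, so a broken
+    // Hash or Eq fails here with a readable error instead of deep inside the
+    // table internals.)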
+ // -------------------------------------------------------------------- + auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k)); + auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k)); + + using AllocTraits = phmap::allocator_traits; + + static_assert(std::is_lvalue_reference::value, + "Policy::element() must return a reference"); + + template + struct SameAsElementReference : std::is_same< + typename std::remove_cv::type>::type, + typename std::remove_cv::type>::type> {}; + + // An enabler for insert(T&&): T must be convertible to init_type or be the + // same as [cv] value_type [ref]. + // Note: we separate SameAsElementReference into its own type to avoid using + // reference unless we need to. MSVC doesn't seem to like it in some + // cases. + // -------------------------------------------------------------------- + template + using RequiresInsertable = typename std::enable_if< + phmap::disjunction, + SameAsElementReference>::value, + int>::type; + + // RequiresNotInit is a workaround for gcc prior to 7.1. + // See https://godbolt.org/g/Y4xsUh. + template + using RequiresNotInit = + typename std::enable_if::value, int>::type; + + template + using IsDecomposable = IsDecomposable; + +public: + static_assert(std::is_same::value, + "Allocators with custom pointer types are not supported"); + static_assert(std::is_same::value, + "Allocators with custom pointer types are not supported"); + + // --------------------- i t e r a t o r ------------------------------ + class iterator + { + friend class parallel_hash_set; + + public: + using iterator_category = std::forward_iterator_tag; + using value_type = typename parallel_hash_set::value_type; + using reference = + phmap::conditional_t; + using pointer = phmap::remove_reference_t*; + using difference_type = typename parallel_hash_set::difference_type; + using Inner = typename parallel_hash_set::Inner; + using EmbeddedSet = typename parallel_hash_set::EmbeddedSet; + using EmbeddedIterator = typename EmbeddedSet::iterator; + + iterator() {} + + reference operator*() const { return *it_; } + pointer operator->() const { return &operator*(); } + + iterator& operator++() { + assert(inner_); // null inner means we are already at the end + ++it_; + skip_empty(); + return *this; + } + + iterator operator++(int) { + assert(inner_); // null inner means we are already at the end + auto tmp = *this; + ++*this; + return tmp; + } + + friend bool operator==(const iterator& a, const iterator& b) { + return a.inner_ == b.inner_ && (!a.inner_ || a.it_ == b.it_); + } + + friend bool operator!=(const iterator& a, const iterator& b) { + return !(a == b); + } + + private: + iterator(Inner *inner, Inner *inner_end, const EmbeddedIterator& it) : + inner_(inner), inner_end_(inner_end), it_(it) { // for begin() and end() + if (inner) + it_end_ = inner->set_.end(); + } + + void skip_empty() { + while (it_ == it_end_) { + ++inner_; + if (inner_ == inner_end_) { + inner_ = nullptr; // marks end() + break; + } + else { + it_ = inner_->set_.begin(); + it_end_ = inner_->set_.end(); + } + } + } + + Inner *inner_ = nullptr; + Inner *inner_end_ = nullptr; + EmbeddedIterator it_, it_end_; + }; + + // --------------------- c o n s t i t e r a t o r ----------------- + class const_iterator + { + friend class parallel_hash_set; + + public: + using iterator_category = typename iterator::iterator_category; + using value_type = typename parallel_hash_set::value_type; + using reference = typename parallel_hash_set::const_reference; + using 
pointer = typename parallel_hash_set::const_pointer; + using difference_type = typename parallel_hash_set::difference_type; + using Inner = typename parallel_hash_set::Inner; + + const_iterator() {} + // Implicit construction from iterator. + const_iterator(iterator i) : iter_(std::move(i)) {} + + reference operator*() const { return *(iter_); } + pointer operator->() const { return iter_.operator->(); } + + const_iterator& operator++() { + ++iter_; + return *this; + } + const_iterator operator++(int) { return iter_++; } + + friend bool operator==(const const_iterator& a, const const_iterator& b) { + return a.iter_ == b.iter_; + } + friend bool operator!=(const const_iterator& a, const const_iterator& b) { + return !(a == b); + } + + private: + const_iterator(const Inner *inner, const Inner *inner_end, const EmbeddedIterator& it) + : iter_(const_cast(inner), + const_cast(inner_end), + const_cast(it)) {} + + iterator iter_; + }; + + using node_type = node_handle, Alloc>; + using insert_return_type = InsertReturnType; + + // ------------------------- c o n s t r u c t o r s ------------------ + + parallel_hash_set() noexcept( + std::is_nothrow_default_constructible::value&& + std::is_nothrow_default_constructible::value&& + std::is_nothrow_default_constructible::value) {} + + explicit parallel_hash_set(size_t bucket_count, + const hasher& hash_param = hasher(), + const key_equal& eq = key_equal(), + const allocator_type& alloc = allocator_type()) { + for (auto& inner : sets_) + inner.set_ = EmbeddedSet(bucket_count / N, hash_param, eq, alloc); + } + + parallel_hash_set(size_t bucket_count, + const hasher& hash_param, + const allocator_type& alloc) + : parallel_hash_set(bucket_count, hash_param, key_equal(), alloc) {} + + parallel_hash_set(size_t bucket_count, const allocator_type& alloc) + : parallel_hash_set(bucket_count, hasher(), key_equal(), alloc) {} + + explicit parallel_hash_set(const allocator_type& alloc) + : parallel_hash_set(0, hasher(), key_equal(), alloc) {} + + template + parallel_hash_set(InputIter first, InputIter last, size_t bucket_count = 0, + const hasher& hash_param = hasher(), const key_equal& eq = key_equal(), + const allocator_type& alloc = allocator_type()) + : parallel_hash_set(bucket_count, hash_param, eq, alloc) { + insert(first, last); + } + + template + parallel_hash_set(InputIter first, InputIter last, size_t bucket_count, + const hasher& hash_param, const allocator_type& alloc) + : parallel_hash_set(first, last, bucket_count, hash_param, key_equal(), alloc) {} + + template + parallel_hash_set(InputIter first, InputIter last, size_t bucket_count, + const allocator_type& alloc) + : parallel_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {} + + template + parallel_hash_set(InputIter first, InputIter last, const allocator_type& alloc) + : parallel_hash_set(first, last, 0, hasher(), key_equal(), alloc) {} + + // Instead of accepting std::initializer_list as the first + // argument like std::unordered_set does, we have two overloads + // that accept std::initializer_list and std::initializer_list. + // This is advantageous for performance. + // + // // Turns {"abc", "def"} into std::initializer_list, then copies + // // the strings into the set. + // std::unordered_set s = {"abc", "def"}; + // + // // Turns {"abc", "def"} into std::initializer_list, then + // // copies the strings into the set. + // phmap::flat_hash_set s = {"abc", "def"}; + // + // The same trick is used in insert(). 
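+    //
+    // // The map case benefits in the same way (illustrative): the braced pairs
+    // // become std::initializer_list<std::pair<std::string, int>> (init_type),
+    // // so nothing is converted through the const-keyed value_type:
+    // phmap::parallel_flat_hash_map<std::string, int> m = {{"abc", 1}, {"def", 2}};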
+ // + // The enabler is necessary to prevent this constructor from triggering where + // the copy constructor is meant to be called. + // + // phmap::flat_hash_set a, b{a}; + // + // RequiresNotInit is a workaround for gcc prior to 7.1. + // -------------------------------------------------------------------- + template = 0, RequiresInsertable = 0> + parallel_hash_set(std::initializer_list init, size_t bucket_count = 0, + const hasher& hash_param = hasher(), const key_equal& eq = key_equal(), + const allocator_type& alloc = allocator_type()) + : parallel_hash_set(init.begin(), init.end(), bucket_count, hash_param, eq, alloc) {} + + parallel_hash_set(std::initializer_list init, size_t bucket_count = 0, + const hasher& hash_param = hasher(), const key_equal& eq = key_equal(), + const allocator_type& alloc = allocator_type()) + : parallel_hash_set(init.begin(), init.end(), bucket_count, hash_param, eq, alloc) {} + + template = 0, RequiresInsertable = 0> + parallel_hash_set(std::initializer_list init, size_t bucket_count, + const hasher& hash_param, const allocator_type& alloc) + : parallel_hash_set(init, bucket_count, hash_param, key_equal(), alloc) {} + + parallel_hash_set(std::initializer_list init, size_t bucket_count, + const hasher& hash_param, const allocator_type& alloc) + : parallel_hash_set(init, bucket_count, hash_param, key_equal(), alloc) {} + + template = 0, RequiresInsertable = 0> + parallel_hash_set(std::initializer_list init, size_t bucket_count, + const allocator_type& alloc) + : parallel_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {} + + parallel_hash_set(std::initializer_list init, size_t bucket_count, + const allocator_type& alloc) + : parallel_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {} + + template = 0, RequiresInsertable = 0> + parallel_hash_set(std::initializer_list init, const allocator_type& alloc) + : parallel_hash_set(init, 0, hasher(), key_equal(), alloc) {} + + parallel_hash_set(std::initializer_list init, + const allocator_type& alloc) + : parallel_hash_set(init, 0, hasher(), key_equal(), alloc) {} + + parallel_hash_set(const parallel_hash_set& that) + : parallel_hash_set(that, AllocTraits::select_on_container_copy_construction( + that.alloc_ref())) {} + + parallel_hash_set(const parallel_hash_set& that, const allocator_type& a) + : parallel_hash_set(0, that.hash_ref(), that.eq_ref(), a) { + for (size_t i=0; i::value&& + std::is_nothrow_copy_constructible::value&& + std::is_nothrow_copy_constructible::value) + : parallel_hash_set(std::move(that), that.alloc_ref()) { + } + + parallel_hash_set(parallel_hash_set&& that, const allocator_type& a) + { + for (size_t i=0; i::is_always_equal::value && + std::is_nothrow_move_assignable::value && + std::is_nothrow_move_assignable::value) { + for (size_t i=0; i(this)->begin(); } + const_iterator end() const { return const_cast(this)->end(); } + const_iterator cbegin() const { return begin(); } + const_iterator cend() const { return end(); } + + bool empty() const { return !size(); } + + size_t size() const { + size_t sz = 0; + for (const auto& inner : sets_) + sz += inner.set_.size(); + return sz; + } + + size_t capacity() const { + size_t c = 0; + for (const auto& inner : sets_) + c += inner.set_.capacity(); + return c; + } + + size_t max_size() const { return (std::numeric_limits::max)(); } + + PHMAP_ATTRIBUTE_REINITIALIZES void clear() { + for (auto& inner : sets_) + inner.set_.clear(); + } + + // This overload kicks in when the argument is an rvalue of insertable and + // decomposable 
type other than init_type. + // + // flat_hash_map m; + // m.insert(std::make_pair("abc", 42)); + // -------------------------------------------------------------------- + template = 0, + typename std::enable_if::value, int>::type = 0, + T* = nullptr> + std::pair insert(T&& value) { + return emplace(std::forward(value)); + } + + // This overload kicks in when the argument is a bitfield or an lvalue of + // insertable and decomposable type. + // + // union { int n : 1; }; + // flat_hash_set s; + // s.insert(n); + // + // flat_hash_set s; + // const char* p = "hello"; + // s.insert(p); + // + // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace + // RequiresInsertable with RequiresInsertable. + // We are hitting this bug: https://godbolt.org/g/1Vht4f. + // -------------------------------------------------------------------- + template < + class T, RequiresInsertable = 0, + typename std::enable_if::value, int>::type = 0> + std::pair insert(const T& value) { + return emplace(value); + } + + // This overload kicks in when the argument is an rvalue of init_type. Its + // purpose is to handle brace-init-list arguments. + // + // flat_hash_set> s; + // s.insert({"abc", 42}); + // -------------------------------------------------------------------- + std::pair insert(init_type&& value) { + return emplace(std::move(value)); + } + + template = 0, + typename std::enable_if::value, int>::type = 0, + T* = nullptr> + iterator insert(const_iterator, T&& value) { + return insert(std::forward(value)).first; + } + + // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace + // RequiresInsertable with RequiresInsertable. + // We are hitting this bug: https://godbolt.org/g/1Vht4f. + // -------------------------------------------------------------------- + template < + class T, RequiresInsertable = 0, + typename std::enable_if::value, int>::type = 0> + iterator insert(const_iterator, const T& value) { + return insert(value).first; + } + + iterator insert(const_iterator, init_type&& value) { + return insert(std::move(value)).first; + } + + template + void insert(InputIt first, InputIt last) { + for (; first != last; ++first) insert(*first); + } + + template = 0, RequiresInsertable = 0> + void insert(std::initializer_list ilist) { + insert(ilist.begin(), ilist.end()); + } + + void insert(std::initializer_list ilist) { + insert(ilist.begin(), ilist.end()); + } + + insert_return_type insert(node_type&& node) { + if (!node) + return {end(), false, node_type()}; + auto& key = node.key(); + size_t hashval = HashElement{hash_ref()}(key); + Inner& inner = sets_[subidx(hashval)]; + auto& set = inner.set_; + + typename Lockable::UniqueLock m(inner); + auto res = set.insert(std::move(node), hashval); + return { make_iterator(&inner, res.position), + res.inserted, + res.inserted ? node_type() : std::move(res.node) }; + } + + iterator insert(const_iterator, node_type&& node) { + return insert(std::move(node)).first; + } + + struct ReturnKey_ + { + template + Key operator()(Key&& k, const Args&...) const { + return std::forward(k); + } + }; + + template + std::pair emplace_decomposable(const K& key, Args&&... args) + { + size_t hashval = HashElement{hash_ref()}(key); + Inner& inner = sets_[subidx(hashval)]; + auto& set = inner.set_; + typename Lockable::UniqueLock m(inner); + return make_rv(&inner, set.emplace_decomposable(key, hashval, std::forward(args)...)); + } + + struct EmplaceDecomposable + { + template + std::pair operator()(const K& key, Args&&... 
args) const { + return s.emplace_decomposable(key, std::forward(args)...); + } + parallel_hash_set& s; + }; + + // This overload kicks in if we can deduce the key from args. This enables us + // to avoid constructing value_type if an entry with the same key already + // exists. + // + // For example: + // + // flat_hash_map m = {{"abc", "def"}}; + // // Creates no std::string copies and makes no heap allocations. + // m.emplace("abc", "xyz"); + // -------------------------------------------------------------------- + template ::value, int>::type = 0> + std::pair emplace(Args&&... args) { + return PolicyTraits::apply(EmplaceDecomposable{*this}, + std::forward(args)...); + } + + // This overload kicks in if we cannot deduce the key from args. It constructs + // value_type unconditionally and then either moves it into the table or + // destroys. + // -------------------------------------------------------------------- + template ::value, int>::type = 0> + std::pair emplace(Args&&... args) { + typename std::aligned_storage::type + raw; + slot_type* slot = reinterpret_cast(&raw); + + PolicyTraits::construct(&alloc_ref(), slot, std::forward(args)...); + const auto& elem = PolicyTraits::element(slot); + size_t hashval = HashElement{hash_ref()}(PolicyTraits::key(slot)); + Inner& inner = sets_[subidx(hashval)]; + auto& set = inner.set_; + typename Lockable::UniqueLock m(inner); + typename EmbeddedSet::template InsertSlotWithHash f { + inner, std::move(*slot), hashval}; + return make_rv(PolicyTraits::apply(f, elem)); + } + + template + iterator emplace_hint(const_iterator, Args&&... args) { + return emplace(std::forward(args)...).first; + } + + iterator make_iterator(Inner* inner, const EmbeddedIterator it) + { + if (it == inner->set_.end()) + return iterator(); + return iterator(inner, &sets_[0] + num_tables, it); + } + + std::pair make_rv(Inner* inner, + const std::pair& res) + { + return {iterator(inner, &sets_[0] + num_tables, res.first), res.second}; + } + + template + iterator lazy_emplace(const key_arg& key, F&& f) { + auto hashval = HashElement{hash_ref()}(key); + Inner& inner = sets_[subidx(hashval)]; + auto& set = inner.set_; + typename Lockable::UniqueLock m(inner); + return make_iterator(&inner, set.lazy_emplace(key, hashval, std::forward(f))); + } + + // Extension API: support for heterogeneous keys. + // + // std::unordered_set s; + // // Turns "abc" into std::string. + // s.erase("abc"); + // + // flat_hash_set s; + // // Uses "abc" directly without copying it into std::string. + // s.erase("abc"); + // -------------------------------------------------------------------- + template + size_type erase(const key_arg& key) { + auto hashval = HashElement{hash_ref()}(key); + Inner& inner = sets_[subidx(hashval)]; + auto& set = inner.set_; + typename Lockable::UpgradeLock m(inner); + auto it = set.find(key, hashval); + if (it == set.end()) + return 0; + + typename Lockable::UpgradeToUnique unique(m); + set._erase(it); + return 1; + } + + // -------------------------------------------------------------------- + iterator erase(const_iterator cit) { return erase(cit.iter_); } + + // Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`, + // this method returns void to reduce algorithmic complexity to O(1). 
In + // order to erase while iterating across a map, use the following idiom (which + // also works for standard containers): + // + // for (auto it = m.begin(), end = m.end(); it != end;) { + // if () { + // m._erase(it++); + // } else { + // ++it; + // } + // } + // -------------------------------------------------------------------- + void _erase(iterator it) { + assert(it.inner_ != nullptr); + it.inner_->set_._erase(it.it_); + } + void _erase(const_iterator cit) { _erase(cit.iter_); } + + // This overload is necessary because otherwise erase(const K&) would be + // a better match if non-const iterator is passed as an argument. + // -------------------------------------------------------------------- + iterator erase(iterator it) { _erase(it++); return it; } + + iterator erase(const_iterator first, const_iterator last) { + while (first != last) { + _erase(first++); + } + return last.iter_; + } + + // Moves elements from `src` into `this`. + // If the element already exists in `this`, it is left unmodified in `src`. + // -------------------------------------------------------------------- + template + void merge(parallel_hash_set& src) { // NOLINT + assert(this != &src); + if (this != &src) + { + for (size_t i=0; i + void merge(parallel_hash_set&& src) { + merge(src); + } + + node_type extract(const_iterator position) { + return position.iter_.inner_->set_.extract(EmbeddedConstIterator(position.iter_.it_)); + } + + template < + class K = key_type, + typename std::enable_if::value, int>::type = 0> + node_type extract(const key_arg& key) { + auto it = find(key); + return it == end() ? node_type() : extract(const_iterator{it}); + } + + void swap(parallel_hash_set& that) noexcept( + IsNoThrowSwappable() && + (!AllocTraits::propagate_on_container_swap::value || + IsNoThrowSwappable())) { + using std::swap; + for (size_t i=0; i target ? normalized : target); + } + + // Extension API: support for heterogeneous keys. + // + // std::unordered_set s; + // // Turns "abc" into std::string. + // s.count("abc"); + // + // ch_set s; + // // Uses "abc" directly without copying it into std::string. + // s.count("abc"); + // -------------------------------------------------------------------- + template + size_t count(const key_arg& key) const { + return find(key) == end() ? 0 : 1; + } + + // Issues CPU prefetch instructions for the memory needed to find or insert + // a key. Like all lookup functions, this support heterogeneous keys. + // + // NOTE: This is a very low level operation and should not be used without + // specific benchmarks indicating its importance. + // -------------------------------------------------------------------- + template + void prefetch(const key_arg& key) const { + (void)key; +#if 0 && defined(__GNUC__) + size_t hashval = HashElement{hash_ref()}(key); + const Inner& inner = sets_[subidx(hashval)]; + const auto& set = inner.set_; + typename Lockable::UniqueLock m(inner); + set.prefetch_hash(hashval); +#endif // __GNUC__ + } + + // The API of find() has two extensions. + // + // 1. The hash can be passed by the user. It must be equal to the hash of the + // key. + // + // 2. The type of the key argument doesn't have to be key_type. This is so + // called heterogeneous key support. 
+ // -------------------------------------------------------------------- + template + iterator find(const key_arg& key, size_t hashval) { + Inner& inner = sets_[subidx(hashval)]; + auto& set = inner.set_; + typename Lockable::SharedLock m(inner); + auto it = set.find(key, hashval); + return make_iterator(&inner, it); + } + + template + iterator find(const key_arg& key) { + return find(key, HashElement{hash_ref()}(key)); + } + + template + const_iterator find(const key_arg& key, size_t hashval) const { + return const_cast(this)->find(key, hashval); + } + + template + const_iterator find(const key_arg& key) const { + return find(key, HashElement{hash_ref()}(key)); + } + + template + bool contains(const key_arg& key) const { + return find(key) != end(); + } + + template + std::pair equal_range(const key_arg& key) { + auto it = find(key); + if (it != end()) return {it, std::next(it)}; + return {it, it}; + } + + template + std::pair equal_range( + const key_arg& key) const { + auto it = find(key); + if (it != end()) return {it, std::next(it)}; + return {it, it}; + } + + size_t bucket_count() const { + size_t sz = 0; + for (const auto& inner : sets_) + { + typename Lockable::SharedLock m(const_cast(inner)); + sz += inner.set_.bucket_count(); + } + return sz; + } + + float load_factor() const { + size_t capacity = bucket_count(); + return capacity ? static_cast(static_cast(size()) / capacity) : 0; + } + + float max_load_factor() const { return 1.0f; } + void max_load_factor(float) { + // Does nothing. + } + + hasher hash_function() const { return hash_ref(); } + key_equal key_eq() const { return eq_ref(); } + allocator_type get_allocator() const { return alloc_ref(); } + + friend bool operator==(const parallel_hash_set& a, const parallel_hash_set& b) { + return std::equal(a.sets_.begin(), a.sets_.end(), b.sets_.begin()); + } + + friend bool operator!=(const parallel_hash_set& a, const parallel_hash_set& b) { + return !(a == b); + } + + friend void swap(parallel_hash_set& a, + parallel_hash_set& b) noexcept(noexcept(a.swap(b))) { + a.swap(b); + } + +#ifndef PHMAP_NON_DETERMINISTIC + template + bool dump(OutputArchive& ar); + + template + bool load(InputArchive& ar); +#endif + +private: + template + friend struct phmap::container_internal::hashtable_debug_internal::HashtableDebugAccess; + + struct FindElement + { + template + const_iterator operator()(const K& key, Args&&...) const { + return s.find(key); + } + const parallel_hash_set& s; + }; + + struct HashElement + { + template + size_t operator()(const K& key, Args&&...) const { + return phmap_mix()(h(key)); + } + const hasher& h; + }; + + template + struct EqualElement + { + template + bool operator()(const K2& lhs, Args&&...) const { + return eq(lhs, rhs); + } + const K1& rhs; + const key_equal& eq; + }; + + // "erases" the object from the container, except that it doesn't actually + // destroy the object. It only updates all the metadata of the class. + // This can be used in conjunction with Policy::transfer to move the object to + // another place. 
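+    // (Typical sequence: PolicyTraits::transfer() the slot's contents to their
+    // new home first, then call erase_meta_only(); only the control byte and
+    // the size/growth counters are updated, the element itself is not destroyed.)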
+ // -------------------------------------------------------------------- + void erase_meta_only(const_iterator cit) { + auto &it = cit.iter_; + assert(it.set_ != nullptr); + it.set_.erase_meta_only(const_iterator(it.it_)); + } + + void drop_deletes_without_resize() PHMAP_ATTRIBUTE_NOINLINE { + for (auto& inner : sets_) + { + typename Lockable::UniqueLock m(inner); + inner.set_.drop_deletes_without_resize(); + } + } + + bool has_element(const value_type& elem) const { + size_t hashval = PolicyTraits::apply(HashElement{hash_ref()}, elem); + Inner& inner = sets_[subidx(hashval)]; + auto& set = inner.set_; + typename Lockable::SharedLock m(const_cast(inner)); + return set.has_element(elem, hashval); + } + + // TODO(alkis): Optimize this assuming *this and that don't overlap. + // -------------------------------------------------------------------- + parallel_hash_set& move_assign(parallel_hash_set&& that, std::true_type) { + parallel_hash_set tmp(std::move(that)); + swap(tmp); + return *this; + } + + parallel_hash_set& move_assign(parallel_hash_set&& that, std::false_type) { + parallel_hash_set tmp(std::move(that), alloc_ref()); + swap(tmp); + return *this; + } + +protected: + template + std::tuple + find_or_prepare_insert(const K& key, typename Lockable::UniqueLock &mutexlock) { + auto hashval = HashElement{hash_ref()}(key); + Inner& inner = sets_[subidx(hashval)]; + auto& set = inner.set_; + mutexlock = std::move(typename Lockable::UniqueLock(inner)); + auto p = set.find_or_prepare_insert(key, hashval); // std::pair + return std::make_tuple(&inner, p.first, p.second); + } + + iterator iterator_at(Inner *inner, + const EmbeddedIterator& it) { + return {inner, &sets_[0] + num_tables, it}; + } + const_iterator iterator_at(Inner *inner, + const EmbeddedIterator& it) const { + return {inner, &sets_[0] + num_tables, it}; + } + + static size_t subidx(size_t hashval) { + return (hashval ^ (hashval >> N)) & mask; + } + + template + size_t hash(const K& key) { + return HashElement{hash_ref()}(key); + } + + static size_t subcnt() { + return num_tables; + } + +private: + friend struct RawHashSetTestOnlyAccess; + + size_t growth_left() { + size_t sz = 0; + for (const auto& set : sets_) + sz += set.growth_left(); + return sz; + } + + hasher& hash_ref() { return sets_[0].set_.hash_ref(); } + const hasher& hash_ref() const { return sets_[0].set_.hash_ref(); } + key_equal& eq_ref() { return sets_[0].set_.eq_ref(); } + const key_equal& eq_ref() const { return sets_[0].set_.eq_ref(); } + allocator_type& alloc_ref() { return sets_[0].set_.alloc_ref(); } + const allocator_type& alloc_ref() const { + return sets_[0].set_.alloc_ref(); + } + + std::array sets_; +}; + +// -------------------------------------------------------------------------- +// -------------------------------------------------------------------------- +template class RefSet, + class Mtx_, + class Policy, class Hash, class Eq, class Alloc> +class parallel_hash_map : public parallel_hash_set +{ + // P is Policy. It's passed as a template argument to support maps that have + // incomplete types as values, as in unordered_map. + // MappedReference<> may be a non-reference type. + template + using MappedReference = decltype(P::value( + std::addressof(std::declval()))); + + // MappedConstReference<> may be a non-reference type. 
+ template + using MappedConstReference = decltype(P::value( + std::addressof(std::declval()))); + + using KeyArgImpl = + KeyArg::value && IsTransparent::value>; + + using Base = typename parallel_hash_map::parallel_hash_set; + using Lockable = phmap::LockableImpl; + +public: + using key_type = typename Policy::key_type; + using mapped_type = typename Policy::mapped_type; + template + using key_arg = typename KeyArgImpl::template type; + + static_assert(!std::is_reference::value, ""); + // TODO(alkis): remove this assertion and verify that reference mapped_type is + // supported. + static_assert(!std::is_reference::value, ""); + + using iterator = typename parallel_hash_map::parallel_hash_set::iterator; + using const_iterator = typename parallel_hash_map::parallel_hash_set::const_iterator; + + parallel_hash_map() {} + +#ifdef __INTEL_COMPILER + using Base::parallel_hash_set; +#else + using parallel_hash_map::parallel_hash_set::parallel_hash_set; +#endif + + // The last two template parameters ensure that both arguments are rvalues + // (lvalue arguments are handled by the overloads below). This is necessary + // for supporting bitfield arguments. + // + // union { int n : 1; }; + // flat_hash_map m; + // m.insert_or_assign(n, n); + template + std::pair insert_or_assign(key_arg&& k, V&& v) { + return insert_or_assign_impl(std::forward(k), std::forward(v)); + } + + template + std::pair insert_or_assign(key_arg&& k, const V& v) { + return insert_or_assign_impl(std::forward(k), v); + } + + template + std::pair insert_or_assign(const key_arg& k, V&& v) { + return insert_or_assign_impl(k, std::forward(v)); + } + + template + std::pair insert_or_assign(const key_arg& k, const V& v) { + return insert_or_assign_impl(k, v); + } + + template + iterator insert_or_assign(const_iterator, key_arg&& k, V&& v) { + return insert_or_assign(std::forward(k), std::forward(v)).first; + } + + template + iterator insert_or_assign(const_iterator, key_arg&& k, const V& v) { + return insert_or_assign(std::forward(k), v).first; + } + + template + iterator insert_or_assign(const_iterator, const key_arg& k, V&& v) { + return insert_or_assign(k, std::forward(v)).first; + } + + template + iterator insert_or_assign(const_iterator, const key_arg& k, const V& v) { + return insert_or_assign(k, v).first; + } + + template ::value, int>::type = 0, + K* = nullptr> + std::pair try_emplace(key_arg&& k, Args&&... args) { + return try_emplace_impl(std::forward(k), std::forward(args)...); + } + + template ::value, int>::type = 0> + std::pair try_emplace(const key_arg& k, Args&&... args) { + return try_emplace_impl(k, std::forward(args)...); + } + + template + iterator try_emplace(const_iterator, key_arg&& k, Args&&... args) { + return try_emplace(std::forward(k), std::forward(args)...).first; + } + + template + iterator try_emplace(const_iterator, const key_arg& k, Args&&... args) { + return try_emplace(k, std::forward(args)...).first; + } + + template + MappedReference
<P>
at(const key_arg& key) { + auto it = this->find(key); + if (it == this->end()) + phmap::base_internal::ThrowStdOutOfRange("phmap at(): lookup non-existent key"); + return Policy::value(&*it); + } + + template + MappedConstReference
<P>
at(const key_arg& key) const { + auto it = this->find(key); + if (it == this->end()) + phmap::base_internal::ThrowStdOutOfRange("phmap at(): lookup non-existent key"); + return Policy::value(&*it); + } + + template + MappedReference
<P>
operator[](key_arg&& key) { + return Policy::value(&*try_emplace(std::forward(key)).first); + } + + template + MappedReference
<P>
operator[](const key_arg& key) { + return Policy::value(&*try_emplace(key).first); + } + +private: + template + std::pair insert_or_assign_impl(K&& k, V&& v) { + typename Lockable::UniqueLock m; + auto res = this->find_or_prepare_insert(k, m); + typename Base::Inner *inner = std::get<0>(res); + if (std::get<2>(res)) + inner->set_.emplace_at(std::get<1>(res), std::forward(k), std::forward(v)); + else + Policy::value(&*inner->set_.iterator_at(std::get<1>(res))) = std::forward(v); + return {this->iterator_at(inner, inner->set_.iterator_at(std::get<1>(res))), + std::get<2>(res)}; + } + + template + std::pair try_emplace_impl(K&& k, Args&&... args) { + typename Lockable::UniqueLock m; + auto res = this->find_or_prepare_insert(k, m); + typename Base::Inner *inner = std::get<0>(res); + if (std::get<2>(res)) + inner->set_.emplace_at(std::get<1>(res), std::piecewise_construct, + std::forward_as_tuple(std::forward(k)), + std::forward_as_tuple(std::forward(args)...)); + return {this->iterator_at(inner, inner->set_.iterator_at(std::get<1>(res))), + std::get<2>(res)}; + } +}; + + +// Constructs T into uninitialized storage pointed by `ptr` using the args +// specified in the tuple. +// ---------------------------------------------------------------------------- +template +void ConstructFromTuple(Alloc* alloc, T* ptr, Tuple&& t) { + memory_internal::ConstructFromTupleImpl( + alloc, ptr, std::forward(t), + phmap::make_index_sequence< + std::tuple_size::type>::value>()); +} + +// Constructs T using the args specified in the tuple and calls F with the +// constructed value. +// ---------------------------------------------------------------------------- +template +decltype(std::declval()(std::declval())) WithConstructed( + Tuple&& t, F&& f) { + return memory_internal::WithConstructedImpl( + std::forward(t), + phmap::make_index_sequence< + std::tuple_size::type>::value>(), + std::forward(f)); +} + +// ---------------------------------------------------------------------------- +// Given arguments of an std::pair's consructor, PairArgs() returns a pair of +// tuples with references to the passed arguments. The tuples contain +// constructor arguments for the first and the second elements of the pair. +// +// The following two snippets are equivalent. +// +// 1. std::pair p(args...); +// +// 2. auto a = PairArgs(args...); +// std::pair p(std::piecewise_construct, +// std::move(p.first), std::move(p.second)); +// ---------------------------------------------------------------------------- +inline std::pair, std::tuple<>> PairArgs() { return {}; } + +template +std::pair, std::tuple> PairArgs(F&& f, S&& s) { + return {std::piecewise_construct, std::forward_as_tuple(std::forward(f)), + std::forward_as_tuple(std::forward(s))}; +} + +template +std::pair, std::tuple> PairArgs( + const std::pair& p) { + return PairArgs(p.first, p.second); +} + +template +std::pair, std::tuple> PairArgs(std::pair&& p) { + return PairArgs(std::forward(p.first), std::forward(p.second)); +} + +template +auto PairArgs(std::piecewise_construct_t, F&& f, S&& s) + -> decltype(std::make_pair(memory_internal::TupleRef(std::forward(f)), + memory_internal::TupleRef(std::forward(s)))) { + return std::make_pair(memory_internal::TupleRef(std::forward(f)), + memory_internal::TupleRef(std::forward(s))); +} + +// A helper function for implementing apply() in map policies. +// ---------------------------------------------------------------------------- +template +auto DecomposePair(F&& f, Args&&... 
args) + -> decltype(memory_internal::DecomposePairImpl( + std::forward(f), PairArgs(std::forward(args)...))) { + return memory_internal::DecomposePairImpl( + std::forward(f), PairArgs(std::forward(args)...)); +} + +// A helper function for implementing apply() in set policies. +// ---------------------------------------------------------------------------- +template +decltype(std::declval()(std::declval(), std::declval())) +DecomposeValue(F&& f, Arg&& arg) { + const auto& key = arg; + return std::forward(f)(key, std::forward(arg)); +} + + +// -------------------------------------------------------------------------- +// Policy: a policy defines how to perform different operations on +// the slots of the hashtable (see hash_policy_traits.h for the full interface +// of policy). +// +// Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The +// functor should accept a key and return size_t as hash. For best performance +// it is important that the hash function provides high entropy across all bits +// of the hash. +// +// Eq: a (possibly polymorphic) functor that compares two keys for equality. It +// should accept two (of possibly different type) keys and return a bool: true +// if they are equal, false if they are not. If two keys compare equal, then +// their hash values as defined by Hash MUST be equal. +// +// Allocator: an Allocator [https://devdocs.io/cpp/concept/allocator] with which +// the storage of the hashtable will be allocated and the elements will be +// constructed and destroyed. +// -------------------------------------------------------------------------- +template +struct FlatHashSetPolicy +{ + using slot_type = T; + using key_type = T; + using init_type = T; + using constant_iterators = std::true_type; + + template + static void construct(Allocator* alloc, slot_type* slot, Args&&... args) { + phmap::allocator_traits::construct(*alloc, slot, + std::forward(args)...); + } + + template + static void destroy(Allocator* alloc, slot_type* slot) { + phmap::allocator_traits::destroy(*alloc, slot); + } + + template + static void transfer(Allocator* alloc, slot_type* new_slot, + slot_type* old_slot) { + construct(alloc, new_slot, std::move(*old_slot)); + destroy(alloc, old_slot); + } + + static T& element(slot_type* slot) { return *slot; } + + template + static decltype(phmap::container_internal::DecomposeValue( + std::declval(), std::declval()...)) + apply(F&& f, Args&&... args) { + return phmap::container_internal::DecomposeValue( + std::forward(f), std::forward(args)...); + } + + static size_t space_used(const T*) { return 0; } +}; + +// -------------------------------------------------------------------------- +// -------------------------------------------------------------------------- +template +struct FlatHashMapPolicy +{ + using slot_policy = container_internal::map_slot_policy; + using slot_type = typename slot_policy::slot_type; + using key_type = K; + using mapped_type = V; + using init_type = std::pair; + + template + static void construct(Allocator* alloc, slot_type* slot, Args&&... 
args) { + slot_policy::construct(alloc, slot, std::forward(args)...); + } + + template + static void destroy(Allocator* alloc, slot_type* slot) { + slot_policy::destroy(alloc, slot); + } + + template + static void transfer(Allocator* alloc, slot_type* new_slot, + slot_type* old_slot) { + slot_policy::transfer(alloc, new_slot, old_slot); + } + + template + static decltype(phmap::container_internal::DecomposePair( + std::declval(), std::declval()...)) + apply(F&& f, Args&&... args) { + return phmap::container_internal::DecomposePair(std::forward(f), + std::forward(args)...); + } + + static size_t space_used(const slot_type*) { return 0; } + + static std::pair& element(slot_type* slot) { return slot->value; } + + static V& value(std::pair* kv) { return kv->second; } + static const V& value(const std::pair* kv) { return kv->second; } +}; + +template +struct node_hash_policy { + static_assert(std::is_lvalue_reference::value, ""); + + using slot_type = typename std::remove_cv< + typename std::remove_reference::type>::type*; + + template + static void construct(Alloc* alloc, slot_type* slot, Args&&... args) { + *slot = Policy::new_element(alloc, std::forward(args)...); + } + + template + static void destroy(Alloc* alloc, slot_type* slot) { + Policy::delete_element(alloc, *slot); + } + + template + static void transfer(Alloc*, slot_type* new_slot, slot_type* old_slot) { + *new_slot = *old_slot; + } + + static size_t space_used(const slot_type* slot) { + if (slot == nullptr) return Policy::element_space_used(nullptr); + return Policy::element_space_used(*slot); + } + + static Reference element(slot_type* slot) { return **slot; } + + template + static auto value(T* elem) -> decltype(P::value(elem)) { + return P::value(elem); + } + + template + static auto apply(Ts&&... ts) -> decltype(P::apply(std::forward(ts)...)) { + return P::apply(std::forward(ts)...); + } +}; + +// -------------------------------------------------------------------------- +// -------------------------------------------------------------------------- +template +struct NodeHashSetPolicy + : phmap::container_internal::node_hash_policy> +{ + using key_type = T; + using init_type = T; + using constant_iterators = std::true_type; + + template + static T* new_element(Allocator* alloc, Args&&... args) { + using ValueAlloc = + typename phmap::allocator_traits::template rebind_alloc; + ValueAlloc value_alloc(*alloc); + T* res = phmap::allocator_traits::allocate(value_alloc, 1); + phmap::allocator_traits::construct(value_alloc, res, + std::forward(args)...); + return res; + } + + template + static void delete_element(Allocator* alloc, T* elem) { + using ValueAlloc = + typename phmap::allocator_traits::template rebind_alloc; + ValueAlloc value_alloc(*alloc); + phmap::allocator_traits::destroy(value_alloc, elem); + phmap::allocator_traits::deallocate(value_alloc, elem, 1); + } + + template + static decltype(phmap::container_internal::DecomposeValue( + std::declval(), std::declval()...)) + apply(F&& f, Args&&... 
args) { + return phmap::container_internal::DecomposeValue( + std::forward(f), std::forward(args)...); + } + + static size_t element_space_used(const T*) { return sizeof(T); } +}; + +// -------------------------------------------------------------------------- +// -------------------------------------------------------------------------- +template +class NodeHashMapPolicy + : public phmap::container_internal::node_hash_policy< + std::pair&, NodeHashMapPolicy> +{ + using value_type = std::pair; + +public: + using key_type = Key; + using mapped_type = Value; + using init_type = std::pair; + + template + static value_type* new_element(Allocator* alloc, Args&&... args) { + using PairAlloc = typename phmap::allocator_traits< + Allocator>::template rebind_alloc; + PairAlloc pair_alloc(*alloc); + value_type* res = + phmap::allocator_traits::allocate(pair_alloc, 1); + phmap::allocator_traits::construct(pair_alloc, res, + std::forward(args)...); + return res; + } + + template + static void delete_element(Allocator* alloc, value_type* pair) { + using PairAlloc = typename phmap::allocator_traits< + Allocator>::template rebind_alloc; + PairAlloc pair_alloc(*alloc); + phmap::allocator_traits::destroy(pair_alloc, pair); + phmap::allocator_traits::deallocate(pair_alloc, pair, 1); + } + + template + static decltype(phmap::container_internal::DecomposePair( + std::declval(), std::declval()...)) + apply(F&& f, Args&&... args) { + return phmap::container_internal::DecomposePair(std::forward(f), + std::forward(args)...); + } + + static size_t element_space_used(const value_type*) { + return sizeof(value_type); + } + + static Value& value(value_type* elem) { return elem->second; } + static const Value& value(const value_type* elem) { return elem->second; } +}; + + +// -------------------------------------------------------------------------- +// hash_default +// -------------------------------------------------------------------------- + +#if PHMAP_HAVE_STD_STRING_VIEW + +// support char16_t wchar_t .... +template +struct StringHashT +{ + using is_transparent = void; + + size_t operator()(std::basic_string_view v) const { + std::string_view bv{reinterpret_cast(v.data()), v.size() * sizeof(CharT)}; + return std::hash()(bv); + } +}; + +// Supports heterogeneous lookup for basic_string-like elements. +template +struct StringHashEqT +{ + using Hash = StringHashT; + + struct Eq { + using is_transparent = void; + + bool operator()(std::basic_string_view lhs, std::basic_string_view rhs) const { + return lhs == rhs; + } + }; +}; + +template <> +struct HashEq : StringHashEqT {}; + +template <> +struct HashEq : StringHashEqT {}; + +// char16_t +template <> +struct HashEq : StringHashEqT {}; + +template <> +struct HashEq : StringHashEqT {}; + +// wchar_t +template <> +struct HashEq : StringHashEqT {}; + +template <> +struct HashEq : StringHashEqT {}; + +#endif + +// Supports heterogeneous lookup for pointers and smart pointers. 
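+//
+// For example (a sketch, assuming the default Hash/Eq from phmap_fwd_decl.h so
+// that the transparent HashEq defined below is picked up; names are
+// hypothetical):
+//
+//   phmap::flat_hash_set<std::unique_ptr<int>> s;
+//   auto owned = std::make_unique<int>(7);
+//   int* raw = owned.get();
+//   s.insert(std::move(owned));
+//   s.find(raw);   // looks the element up by its raw pointer; no temporary
+//                  // std::unique_ptr has to be constructed for the query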
+// ------------------------------------------------------------- +template +struct HashEq +{ + struct Hash { + using is_transparent = void; + template + size_t operator()(const U& ptr) const { + return phmap::Hash{}(HashEq::ToPtr(ptr)); + } + }; + + struct Eq { + using is_transparent = void; + template + bool operator()(const A& a, const B& b) const { + return HashEq::ToPtr(a) == HashEq::ToPtr(b); + } + }; + +private: + static const T* ToPtr(const T* ptr) { return ptr; } + + template + static const T* ToPtr(const std::unique_ptr& ptr) { + return ptr.get(); + } + + template + static const T* ToPtr(const std::shared_ptr& ptr) { + return ptr.get(); + } +}; + +template +struct HashEq> : HashEq {}; + +template +struct HashEq> : HashEq {}; + +namespace hashtable_debug_internal { + +// -------------------------------------------------------------------------- +// -------------------------------------------------------------------------- +template +struct HashtableDebugAccess> +{ + using Traits = typename Set::PolicyTraits; + using Slot = typename Traits::slot_type; + + static size_t GetNumProbes(const Set& set, + const typename Set::key_type& key) { + size_t num_probes = 0; + size_t hash = typename Set::HashElement{set.hash_ref()}(key); + auto seq = set.probe(hash); + while (true) { + container_internal::Group g{set.ctrl_ + seq.offset()}; + for (int i : g.Match(container_internal::H2(hash))) { + if (Traits::apply( + typename Set::template EqualElement{ + key, set.eq_ref()}, + Traits::element(set.slots_ + seq.offset((size_t)i)))) + return num_probes; + ++num_probes; + } + if (g.MatchEmpty()) return num_probes; + seq.next(); + ++num_probes; + } + } + + static size_t AllocatedByteSize(const Set& c) { + size_t capacity = c.capacity_; + if (capacity == 0) return 0; + auto layout = Set::MakeLayout(capacity); + size_t m = layout.AllocSize(); + + size_t per_slot = Traits::space_used(static_cast(nullptr)); + if (per_slot != ~size_t{}) { + m += per_slot * c.size(); + } else { + for (size_t i = 0; i != capacity; ++i) { + if (container_internal::IsFull(c.ctrl_[i])) { + m += Traits::space_used(c.slots_ + i); + } + } + } + return m; + } + + static size_t LowerBoundAllocatedByteSize(size_t size) { + size_t capacity = GrowthToLowerboundCapacity(size); + if (capacity == 0) return 0; + auto layout = Set::MakeLayout(NormalizeCapacity(capacity)); + size_t m = layout.AllocSize(); + size_t per_slot = Traits::space_used(static_cast(nullptr)); + if (per_slot != ~size_t{}) { + m += per_slot * size; + } + return m; + } +}; + +} // namespace hashtable_debug_internal +} // namespace container_internal + +// ----------------------------------------------------------------------------- +// phmap::flat_hash_set +// ----------------------------------------------------------------------------- +// An `phmap::flat_hash_set` is an unordered associative container which has +// been optimized for both speed and memory footprint in most common use cases. +// Its interface is similar to that of `std::unordered_set` with the +// following notable differences: +// +// * Supports heterogeneous lookup, through `find()`, `operator[]()` and +// `insert()`, provided that the set is provided a compatible heterogeneous +// hashing function and equality operator. +// * Invalidates any references and pointers to elements within the table after +// `rehash()`. +// * Contains a `capacity()` member function indicating the number of element +// slots (open, deleted, and empty) within the hash set. 
+// * Returns `void` from the `_erase(iterator)` overload.
+// -----------------------------------------------------------------------------
+template <class T, class Hash, class Eq, class Alloc> // default values in phmap_fwd_decl.h
+class flat_hash_set
+    : public phmap::container_internal::raw_hash_set<
+          phmap::container_internal::FlatHashSetPolicy<T>, Hash, Eq, Alloc>
+{
+    using Base = typename flat_hash_set::raw_hash_set;
+
+public:
+    flat_hash_set() {}
+#ifdef __INTEL_COMPILER
+    using Base::raw_hash_set;
+#else
+    using Base::Base;
+#endif
+    using Base::begin;
+    using Base::cbegin;
+    using Base::cend;
+    using Base::end;
+    using Base::capacity;
+    using Base::empty;
+    using Base::max_size;
+    using Base::size;
+    using Base::clear;  // may shrink - to avoid shrinking use erase(begin(), end())
+    using Base::erase;
+    using Base::insert;
+    using Base::emplace;
+    using Base::emplace_hint;
+    using Base::extract;
+    using Base::merge;
+    using Base::swap;
+    using Base::rehash;
+    using Base::reserve;
+    using Base::contains;
+    using Base::count;
+    using Base::equal_range;
+    using Base::find;
+    using Base::bucket_count;
+    using Base::load_factor;
+    using Base::max_load_factor;
+    using Base::get_allocator;
+    using Base::hash_function;
+    using Base::key_eq;
+};
+
+// -----------------------------------------------------------------------------
+// phmap::flat_hash_map
+// -----------------------------------------------------------------------------
+//
+// A `phmap::flat_hash_map<K, V>` is an unordered associative container which
+// has been optimized for both speed and memory footprint in most common use
+// cases. Its interface is similar to that of `std::unordered_map<K, V>` with
+// the following notable differences:
+//
+// * Supports heterogeneous lookup, through `find()`, `operator[]()` and
+//   `insert()`, provided that the map is given a compatible heterogeneous
+//   hashing function and equality operator.
+// * Invalidates any references and pointers to elements within the table after
+//   `rehash()`.
+// * Contains a `capacity()` member function indicating the number of element
+//   slots (open, deleted, and empty) within the hash map.
+// * Returns `void` from the `_erase(iterator)` overload.
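+//
+// Example (an illustrative sketch in the upstream Abseil documentation style;
+// the container contents are hypothetical):
+//
+//   // Create a flat hash map of three strings (that map to strings)
+//   phmap::flat_hash_map<std::string, std::string> ducks =
+//       {{"a", "huey"}, {"b", "dewey"}, {"c", "louie"}};
+//
+//   // Insert a new element into the flat hash map
+//   ducks.insert({"d", "donald"});
+//
+//   // Find the element with the key "b"
+//   if (ducks.contains("b")) std::cout << ducks["b"] << "\n";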
+// -----------------------------------------------------------------------------
+template <class K, class V, class Hash, class Eq, class Alloc> // default values in phmap_fwd_decl.h
+class flat_hash_map : public phmap::container_internal::raw_hash_map<
+                          phmap::container_internal::FlatHashMapPolicy<K, V>,
+                          Hash, Eq, Alloc> {
+    using Base = typename flat_hash_map::raw_hash_map;
+
+public:
+    flat_hash_map() {}
+#ifdef __INTEL_COMPILER
+    using Base::raw_hash_map;
+#else
+    using Base::Base;
+#endif
+    using Base::begin;
+    using Base::cbegin;
+    using Base::cend;
+    using Base::end;
+    using Base::capacity;
+    using Base::empty;
+    using Base::max_size;
+    using Base::size;
+    using Base::clear;
+    using Base::erase;
+    using Base::insert;
+    using Base::insert_or_assign;
+    using Base::emplace;
+    using Base::emplace_hint;
+    using Base::try_emplace;
+    using Base::extract;
+    using Base::merge;
+    using Base::swap;
+    using Base::rehash;
+    using Base::reserve;
+    using Base::at;
+    using Base::contains;
+    using Base::count;
+    using Base::equal_range;
+    using Base::find;
+    using Base::operator[];
+    using Base::bucket_count;
+    using Base::load_factor;
+    using Base::max_load_factor;
+    using Base::get_allocator;
+    using Base::hash_function;
+    using Base::key_eq;
+};
+
+// -----------------------------------------------------------------------------
+// phmap::node_hash_set
+// -----------------------------------------------------------------------------
+// A `phmap::node_hash_set<T>` is an unordered associative container which
+// has been optimized for both speed and memory footprint in most common use
+// cases. Its interface is similar to that of `std::unordered_set<T>` with the
+// following notable differences:
+//
+// * Supports heterogeneous lookup, through `find()` and `insert()`, provided
+//   that the set is given a compatible heterogeneous hashing function and
+//   equality operator.
+// * Contains a `capacity()` member function indicating the number of element
+//   slots (open, deleted, and empty) within the hash set.
+// * Returns `void` from the `erase(iterator)` overload.
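+//
+// Example (an illustrative sketch; values are hypothetical):
+//
+//   // Create a node hash set of three strings
+//   phmap::node_hash_set<std::string> ducks = {"huey", "dewey", "louie"};
+//
+//   // Insert a new element into the node hash set
+//   ducks.insert("donald");
+//
+//   // Elements are allocated as individual nodes, so a pointer taken here
+//   // stays valid even if the table later rehashes
+//   const std::string* pinned = &*ducks.find("huey");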
+// -----------------------------------------------------------------------------
+template <class T, class Hash, class Eq, class Alloc> // default values in phmap_fwd_decl.h
+class node_hash_set
+    : public phmap::container_internal::raw_hash_set<
+          phmap::container_internal::NodeHashSetPolicy<T>, Hash, Eq, Alloc>
+{
+    using Base = typename node_hash_set::raw_hash_set;
+
+public:
+    node_hash_set() {}
+#ifdef __INTEL_COMPILER
+    using Base::raw_hash_set;
+#else
+    using Base::Base;
+#endif
+    using Base::begin;
+    using Base::cbegin;
+    using Base::cend;
+    using Base::end;
+    using Base::capacity;
+    using Base::empty;
+    using Base::max_size;
+    using Base::size;
+    using Base::clear;
+    using Base::erase;
+    using Base::insert;
+    using Base::emplace;
+    using Base::emplace_hint;
+    using Base::extract;
+    using Base::merge;
+    using Base::swap;
+    using Base::rehash;
+    using Base::reserve;
+    using Base::contains;
+    using Base::count;
+    using Base::equal_range;
+    using Base::find;
+    using Base::bucket_count;
+    using Base::load_factor;
+    using Base::max_load_factor;
+    using Base::get_allocator;
+    using Base::hash_function;
+    using Base::key_eq;
+    // sparsehash-style compatibility helpers
+    typename Base::hasher hash_funct() { return this->hash_function(); }
+    void resize(typename Base::size_type hint) { this->rehash(hint); }
+};
+
+// -----------------------------------------------------------------------------
+// phmap::node_hash_map
+// -----------------------------------------------------------------------------
+//
+// A `phmap::node_hash_map<K, V>` is an unordered associative container which
+// has been optimized for both speed and memory footprint in most common use
+// cases. Its interface is similar to that of `std::unordered_map<K, V>` with
+// the following notable differences:
+//
+// * Supports heterogeneous lookup, through `find()`, `operator[]()` and
+//   `insert()`, provided that the map is given a compatible heterogeneous
+//   hashing function and equality operator.
+// * Contains a `capacity()` member function indicating the number of element
+//   slots (open, deleted, and empty) within the hash map.
+// * Returns `void` from the `erase(iterator)` overload.
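+//
+// Example (an illustrative sketch; values are hypothetical):
+//
+//   // Create a node hash map of three ints that map to strings
+//   phmap::node_hash_map<int, std::string> numbers =
+//       {{1, "one"}, {2, "two"}, {3, "three"}};
+//
+//   // try_emplace constructs the mapped value only if the key is absent
+//   numbers.try_emplace(4, "four");
+//
+//   // Node-based storage keeps element addresses stable across rehashes,
+//   // so this reference remains valid after further inserts
+//   std::string& three = numbers.at(3);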
+// ----------------------------------------------------------------------------- +template // default values in phmap_fwd_decl.h +class node_hash_map + : public phmap::container_internal::raw_hash_map< + phmap::container_internal::NodeHashMapPolicy, Hash, Eq, + Alloc> +{ + using Base = typename node_hash_map::raw_hash_map; + +public: + node_hash_map() {} +#ifdef __INTEL_COMPILER + using Base::raw_hash_map; +#else + using Base::Base; +#endif + using Base::begin; + using Base::cbegin; + using Base::cend; + using Base::end; + using Base::capacity; + using Base::empty; + using Base::max_size; + using Base::size; + using Base::clear; + using Base::erase; + using Base::insert; + using Base::insert_or_assign; + using Base::emplace; + using Base::emplace_hint; + using Base::try_emplace; + using Base::extract; + using Base::merge; + using Base::swap; + using Base::rehash; + using Base::reserve; + using Base::at; + using Base::contains; + using Base::count; + using Base::equal_range; + using Base::find; + using Base::operator[]; + using Base::bucket_count; + using Base::load_factor; + using Base::max_load_factor; + using Base::get_allocator; + using Base::hash_function; + using Base::key_eq; + typename Base::hasher hash_funct() { return this->hash_function(); } + void resize(typename Base::size_type hint) { this->rehash(hint); } +}; + +// ----------------------------------------------------------------------------- +// phmap::parallel_flat_hash_set +// ----------------------------------------------------------------------------- +template // default values in phmap_fwd_decl.h +class parallel_flat_hash_set + : public phmap::container_internal::parallel_hash_set< + N, phmap::container_internal::raw_hash_set, Mtx_, + phmap::container_internal::FlatHashSetPolicy, + Hash, Eq, Alloc> +{ + using Base = typename parallel_flat_hash_set::parallel_hash_set; + +public: + parallel_flat_hash_set() {} +#ifdef __INTEL_COMPILER + using Base::parallel_hash_set; +#else + using Base::Base; +#endif + using Base::hash; + using Base::subidx; + using Base::subcnt; + using Base::begin; + using Base::cbegin; + using Base::cend; + using Base::end; + using Base::capacity; + using Base::empty; + using Base::max_size; + using Base::size; + using Base::clear; + using Base::erase; + using Base::insert; + using Base::emplace; + using Base::emplace_hint; + using Base::extract; + using Base::merge; + using Base::swap; + using Base::rehash; + using Base::reserve; + using Base::contains; + using Base::count; + using Base::equal_range; + using Base::find; + using Base::bucket_count; + using Base::load_factor; + using Base::max_load_factor; + using Base::get_allocator; + using Base::hash_function; + using Base::key_eq; +}; + +// ----------------------------------------------------------------------------- +// phmap::parallel_flat_hash_map - default values in phmap_fwd_decl.h +// ----------------------------------------------------------------------------- +template +class parallel_flat_hash_map : public phmap::container_internal::parallel_hash_map< + N, phmap::container_internal::raw_hash_set, Mtx_, + phmap::container_internal::FlatHashMapPolicy, + Hash, Eq, Alloc> +{ + using Base = typename parallel_flat_hash_map::parallel_hash_map; + +public: + parallel_flat_hash_map() {} +#ifdef __INTEL_COMPILER + using Base::parallel_hash_map; +#else + using Base::Base; +#endif + using Base::hash; + using Base::subidx; + using Base::subcnt; + using Base::begin; + using Base::cbegin; + using Base::cend; + using Base::end; + using Base::capacity; + 
using Base::empty; + using Base::max_size; + using Base::size; + using Base::clear; + using Base::erase; + using Base::insert; + using Base::insert_or_assign; + using Base::emplace; + using Base::emplace_hint; + using Base::try_emplace; + using Base::extract; + using Base::merge; + using Base::swap; + using Base::rehash; + using Base::reserve; + using Base::at; + using Base::contains; + using Base::count; + using Base::equal_range; + using Base::find; + using Base::operator[]; + using Base::bucket_count; + using Base::load_factor; + using Base::max_load_factor; + using Base::get_allocator; + using Base::hash_function; + using Base::key_eq; +}; + +// ----------------------------------------------------------------------------- +// phmap::parallel_node_hash_set +// ----------------------------------------------------------------------------- +template +class parallel_node_hash_set + : public phmap::container_internal::parallel_hash_set< + N, phmap::container_internal::raw_hash_set, Mtx_, + phmap::container_internal::NodeHashSetPolicy, Hash, Eq, Alloc> +{ + using Base = typename parallel_node_hash_set::parallel_hash_set; + +public: + parallel_node_hash_set() {} +#ifdef __INTEL_COMPILER + using Base::parallel_hash_set; +#else + using Base::Base; +#endif + using Base::hash; + using Base::subidx; + using Base::subcnt; + using Base::begin; + using Base::cbegin; + using Base::cend; + using Base::end; + using Base::capacity; + using Base::empty; + using Base::max_size; + using Base::size; + using Base::clear; + using Base::erase; + using Base::insert; + using Base::emplace; + using Base::emplace_hint; + using Base::extract; + using Base::merge; + using Base::swap; + using Base::rehash; + using Base::reserve; + using Base::contains; + using Base::count; + using Base::equal_range; + using Base::find; + using Base::bucket_count; + using Base::load_factor; + using Base::max_load_factor; + using Base::get_allocator; + using Base::hash_function; + using Base::key_eq; + typename Base::hasher hash_funct() { return this->hash_function(); } + void resize(typename Base::size_type hint) { this->rehash(hint); } +}; + +// ----------------------------------------------------------------------------- +// phmap::parallel_node_hash_map +// ----------------------------------------------------------------------------- +template +class parallel_node_hash_map + : public phmap::container_internal::parallel_hash_map< + N, phmap::container_internal::raw_hash_set, Mtx_, + phmap::container_internal::NodeHashMapPolicy, Hash, Eq, + Alloc> +{ + using Base = typename parallel_node_hash_map::parallel_hash_map; + +public: + parallel_node_hash_map() {} +#ifdef __INTEL_COMPILER + using Base::parallel_hash_map; +#else + using Base::Base; +#endif + using Base::hash; + using Base::subidx; + using Base::subcnt; + using Base::begin; + using Base::cbegin; + using Base::cend; + using Base::end; + using Base::capacity; + using Base::empty; + using Base::max_size; + using Base::size; + using Base::clear; + using Base::erase; + using Base::insert; + using Base::insert_or_assign; + using Base::emplace; + using Base::emplace_hint; + using Base::try_emplace; + using Base::extract; + using Base::merge; + using Base::swap; + using Base::rehash; + using Base::reserve; + using Base::at; + using Base::contains; + using Base::count; + using Base::equal_range; + using Base::find; + using Base::operator[]; + using Base::bucket_count; + using Base::load_factor; + using Base::max_load_factor; + using Base::get_allocator; + using Base::hash_function; + 
using Base::key_eq;
+    typename Base::hasher hash_funct() { return this->hash_function(); }
+    void resize(typename Base::size_type hint) { this->rehash(hint); }
+};
+
+} // namespace phmap
+
+#ifdef _MSC_VER
+    #pragma warning(pop)
+#endif
+
+
+#endif // phmap_h_guard_
diff --git a/src/includes/3thparty/parallel_hashmap/phmap_base.h b/src/includes/3thparty/parallel_hashmap/phmap_base.h
new file mode 100644
index 0000000..4cc3e00
--- /dev/null
+++ b/src/includes/3thparty/parallel_hashmap/phmap_base.h
@@ -0,0 +1,5167 @@
+#if !defined(phmap_base_h_guard_)
+#define phmap_base_h_guard_
+
+// ---------------------------------------------------------------------------
+// Copyright (c) 2019, Gregory Popovitch - greg7mdp@gmail.com
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Includes work from abseil-cpp (https://github.com/abseil/abseil-cpp)
+// with modifications.
+//
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// ---------------------------------------------------------------------------
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstring>
+#include <functional>
+#include <initializer_list>
+#include <iterator>
+#include <memory>
+#include <new>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+#include <mutex> // for std::lock
+
+#include "phmap_config.h"
+
+#ifdef PHMAP_HAVE_SHARED_MUTEX
+    #include <shared_mutex> // after "phmap_config.h"
+#endif
+
+#ifdef _MSC_VER
+    #pragma warning(push)
+    #pragma warning(disable : 4514) // unreferenced inline function has been removed
+    #pragma warning(disable : 4582) // constructor is not implicitly called
+    #pragma warning(disable : 4625) // copy constructor was implicitly defined as deleted
+    #pragma warning(disable : 4626) // assignment operator was implicitly defined as deleted
+    #pragma warning(disable : 4710) // function not inlined
+    #pragma warning(disable : 4711) // selected for automatic inline expansion
+    #pragma warning(disable : 4820) // '6' bytes padding added after data member
+#endif  // _MSC_VER
+
+namespace phmap {
+
+template <class T> using Allocator = typename std::allocator<T>;
+
+template <class T1, class T2> using Pair = typename std::pair<T1, T2>;
+
+template <class T>
+struct EqualTo
+{
+    inline bool operator()(const T& a, const T& b) const
+    {
+        return std::equal_to<T>()(a, b);
+    }
+};
+
+template <class T>
+struct Less
+{
+    inline bool operator()(const T& a, const T& b) const
+    {
+        return std::less<T>()(a, b);
+    }
+};
+
+namespace type_traits_internal {
+
+template <typename... Ts>
+struct VoidTImpl {
+    using type = void;
+};
+
+// This trick to retrieve a default alignment is necessary for our
+// implementation of aligned_storage_t to be consistent with any implementation
+// of std::aligned_storage.
+// ---------------------------------------------------------------------------
+template <size_t Len, typename T = std::aligned_storage<Len>>
+struct default_alignment_of_aligned_storage;
+
+template <size_t Len, size_t Align>
+struct default_alignment_of_aligned_storage<
+    Len, std::aligned_storage<Len, Align>> {
+    static constexpr size_t value = Align;
+};
+
+// NOTE: The `is_detected` family of templates here differ from the library
+// fundamentals specification in that for library fundamentals, `Op<Args...>` is
+// evaluated as soon as the type `is_detected<Op, Args...>` undergoes
+// substitution, regardless of whether or not the `::value` is accessed. That
+// is inconsistent with all other standard traits and prevents lazy evaluation
+// in larger contexts (such as if the `is_detected` check is a trailing argument
+// of a `conjunction`. This implementation opts to instead be lazy in the same
+// way that the standard traits are (this "defect" of the detection idiom
+// specifications has been reported).
+// ---------------------------------------------------------------------------
+
+template <class Enabler, template <class...> class Op, class... Args>
+struct is_detected_impl {
+    using type = std::false_type;
+};
+
+template