From: hbdeshmukh@apache.org To: commits@quickstep.incubator.apache.org Reply-To: dev@quickstep.incubator.apache.org Date: Tue, 20 Sep 2016 17:56:51 -0000 Message-Id: <23cbb99b3087410b97788c9c2d8c7788@git.apache.org> In-Reply-To: <2447dffeae714bc48ad28d886cc9feea@git.apache.org> References: <2447dffeae714bc48ad28d886cc9feea@git.apache.org> X-Mailer: ASF-Git Admin Mailer Subject: [13/20] incubator-quickstep git commit: Modified Aggregation unit test. Ran clang-format. 
archived-at: Tue, 20 Sep 2016 17:56:50 -0000 http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/c123bd49/storage/FastSeparateChainingHashTable.hpp ---------------------------------------------------------------------- diff --git a/storage/FastSeparateChainingHashTable.hpp b/storage/FastSeparateChainingHashTable.hpp index 0670993..886a8ca 100644 --- a/storage/FastSeparateChainingHashTable.hpp +++ b/storage/FastSeparateChainingHashTable.hpp @@ -27,8 +27,8 @@ #include #include -#include "storage/HashTable.hpp" #include "storage/FastHashTable.hpp" +#include "storage/HashTable.hpp" #include "storage/HashTableBase.hpp" #include "storage/HashTableKeyManager.hpp" #include "storage/StorageBlob.hpp" @@ -55,43 +55,42 @@ template -class FastSeparateChainingHashTable : public FastHashTable { +class FastSeparateChainingHashTable + : public FastHashTable { public: - FastSeparateChainingHashTable(const std::vector &key_types, - const std::size_t num_entries, - const std::vector &payload_sizes, - const std::vector &handles, - StorageManager *storage_manager); - - FastSeparateChainingHashTable(const std::vector &key_types, - void *hash_table_memory, - const std::size_t hash_table_memory_size, - const bool new_hash_table, - const bool hash_table_memory_zeroed); + FastSeparateChainingHashTable(const std::vector &key_types, + const std::size_t num_entries, + const std::vector &payload_sizes, + const std::vector &handles, + StorageManager *storage_manager); + + FastSeparateChainingHashTable(const std::vector &key_types, + void *hash_table_memory, + const std::size_t hash_table_memory_size, + const bool new_hash_table, + const bool hash_table_memory_zeroed); // Delegating constructors for single scalar keys. FastSeparateChainingHashTable(const Type &key_type, - const std::size_t num_entries, - StorageManager *storage_manager) - : FastSeparateChainingHashTable(std::vector(1, &key_type), - num_entries, - storage_manager) { - } + const std::size_t num_entries, + StorageManager *storage_manager) + : FastSeparateChainingHashTable(std::vector(1, &key_type), + num_entries, + storage_manager) {} FastSeparateChainingHashTable(const Type &key_type, - void *hash_table_memory, - const std::size_t hash_table_memory_size, - const bool new_hash_table, - const bool hash_table_memory_zeroed) - : FastSeparateChainingHashTable(std::vector(1, &key_type), - hash_table_memory, - hash_table_memory_size, - new_hash_table, - hash_table_memory_zeroed) { - } + void *hash_table_memory, + const std::size_t hash_table_memory_size, + const bool new_hash_table, + const bool hash_table_memory_zeroed) + : FastSeparateChainingHashTable(std::vector(1, &key_type), + hash_table_memory, + hash_table_memory_size, + new_hash_table, + hash_table_memory_zeroed) {} ~FastSeparateChainingHashTable() override { DestroyValues(buckets_, @@ -106,48 +105,54 @@ class FastSeparateChainingHashTable : public FastHashTablebuckets_allocated.load(std::memory_order_relaxed); } - const uint8_t* getSingle(const TypedValue &key) const override; - const uint8_t* getSingleCompositeKey(const std::vector &key) const override; - const uint8_t* getSingleCompositeKey(const std::vector &key, int index) const override; + const std::uint8_t* getSingle(const TypedValue &key) const override; + const std::uint8_t* getSingleCompositeKey( + const std::vector &key) const override; + const std::uint8_t* getSingleCompositeKey(const std::vector &key, + int index) const override; void getAll(const TypedValue &key, - std::vector *values) const override; - void 
getAllCompositeKey(const std::vector &key, - std::vector *values) const override; + std::vector *values) const override; + void getAllCompositeKey( + const std::vector &key, + std::vector *values) const override; protected: - HashTablePutResult putInternal(const TypedValue &key, - const std::size_t variable_key_size, - const uint8_t &value, - HashTablePreallocationState *prealloc_state) override; - - HashTablePutResult putCompositeKeyInternalFast(const std::vector &key, - const std::size_t variable_key_size, - const std::uint8_t *init_value_ptr, - HashTablePreallocationState *prealloc_state) override; - - uint8_t* upsertInternalFast(const TypedValue &key, - const std::size_t variable_key_size, - const std::uint8_t *init_value_ptr) override; - - uint8_t* upsertCompositeKeyInternalFast(const std::vector &key, - const std::uint8_t *init_value_ptr, - const std::size_t variable_key_size) override; + HashTablePutResult putInternal( + const TypedValue &key, + const std::size_t variable_key_size, + const std::uint8_t &value, + HashTablePreallocationState *prealloc_state) override; + + HashTablePutResult putCompositeKeyInternalFast( + const std::vector &key, + const std::size_t variable_key_size, + const std::uint8_t *init_value_ptr, + HashTablePreallocationState *prealloc_state) override; + + std::uint8_t* upsertInternalFast(const TypedValue &key, + const std::size_t variable_key_size, + const std::uint8_t *init_value_ptr) override; + + std::uint8_t* upsertCompositeKeyInternalFast( + const std::vector &key, + const std::uint8_t *init_value_ptr, + const std::size_t variable_key_size) override; bool getNextEntry(TypedValue *key, - const uint8_t **value, + const std::uint8_t **value, std::size_t *entry_num) const override; bool getNextEntryCompositeKey(std::vector *key, - const uint8_t **value, + const std::uint8_t **value, std::size_t *entry_num) const override; bool getNextEntryForKey(const TypedValue &key, const std::size_t hash_code, - const uint8_t **value, + const std::uint8_t **value, std::size_t *entry_num) const override; bool getNextEntryForCompositeKey(const std::vector &key, const std::size_t hash_code, - const uint8_t **value, + const std::uint8_t **value, std::size_t *entry_num) const override; bool hasKey(const TypedValue &key) const override; @@ -157,15 +162,16 @@ class FastSeparateChainingHashTable : public FastHashTable buckets_allocated; + alignas(kCacheLineBytes) std::atomic buckets_allocated; alignas(kCacheLineBytes) std::atomic variable_length_bytes_allocated; }; @@ -179,16 +185,18 @@ class FastSeparateChainingHashTable : public FastHashTabletotal_payload_size_ + fixed_key_size - 1) / kBucketAlignment) + 1) - * kBucketAlignment; + return (((kValueOffset + this->total_payload_size_ + fixed_key_size - 1) / + kBucketAlignment) + + 1) * + kBucketAlignment; } // If ValueT is not trivially destructible, invoke its destructor for all // values held in the specified buckets (including those in "empty" buckets // that were default constructed). If ValueT is trivially destructible, this // is a no-op. void DestroyValues(void *buckets, - const std::size_t num_buckets, - const std::size_t bucket_size); + const std::size_t num_buckets, + const std::size_t bucket_size); // Attempt to find an empty bucket to insert 'hash_code' into, starting after // '*bucket' in the chain (or, if '*bucket' is NULL, starting from the slot @@ -201,30 +209,33 @@ class FastSeparateChainingHashTable : public FastHashTablenum_buckets' is ever // deallocated after being allocated. 
- inline bool locateBucketForInsertion(const std::size_t hash_code, - const std::size_t variable_key_allocation_required, - void **bucket, - std::atomic **pending_chain_ptr, - std::size_t *pending_chain_ptr_finish_value, - HashTablePreallocationState *prealloc_state); + inline bool locateBucketForInsertion( + const std::size_t hash_code, + const std::size_t variable_key_allocation_required, + void **bucket, + std::atomic **pending_chain_ptr, + std::size_t *pending_chain_ptr_finish_value, + HashTablePreallocationState *prealloc_state); // Write a scalar 'key' and its 'hash_code' into the '*bucket', which was // found by locateBucketForInsertion(). Assumes that storage for a // variable-length key copy (if any) was already allocated by a successful // call to allocateVariableLengthKeyStorage(). - inline void writeScalarKeyToBucket(const TypedValue &key, - const std::size_t hash_code, - void *bucket, - HashTablePreallocationState *prealloc_state); + inline void writeScalarKeyToBucket( + const TypedValue &key, + const std::size_t hash_code, + void *bucket, + HashTablePreallocationState *prealloc_state); // Write a composite 'key' and its 'hash_code' into the '*bucket', which was // found by locateBucketForInsertion(). Assumes that storage for // variable-length key copies (if any) was already allocated by a successful // call to allocateVariableLengthKeyStorage(). - inline void writeCompositeKeyToBucket(const std::vector &key, - const std::size_t hash_code, - void *bucket, - HashTablePreallocationState *prealloc_state); + inline void writeCompositeKeyToBucket( + const std::vector &key, + const std::size_t hash_code, + void *bucket, + HashTablePreallocationState *prealloc_state); // Determine whether it is actually necessary to resize this hash table. // Checks that there is at least one unallocated bucket, and that there is @@ -275,30 +286,37 @@ template -FastSeparateChainingHashTable - ::FastSeparateChainingHashTable(const std::vector &key_types, - const std::size_t num_entries, - const std::vector &payload_sizes, - const std::vector &handles, - StorageManager *storage_manager) - : FastHashTable( - key_types, - num_entries, - handles, - payload_sizes, - storage_manager, - false, - false, - true), - kBucketAlignment(alignof(std::atomic)), - kValueOffset(sizeof(std::atomic) + sizeof(std::size_t)), - key_manager_(this->key_types_, kValueOffset + this->total_payload_size_), - bucket_size_(ComputeBucketSize(key_manager_.getFixedKeySize())) { - init_payload_ = static_cast(calloc(this->total_payload_size_, 1)); +FastSeparateChainingHashTable:: + FastSeparateChainingHashTable( + const std::vector &key_types, + const std::size_t num_entries, + const std::vector &payload_sizes, + const std::vector &handles, + StorageManager *storage_manager) + : FastHashTable(key_types, + num_entries, + handles, + payload_sizes, + storage_manager, + false, + false, + true), + kBucketAlignment(alignof(std::atomic)), + kValueOffset(sizeof(std::atomic) + sizeof(std::size_t)), + key_manager_(this->key_types_, kValueOffset + this->total_payload_size_), + bucket_size_(ComputeBucketSize(key_manager_.getFixedKeySize())) { + init_payload_ = + static_cast(calloc(this->total_payload_size_, 1)); int k = 0; for (auto handle : handles) { - handle->initPayload(init_payload_+this->payload_offsets_[k]); - k++; + handle->initPayload(init_payload_ + this->payload_offsets_[k]); + k++; } // Bucket size always rounds up to the alignment requirement of the atomic // size_t "next" pointer at the front or a ValueT, whichever is larger. 
@@ -308,19 +326,23 @@ FastSeparateChainingHashTablesetKeyInline(key_manager_.getKeyInline()); // Pick out a prime number of slots and calculate storage requirements. - std::size_t num_slots_tmp = get_next_prime_number(num_entries * kHashTableLoadFactor); - std::size_t required_memory = sizeof(Header) - + num_slots_tmp * sizeof(std::atomic) - + (num_slots_tmp / kHashTableLoadFactor) - * (bucket_size_ + key_manager_.getEstimatedVariableKeySize()); - std::size_t num_storage_slots = this->storage_manager_->SlotsNeededForBytes(required_memory); + std::size_t num_slots_tmp = + get_next_prime_number(num_entries * kHashTableLoadFactor); + std::size_t required_memory = + sizeof(Header) + num_slots_tmp * sizeof(std::atomic) + + (num_slots_tmp / kHashTableLoadFactor) * + (bucket_size_ + key_manager_.getEstimatedVariableKeySize()); + std::size_t num_storage_slots = + this->storage_manager_->SlotsNeededForBytes(required_memory); if (num_storage_slots == 0) { - FATAL_ERROR("Storage requirement for SeparateChainingHashTable " - "exceeds maximum allocation size."); + FATAL_ERROR( + "Storage requirement for SeparateChainingHashTable " + "exceeds maximum allocation size."); } // Get a StorageBlob to hold the hash table. - const block_id blob_id = this->storage_manager_->createBlob(num_storage_slots); + const block_id blob_id = + this->storage_manager_->createBlob(num_storage_slots); this->blob_ = this->storage_manager_->getBlobMutable(blob_id); void *aligned_memory_start = this->blob_->getMemoryMutable(); @@ -328,14 +350,14 @@ FastSeparateChainingHashTableblob_->getMemoryMutable()) { // This should also be impossible, since the StorageManager allocates slots // aligned to kCacheLineBytes. @@ -346,8 +368,9 @@ FastSeparateChainingHashTable(aligned_memory_start); - aligned_memory_start = static_cast(aligned_memory_start) + sizeof(Header); + header_ = static_cast
(aligned_memory_start); + aligned_memory_start = + static_cast(aligned_memory_start) + sizeof(Header); available_memory -= sizeof(Header); // Recompute the number of slots & buckets using the actual available memory. @@ -355,19 +378,20 @@ FastSeparateChainingHashTable) - + bucket_size_ - + key_manager_.getEstimatedVariableKeySize()); - num_slots_tmp = get_previous_prime_number(num_buckets_tmp * kHashTableLoadFactor); + std::size_t num_buckets_tmp = + available_memory / + (kHashTableLoadFactor * sizeof(std::atomic) + bucket_size_ + + key_manager_.getEstimatedVariableKeySize()); + num_slots_tmp = + get_previous_prime_number(num_buckets_tmp * kHashTableLoadFactor); num_buckets_tmp = num_slots_tmp / kHashTableLoadFactor; DEBUG_ASSERT(num_slots_tmp > 0); DEBUG_ASSERT(num_buckets_tmp > 0); // Locate the slot array. - slots_ = static_cast*>(aligned_memory_start); - aligned_memory_start = static_cast(aligned_memory_start) - + sizeof(std::atomic) * num_slots_tmp; + slots_ = static_cast *>(aligned_memory_start); + aligned_memory_start = static_cast(aligned_memory_start) + + sizeof(std::atomic) * num_slots_tmp; available_memory -= sizeof(std::atomic) * num_slots_tmp; // Locate the buckets. @@ -375,17 +399,16 @@ FastSeparateChainingHashTable, we may need to adjust the start of the bucket // array. - if (align(kBucketAlignment, - bucket_size_, - buckets_, - available_memory) - == nullptr) { - FATAL_ERROR("StorageBlob used to hold resizable " - "SeparateChainingHashTable is too small to meet " - "alignment requirements of buckets."); + if (align(kBucketAlignment, bucket_size_, buckets_, available_memory) == + nullptr) { + FATAL_ERROR( + "StorageBlob used to hold resizable " + "SeparateChainingHashTable is too small to meet " + "alignment requirements of buckets."); } else if (buckets_ != aligned_memory_start) { - DEV_WARNING("Bucket array start position adjusted to meet alignment " - "requirement for SeparateChainingHashTable's value type."); + DEV_WARNING( + "Bucket array start position adjusted to meet alignment " + "requirement for SeparateChainingHashTable's value type."); if (num_buckets_tmp * bucket_size_ > available_memory) { --num_buckets_tmp; } @@ -401,7 +424,7 @@ FastSeparateChainingHashTable(buckets_) + header_->num_buckets * bucket_size_, + static_cast(buckets_) + header_->num_buckets * bucket_size_, available_memory, &(header_->variable_length_bytes_allocated)); } @@ -410,36 +433,43 @@ template -FastSeparateChainingHashTable - ::FastSeparateChainingHashTable(const std::vector &key_types, - void *hash_table_memory, - const std::size_t hash_table_memory_size, - const bool new_hash_table, - const bool hash_table_memory_zeroed) - : FastHashTable( - key_types, - hash_table_memory, - hash_table_memory_size, - new_hash_table, - hash_table_memory_zeroed, - false, - false, - true), - kBucketAlignment(alignof(std::atomic) < alignof(uint8_t) ? 
alignof(uint8_t) - : alignof(std::atomic)), - kValueOffset(sizeof(std::atomic) + sizeof(std::size_t)), - key_manager_(this->key_types_, kValueOffset + sizeof(uint8_t)), - bucket_size_(ComputeBucketSize(key_manager_.getFixedKeySize())) { +FastSeparateChainingHashTable:: + FastSeparateChainingHashTable(const std::vector &key_types, + void *hash_table_memory, + const std::size_t hash_table_memory_size, + const bool new_hash_table, + const bool hash_table_memory_zeroed) + : FastHashTable(key_types, + hash_table_memory, + hash_table_memory_size, + new_hash_table, + hash_table_memory_zeroed, + false, + false, + true), + kBucketAlignment(alignof(std::atomic) < alignof(std::uint8_t) + ? alignof(std::uint8_t) + : alignof(std::atomic)), + kValueOffset(sizeof(std::atomic) + sizeof(std::size_t)), + key_manager_(this->key_types_, kValueOffset + sizeof(std::uint8_t)), + bucket_size_(ComputeBucketSize(key_manager_.getFixedKeySize())) { // Bucket size always rounds up to the alignment requirement of the atomic // size_t "next" pointer at the front or a ValueT, whichever is larger. // // Make sure that the larger of the two alignment requirements also satisfies // the smaller. - static_assert(alignof(std::atomic) < alignof(uint8_t) - ? alignof(uint8_t) % alignof(std::atomic) == 0 - : alignof(std::atomic) % alignof(uint8_t) == 0, - "Alignment requirement of std::atomic does not " - "evenly divide with alignment requirement of ValueT."); + static_assert( + alignof(std::atomic) < alignof(std::uint8_t) + ? alignof(std::uint8_t) % alignof(std::atomic) == 0 + : alignof(std::atomic) % alignof(std::uint8_t) == 0, + "Alignment requirement of std::atomic does not " + "evenly divide with alignment requirement of ValueT."); // Give base HashTable information about what key components are stored // inline from 'key_manager_'. @@ -460,12 +490,13 @@ FastSeparateChainingHashTablehash_table_memory_) { @@ -477,32 +508,36 @@ FastSeparateChainingHashTable(aligned_memory_start); - aligned_memory_start = static_cast(aligned_memory_start) + sizeof(Header); + header_ = static_cast
(aligned_memory_start); + aligned_memory_start = + static_cast(aligned_memory_start) + sizeof(Header); available_memory -= sizeof(Header); if (new_hash_table) { - std::size_t estimated_bucket_capacity - = available_memory / (kHashTableLoadFactor * sizeof(std::atomic) - + bucket_size_ - + key_manager_.getEstimatedVariableKeySize()); - std::size_t num_slots = get_previous_prime_number(estimated_bucket_capacity * kHashTableLoadFactor); + std::size_t estimated_bucket_capacity = + available_memory / + (kHashTableLoadFactor * sizeof(std::atomic) + + bucket_size_ + key_manager_.getEstimatedVariableKeySize()); + std::size_t num_slots = get_previous_prime_number( + estimated_bucket_capacity * kHashTableLoadFactor); // Fill in the header. header_->num_slots = num_slots; header_->num_buckets = num_slots / kHashTableLoadFactor; header_->buckets_allocated.store(0, std::memory_order_relaxed); - header_->variable_length_bytes_allocated.store(0, std::memory_order_relaxed); + header_->variable_length_bytes_allocated.store(0, + std::memory_order_relaxed); } // Locate the slot array. - slots_ = static_cast*>(aligned_memory_start); - aligned_memory_start = static_cast(aligned_memory_start) - + sizeof(std::atomic) * header_->num_slots; + slots_ = static_cast *>(aligned_memory_start); + aligned_memory_start = static_cast(aligned_memory_start) + + sizeof(std::atomic) * header_->num_slots; available_memory -= sizeof(std::atomic) * header_->num_slots; if (new_hash_table && !hash_table_memory_zeroed) { - std::memset(slots_, 0x0, sizeof(std::atomic) * header_->num_slots); + std::memset( + slots_, 0x0, sizeof(std::atomic) * header_->num_slots); } // Locate the buckets. @@ -510,20 +545,20 @@ FastSeparateChainingHashTable kCacheLineBytes) alignment requirements specified using alignas(). - if (align(kBucketAlignment, - bucket_size_, - buckets_, - available_memory) - == nullptr) { + if (align(kBucketAlignment, bucket_size_, buckets_, available_memory) == + nullptr) { FATAL_ERROR("Attempted to create a non-resizable " << "SeparateChainingHashTable with " - << this->hash_table_memory_size_ << " bytes of memory at " - << this->hash_table_memory_ << ", which can hold an aligned " + << this->hash_table_memory_size_ + << " bytes of memory at " + << this->hash_table_memory_ + << ", which can hold an aligned " << "SeparateChainingHashTable::Header but does not have " << "enough remaining space for even a single hash bucket."); } else if (buckets_ != aligned_memory_start) { - DEV_WARNING("Bucket array start position adjusted to meet alignment " - "requirement for SeparateChainingHashTable's value type."); + DEV_WARNING( + "Bucket array start position adjusted to meet alignment " + "requirement for SeparateChainingHashTable's value type."); if (header_->num_buckets * bucket_size_ > available_memory) { DEBUG_ASSERT(new_hash_table); --(header_->num_buckets); @@ -538,7 +573,7 @@ FastSeparateChainingHashTable(buckets_) + header_->num_buckets * bucket_size_, + static_cast(buckets_) + header_->num_buckets * bucket_size_, available_memory, &(header_->variable_length_bytes_allocated)); } @@ -547,16 +582,18 @@ template -void FastSeparateChainingHashTable - ::clear() { - const std::size_t used_buckets = header_->buckets_allocated.load(std::memory_order_relaxed); +void FastSeparateChainingHashTable::clear() { + const std::size_t used_buckets = + header_->buckets_allocated.load(std::memory_order_relaxed); // Destroy existing values, if necessary. 
- DestroyValues(buckets_, - used_buckets, - bucket_size_); + DestroyValues(buckets_, used_buckets, bucket_size_); // Zero-out slot array. - std::memset(slots_, 0x0, sizeof(std::atomic) * header_->num_slots); + std::memset( + slots_, 0x0, sizeof(std::atomic) * header_->num_slots); // Zero-out used buckets. std::memset(buckets_, 0x0, used_buckets * bucket_size_); @@ -570,24 +607,33 @@ template -const uint8_t* FastSeparateChainingHashTable - ::getSingle(const TypedValue &key) const { +const std::uint8_t* FastSeparateChainingHashTable< + resizable, + serializable, + force_key_copy, + allow_duplicate_keys>::getSingle(const TypedValue &key) const { DEBUG_ASSERT(!allow_duplicate_keys); DEBUG_ASSERT(this->key_types_.size() == 1); - DEBUG_ASSERT(key.isPlausibleInstanceOf(this->key_types_.front()->getSignature())); + DEBUG_ASSERT( + key.isPlausibleInstanceOf(this->key_types_.front()->getSignature())); const std::size_t hash_code = key.getHash(); - std::size_t bucket_ref = slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed); + std::size_t bucket_ref = + slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed); while (bucket_ref != 0) { DEBUG_ASSERT(bucket_ref != std::numeric_limits::max()); - const char *bucket = static_cast(buckets_) + (bucket_ref - 1) * bucket_size_; - const std::size_t bucket_hash = *reinterpret_cast( + const char *bucket = + static_cast(buckets_) + (bucket_ref - 1) * bucket_size_; + const std::size_t bucket_hash = *reinterpret_cast( bucket + sizeof(std::atomic)); - if ((bucket_hash == hash_code) && key_manager_.scalarKeyCollisionCheck(key, bucket)) { + if ((bucket_hash == hash_code) && + key_manager_.scalarKeyCollisionCheck(key, bucket)) { // Match located. - return reinterpret_cast(bucket + kValueOffset); + return reinterpret_cast(bucket + kValueOffset); } - bucket_ref = reinterpret_cast*>(bucket)->load(std::memory_order_relaxed); + bucket_ref = + reinterpret_cast *>(bucket)->load( + std::memory_order_relaxed); } // Reached the end of the chain and didn't find a match. @@ -598,23 +644,31 @@ template -const uint8_t* FastSeparateChainingHashTable - ::getSingleCompositeKey(const std::vector &key) const { +const std::uint8_t* FastSeparateChainingHashTable:: + getSingleCompositeKey(const std::vector &key) const { DEBUG_ASSERT(!allow_duplicate_keys); DEBUG_ASSERT(this->key_types_.size() == key.size()); const std::size_t hash_code = this->hashCompositeKey(key); - std::size_t bucket_ref = slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed); + std::size_t bucket_ref = + slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed); while (bucket_ref != 0) { DEBUG_ASSERT(bucket_ref != std::numeric_limits::max()); - const char *bucket = static_cast(buckets_) + (bucket_ref - 1) * bucket_size_; - const std::size_t bucket_hash = *reinterpret_cast( + const char *bucket = + static_cast(buckets_) + (bucket_ref - 1) * bucket_size_; + const std::size_t bucket_hash = *reinterpret_cast( bucket + sizeof(std::atomic)); - if ((bucket_hash == hash_code) && key_manager_.compositeKeyCollisionCheck(key, bucket)) { + if ((bucket_hash == hash_code) && + key_manager_.compositeKeyCollisionCheck(key, bucket)) { // Match located. - return reinterpret_cast(bucket + kValueOffset); + return reinterpret_cast(bucket + kValueOffset); } - bucket_ref = reinterpret_cast*>(bucket)->load(std::memory_order_relaxed); + bucket_ref = + reinterpret_cast *>(bucket)->load( + std::memory_order_relaxed); } // Reached the end of the chain and didn't find a match. 
@@ -625,23 +679,32 @@ template -const uint8_t* FastSeparateChainingHashTable - ::getSingleCompositeKey(const std::vector &key, int index) const { +const std::uint8_t* FastSeparateChainingHashTable:: + getSingleCompositeKey(const std::vector &key, int index) const { DEBUG_ASSERT(!allow_duplicate_keys); DEBUG_ASSERT(this->key_types_.size() == key.size()); const std::size_t hash_code = this->hashCompositeKey(key); - std::size_t bucket_ref = slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed); + std::size_t bucket_ref = + slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed); while (bucket_ref != 0) { DEBUG_ASSERT(bucket_ref != std::numeric_limits::max()); - const char *bucket = static_cast(buckets_) + (bucket_ref - 1) * bucket_size_; - const std::size_t bucket_hash = *reinterpret_cast( + const char *bucket = + static_cast(buckets_) + (bucket_ref - 1) * bucket_size_; + const std::size_t bucket_hash = *reinterpret_cast( bucket + sizeof(std::atomic)); - if ((bucket_hash == hash_code) && key_manager_.compositeKeyCollisionCheck(key, bucket)) { + if ((bucket_hash == hash_code) && + key_manager_.compositeKeyCollisionCheck(key, bucket)) { // Match located. - return reinterpret_cast(bucket + kValueOffset)+this->payload_offsets_[index]; + return reinterpret_cast(bucket + kValueOffset) + + this->payload_offsets_[index]; } - bucket_ref = reinterpret_cast*>(bucket)->load(std::memory_order_relaxed); + bucket_ref = + reinterpret_cast *>(bucket)->load( + std::memory_order_relaxed); } // Reached the end of the chain and didn't find a match. @@ -652,26 +715,38 @@ template -void FastSeparateChainingHashTable - ::getAll(const TypedValue &key, std::vector *values) const { +void FastSeparateChainingHashTable< + resizable, + serializable, + force_key_copy, + allow_duplicate_keys>::getAll(const TypedValue &key, + std::vector *values) + const { DEBUG_ASSERT(this->key_types_.size() == 1); - DEBUG_ASSERT(key.isPlausibleInstanceOf(this->key_types_.front()->getSignature())); + DEBUG_ASSERT( + key.isPlausibleInstanceOf(this->key_types_.front()->getSignature())); const std::size_t hash_code = key.getHash(); - std::size_t bucket_ref = slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed); + std::size_t bucket_ref = + slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed); while (bucket_ref != 0) { DEBUG_ASSERT(bucket_ref != std::numeric_limits::max()); - const char *bucket = static_cast(buckets_) + (bucket_ref - 1) * bucket_size_; - const std::size_t bucket_hash = *reinterpret_cast( + const char *bucket = + static_cast(buckets_) + (bucket_ref - 1) * bucket_size_; + const std::size_t bucket_hash = *reinterpret_cast( bucket + sizeof(std::atomic)); - if ((bucket_hash == hash_code) && key_manager_.scalarKeyCollisionCheck(key, bucket)) { + if ((bucket_hash == hash_code) && + key_manager_.scalarKeyCollisionCheck(key, bucket)) { // Match located. 
- values->push_back(reinterpret_cast(bucket + kValueOffset)); + values->push_back( + reinterpret_cast(bucket + kValueOffset)); if (!allow_duplicate_keys) { return; } } - bucket_ref = reinterpret_cast*>(bucket)->load(std::memory_order_relaxed); + bucket_ref = + reinterpret_cast *>(bucket)->load( + std::memory_order_relaxed); } } @@ -679,25 +754,35 @@ template -void FastSeparateChainingHashTable - ::getAllCompositeKey(const std::vector &key, std::vector *values) const { +void FastSeparateChainingHashTable:: + getAllCompositeKey(const std::vector &key, + std::vector *values) const { DEBUG_ASSERT(this->key_types_.size() == key.size()); const std::size_t hash_code = this->hashCompositeKey(key); - std::size_t bucket_ref = slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed); + std::size_t bucket_ref = + slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed); while (bucket_ref != 0) { DEBUG_ASSERT(bucket_ref != std::numeric_limits::max()); - const char *bucket = static_cast(buckets_) + (bucket_ref - 1) * bucket_size_; - const std::size_t bucket_hash = *reinterpret_cast( + const char *bucket = + static_cast(buckets_) + (bucket_ref - 1) * bucket_size_; + const std::size_t bucket_hash = *reinterpret_cast( bucket + sizeof(std::atomic)); - if ((bucket_hash == hash_code) && key_manager_.compositeKeyCollisionCheck(key, bucket)) { + if ((bucket_hash == hash_code) && + key_manager_.compositeKeyCollisionCheck(key, bucket)) { // Match located. - values->push_back(reinterpret_cast(bucket + kValueOffset)); + values->push_back( + reinterpret_cast(bucket + kValueOffset)); if (!allow_duplicate_keys) { return; } } - bucket_ref = reinterpret_cast*>(bucket)->load(std::memory_order_relaxed); + bucket_ref = + reinterpret_cast *>(bucket)->load( + std::memory_order_relaxed); } } @@ -705,18 +790,22 @@ template -HashTablePutResult - FastSeparateChainingHashTable - ::putInternal(const TypedValue &key, - const std::size_t variable_key_size, - const uint8_t &value, - HashTablePreallocationState *prealloc_state) { +HashTablePutResult FastSeparateChainingHashTable:: + putInternal(const TypedValue &key, + const std::size_t variable_key_size, + const std::uint8_t &value, + HashTablePreallocationState *prealloc_state) { DEBUG_ASSERT(this->key_types_.size() == 1); - DEBUG_ASSERT(key.isPlausibleInstanceOf(this->key_types_.front()->getSignature())); + DEBUG_ASSERT( + key.isPlausibleInstanceOf(this->key_types_.front()->getSignature())); if (prealloc_state == nullptr) { // Early check for a free bucket. - if (header_->buckets_allocated.load(std::memory_order_relaxed) >= header_->num_buckets) { + if (header_->buckets_allocated.load(std::memory_order_relaxed) >= + header_->num_buckets) { return HashTablePutResult::kOutOfSpace; } @@ -763,10 +852,11 @@ HashTablePutResult writeScalarKeyToBucket(key, hash_code, bucket, prealloc_state); // Store the value by using placement new with ValueT's copy constructor. - new(static_cast(bucket) + kValueOffset) uint8_t(value); + new (static_cast(bucket) + kValueOffset) std::uint8_t(value); // Update the previous chain pointer to point to the new bucket. - pending_chain_ptr->store(pending_chain_ptr_finish_value, std::memory_order_release); + pending_chain_ptr->store(pending_chain_ptr_finish_value, + std::memory_order_release); // We're all done. 
return HashTablePutResult::kOK; @@ -776,17 +866,20 @@ template -HashTablePutResult - FastSeparateChainingHashTable - ::putCompositeKeyInternalFast(const std::vector &key, - const std::size_t variable_key_size, - const uint8_t *init_value_ptr, - HashTablePreallocationState *prealloc_state) { +HashTablePutResult FastSeparateChainingHashTable:: + putCompositeKeyInternalFast(const std::vector &key, + const std::size_t variable_key_size, + const std::uint8_t *init_value_ptr, + HashTablePreallocationState *prealloc_state) { DEBUG_ASSERT(this->key_types_.size() == key.size()); if (prealloc_state == nullptr) { // Early check for a free bucket. - if (header_->buckets_allocated.load(std::memory_order_relaxed) >= header_->num_buckets) { + if (header_->buckets_allocated.load(std::memory_order_relaxed) >= + header_->num_buckets) { return HashTablePutResult::kOutOfSpace; } @@ -832,12 +925,11 @@ HashTablePutResult // Write the key and hash. writeCompositeKeyToBucket(key, hash_code, bucket, prealloc_state); - // Store the value by using placement new with ValueT's copy constructor. -// new(static_cast(bucket) + kValueOffset) uint8_t(value); - uint8_t *value = static_cast(bucket) + kValueOffset; - memcpy(value, init_value_ptr, this->total_payload_size_); + std::uint8_t *value = static_cast(bucket) + kValueOffset; + memcpy(value, init_value_ptr, this->total_payload_size_); // Update the previous chain pointer to point to the new bucket. - pending_chain_ptr->store(pending_chain_ptr_finish_value, std::memory_order_release); + pending_chain_ptr->store(pending_chain_ptr_finish_value, + std::memory_order_release); // We're all done. return HashTablePutResult::kOK; @@ -847,13 +939,17 @@ template -uint8_t* FastSeparateChainingHashTable - ::upsertInternalFast(const TypedValue &key, - const std::size_t variable_key_size, - const std::uint8_t *init_value_ptr) { +std::uint8_t* FastSeparateChainingHashTable:: + upsertInternalFast(const TypedValue &key, + const std::size_t variable_key_size, + const std::uint8_t *init_value_ptr) { DEBUG_ASSERT(!allow_duplicate_keys); DEBUG_ASSERT(this->key_types_.size() == 1); - DEBUG_ASSERT(key.isPlausibleInstanceOf(this->key_types_.front()->getSignature())); + DEBUG_ASSERT( + key.isPlausibleInstanceOf(this->key_types_.front()->getSignature())); if (variable_key_size > 0) { // Don't allocate yet, since the key may already be present. 
However, we @@ -861,9 +957,11 @@ uint8_t* FastSeparateChainingHashTablevariable_length_bytes_allocated.load(std::memory_order_relaxed); - if ((allocated_bytes < variable_key_size) - && (allocated_bytes + variable_key_size > key_manager_.getVariableLengthKeyStorageSize())) { + std::size_t allocated_bytes = header_->variable_length_bytes_allocated.load( + std::memory_order_relaxed); + if ((allocated_bytes < variable_key_size) && + (allocated_bytes + variable_key_size > + key_manager_.getVariableLengthKeyStorageSize())) { return nullptr; } } @@ -886,7 +984,8 @@ uint8_t* FastSeparateChainingHashTable(static_cast(bucket) + kValueOffset); + return reinterpret_cast(static_cast(bucket) + + kValueOffset); } } @@ -895,16 +994,15 @@ uint8_t* FastSeparateChainingHashTable(bucket) + kValueOffset) uint8_t(initial_value); - - uint8_t *value = static_cast(bucket) + kValueOffset; - if (init_value_ptr == nullptr) - memcpy(value, init_payload_, this->total_payload_size_); - else - memcpy(value, init_value_ptr, this->total_payload_size_); + std::uint8_t *value = static_cast(bucket) + kValueOffset; + if (init_value_ptr == nullptr) + memcpy(value, init_payload_, this->total_payload_size_); + else + memcpy(value, init_value_ptr, this->total_payload_size_); // Update the previous chain pointer to point to the new bucket. - pending_chain_ptr->store(pending_chain_ptr_finish_value, std::memory_order_release); + pending_chain_ptr->store(pending_chain_ptr_finish_value, + std::memory_order_release); // Return the value. return value; @@ -914,10 +1012,13 @@ template -uint8_t* FastSeparateChainingHashTable - ::upsertCompositeKeyInternalFast(const std::vector &key, - const std::uint8_t *init_value_ptr, - const std::size_t variable_key_size) { +std::uint8_t* FastSeparateChainingHashTable:: + upsertCompositeKeyInternalFast(const std::vector &key, + const std::uint8_t *init_value_ptr, + const std::size_t variable_key_size) { DEBUG_ASSERT(!allow_duplicate_keys); DEBUG_ASSERT(this->key_types_.size() == key.size()); @@ -927,9 +1028,11 @@ uint8_t* FastSeparateChainingHashTablevariable_length_bytes_allocated.load(std::memory_order_relaxed); - if ((allocated_bytes < variable_key_size) - && (allocated_bytes + variable_key_size > key_manager_.getVariableLengthKeyStorageSize())) { + std::size_t allocated_bytes = header_->variable_length_bytes_allocated.load( + std::memory_order_relaxed); + if ((allocated_bytes < variable_key_size) && + (allocated_bytes + variable_key_size > + key_manager_.getVariableLengthKeyStorageSize())) { return nullptr; } } @@ -952,7 +1055,8 @@ uint8_t* FastSeparateChainingHashTable(static_cast(bucket) + kValueOffset); + return reinterpret_cast(static_cast(bucket) + + kValueOffset); } } @@ -960,17 +1064,16 @@ uint8_t* FastSeparateChainingHashTable(bucket) + kValueOffset; - uint8_t *value = static_cast(bucket) + kValueOffset; - if (init_value_ptr == nullptr) { - memcpy(value, init_payload_, this->total_payload_size_); - } else { - memcpy(value, init_value_ptr, this->total_payload_size_); - } + std::uint8_t *value = static_cast(bucket) + kValueOffset; + if (init_value_ptr == nullptr) { + memcpy(value, init_payload_, this->total_payload_size_); + } else { + memcpy(value, init_value_ptr, this->total_payload_size_); + } // Update the previous chaing pointer to point to the new bucket. - pending_chain_ptr->store(pending_chain_ptr_finish_value, std::memory_order_release); + pending_chain_ptr->store(pending_chain_ptr_finish_value, + std::memory_order_release); // Return the value. 
return value; @@ -980,13 +1083,19 @@ template -bool FastSeparateChainingHashTable - ::getNextEntry(TypedValue *key, const uint8_t **value, std::size_t *entry_num) const { +bool FastSeparateChainingHashTable< + resizable, + serializable, + force_key_copy, + allow_duplicate_keys>::getNextEntry(TypedValue *key, + const std::uint8_t **value, + std::size_t *entry_num) const { DEBUG_ASSERT(this->key_types_.size() == 1); if (*entry_num < header_->buckets_allocated.load(std::memory_order_relaxed)) { - const char *bucket = static_cast(buckets_) + (*entry_num) * bucket_size_; + const char *bucket = + static_cast(buckets_) + (*entry_num) * bucket_size_; *key = key_manager_.getKeyComponentTyped(bucket, 0); - *value = reinterpret_cast(bucket + kValueOffset); + *value = reinterpret_cast(bucket + kValueOffset); ++(*entry_num); return true; } else { @@ -998,18 +1107,22 @@ template -bool FastSeparateChainingHashTable - ::getNextEntryCompositeKey(std::vector *key, - const uint8_t **value, - std::size_t *entry_num) const { +bool FastSeparateChainingHashTable:: + getNextEntryCompositeKey(std::vector *key, + const std::uint8_t **value, + std::size_t *entry_num) const { if (*entry_num < header_->buckets_allocated.load(std::memory_order_relaxed)) { - const char *bucket = static_cast(buckets_) + (*entry_num) * bucket_size_; - for (std::vector::size_type key_idx = 0; + const char *bucket = + static_cast(buckets_) + (*entry_num) * bucket_size_; + for (std::vector::size_type key_idx = 0; key_idx < this->key_types_.size(); ++key_idx) { key->emplace_back(key_manager_.getKeyComponentTyped(bucket, key_idx)); } - *value = reinterpret_cast(bucket + kValueOffset); + *value = reinterpret_cast(bucket + kValueOffset); ++(*entry_num); return true; } else { @@ -1021,29 +1134,38 @@ template -bool FastSeparateChainingHashTable - ::getNextEntryForKey(const TypedValue &key, - const std::size_t hash_code, - const uint8_t **value, - std::size_t *entry_num) const { +bool FastSeparateChainingHashTable< + resizable, + serializable, + force_key_copy, + allow_duplicate_keys>::getNextEntryForKey(const TypedValue &key, + const std::size_t hash_code, + const std::uint8_t **value, + std::size_t *entry_num) const { DEBUG_ASSERT(this->key_types_.size() == 1); - DEBUG_ASSERT(key.isPlausibleInstanceOf(this->key_types_.front()->getSignature())); + DEBUG_ASSERT( + key.isPlausibleInstanceOf(this->key_types_.front()->getSignature())); if (*entry_num == 0) { - *entry_num = slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed); + *entry_num = + slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed); } else if (*entry_num == std::numeric_limits::max()) { return false; } while (*entry_num != 0) { DEBUG_ASSERT(*entry_num != std::numeric_limits::max()); - const char *bucket = static_cast(buckets_) + (*entry_num - 1) * bucket_size_; - *entry_num = reinterpret_cast*>(bucket)->load(std::memory_order_relaxed); - const std::size_t bucket_hash = *reinterpret_cast( + const char *bucket = + static_cast(buckets_) + (*entry_num - 1) * bucket_size_; + *entry_num = + reinterpret_cast *>(bucket)->load( + std::memory_order_relaxed); + const std::size_t bucket_hash = *reinterpret_cast( bucket + sizeof(std::atomic)); - if ((bucket_hash == hash_code) && key_manager_.scalarKeyCollisionCheck(key, bucket)) { + if ((bucket_hash == hash_code) && + key_manager_.scalarKeyCollisionCheck(key, bucket)) { // Match located. 
- *value = reinterpret_cast(bucket + kValueOffset); + *value = reinterpret_cast(bucket + kValueOffset); if (*entry_num == 0) { // If this is the last bucket in the chain, prevent the next call from // starting over again. @@ -1061,28 +1183,36 @@ template -bool FastSeparateChainingHashTable - ::getNextEntryForCompositeKey(const std::vector &key, - const std::size_t hash_code, - const uint8_t **value, - std::size_t *entry_num) const { +bool FastSeparateChainingHashTable:: + getNextEntryForCompositeKey(const std::vector &key, + const std::size_t hash_code, + const std::uint8_t **value, + std::size_t *entry_num) const { DEBUG_ASSERT(this->key_types_.size() == key.size()); if (*entry_num == 0) { - *entry_num = slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed); + *entry_num = + slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed); } else if (*entry_num == std::numeric_limits::max()) { return false; } while (*entry_num != 0) { DEBUG_ASSERT(*entry_num != std::numeric_limits::max()); - const char *bucket = static_cast(buckets_) + (*entry_num - 1) * bucket_size_; - *entry_num = reinterpret_cast*>(bucket)->load(std::memory_order_relaxed); - const std::size_t bucket_hash = *reinterpret_cast( + const char *bucket = + static_cast(buckets_) + (*entry_num - 1) * bucket_size_; + *entry_num = + reinterpret_cast *>(bucket)->load( + std::memory_order_relaxed); + const std::size_t bucket_hash = *reinterpret_cast( bucket + sizeof(std::atomic)); - if ((bucket_hash == hash_code) && key_manager_.compositeKeyCollisionCheck(key, bucket)) { + if ((bucket_hash == hash_code) && + key_manager_.compositeKeyCollisionCheck(key, bucket)) { // Match located. - *value = reinterpret_cast(bucket + kValueOffset); + *value = reinterpret_cast(bucket + kValueOffset); if (*entry_num == 0) { // If this is the last bucket in the chain, prevent the next call from // starting over again. @@ -1100,23 +1230,32 @@ template -bool FastSeparateChainingHashTable - ::hasKey(const TypedValue &key) const { +bool FastSeparateChainingHashTable< + resizable, + serializable, + force_key_copy, + allow_duplicate_keys>::hasKey(const TypedValue &key) const { DEBUG_ASSERT(this->key_types_.size() == 1); - DEBUG_ASSERT(key.isPlausibleInstanceOf(this->key_types_.front()->getSignature())); + DEBUG_ASSERT( + key.isPlausibleInstanceOf(this->key_types_.front()->getSignature())); const std::size_t hash_code = key.getHash(); - std::size_t bucket_ref = slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed); + std::size_t bucket_ref = + slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed); while (bucket_ref != 0) { DEBUG_ASSERT(bucket_ref != std::numeric_limits::max()); - const char *bucket = static_cast(buckets_) + (bucket_ref - 1) * bucket_size_; - const std::size_t bucket_hash = *reinterpret_cast( + const char *bucket = + static_cast(buckets_) + (bucket_ref - 1) * bucket_size_; + const std::size_t bucket_hash = *reinterpret_cast( bucket + sizeof(std::atomic)); - if ((bucket_hash == hash_code) && key_manager_.scalarKeyCollisionCheck(key, bucket)) { + if ((bucket_hash == hash_code) && + key_manager_.scalarKeyCollisionCheck(key, bucket)) { // Find a match. 
return true; } - bucket_ref = reinterpret_cast*>(bucket)->load(std::memory_order_relaxed); + bucket_ref = + reinterpret_cast *>(bucket)->load( + std::memory_order_relaxed); } return false; } @@ -1125,22 +1264,31 @@ template -bool FastSeparateChainingHashTable - ::hasCompositeKey(const std::vector &key) const { +bool FastSeparateChainingHashTable< + resizable, + serializable, + force_key_copy, + allow_duplicate_keys>::hasCompositeKey(const std::vector &key) + const { DEBUG_ASSERT(this->key_types_.size() == key.size()); const std::size_t hash_code = this->hashCompositeKey(key); - std::size_t bucket_ref = slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed); + std::size_t bucket_ref = + slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed); while (bucket_ref != 0) { DEBUG_ASSERT(bucket_ref != std::numeric_limits::max()); - const char *bucket = static_cast(buckets_) + (bucket_ref - 1) * bucket_size_; - const std::size_t bucket_hash = *reinterpret_cast( + const char *bucket = + static_cast(buckets_) + (bucket_ref - 1) * bucket_size_; + const std::size_t bucket_hash = *reinterpret_cast( bucket + sizeof(std::atomic)); - if ((bucket_hash == hash_code) && key_manager_.compositeKeyCollisionCheck(key, bucket)) { + if ((bucket_hash == hash_code) && + key_manager_.compositeKeyCollisionCheck(key, bucket)) { // Find a match. return true; } - bucket_ref = reinterpret_cast*>(bucket)->load(std::memory_order_relaxed); + bucket_ref = + reinterpret_cast *>(bucket)->load( + std::memory_order_relaxed); } return false; } @@ -1149,10 +1297,13 @@ template -void FastSeparateChainingHashTable - ::resize(const std::size_t extra_buckets, - const std::size_t extra_variable_storage, - const std::size_t retry_num) { +void FastSeparateChainingHashTable< + resizable, + serializable, + force_key_copy, + allow_duplicate_keys>::resize(const std::size_t extra_buckets, + const std::size_t extra_variable_storage, + const std::size_t retry_num) { DEBUG_ASSERT(resizable); // A retry should never be necessary with this implementation of HashTable. @@ -1178,33 +1329,36 @@ void FastSeparateChainingHashTablenum_buckets + extra_buckets / 2) * kHashTableLoadFactor * 2); - std::size_t variable_storage_required - = (resized_num_slots / kHashTableLoadFactor) * key_manager_.getEstimatedVariableKeySize(); - const std::size_t original_variable_storage_used - = header_->variable_length_bytes_allocated.load(std::memory_order_relaxed); + std::size_t variable_storage_required = + (resized_num_slots / kHashTableLoadFactor) * + key_manager_.getEstimatedVariableKeySize(); + const std::size_t original_variable_storage_used = + header_->variable_length_bytes_allocated.load(std::memory_order_relaxed); // If this resize was triggered by a too-large variable-length key, bump up // the variable-length storage requirement. 
- if ((extra_variable_storage > 0) - && (extra_variable_storage + original_variable_storage_used - > key_manager_.getVariableLengthKeyStorageSize())) { + if ((extra_variable_storage > 0) && + (extra_variable_storage + original_variable_storage_used > + key_manager_.getVariableLengthKeyStorageSize())) { variable_storage_required += extra_variable_storage; } - const std::size_t resized_memory_required - = sizeof(Header) - + resized_num_slots * sizeof(std::atomic) - + (resized_num_slots / kHashTableLoadFactor) * bucket_size_ - + variable_storage_required; - const std::size_t resized_storage_slots - = this->storage_manager_->SlotsNeededForBytes(resized_memory_required); + const std::size_t resized_memory_required = + sizeof(Header) + resized_num_slots * sizeof(std::atomic) + + (resized_num_slots / kHashTableLoadFactor) * bucket_size_ + + variable_storage_required; + const std::size_t resized_storage_slots = + this->storage_manager_->SlotsNeededForBytes(resized_memory_required); if (resized_storage_slots == 0) { - FATAL_ERROR("Storage requirement for resized SeparateChainingHashTable " - "exceeds maximum allocation size."); + FATAL_ERROR( + "Storage requirement for resized SeparateChainingHashTable " + "exceeds maximum allocation size."); } // Get a new StorageBlob to hold the resized hash table. - const block_id resized_blob_id = this->storage_manager_->createBlob(resized_storage_slots); - MutableBlobReference resized_blob = this->storage_manager_->getBlobMutable(resized_blob_id); + const block_id resized_blob_id = + this->storage_manager_->createBlob(resized_storage_slots); + MutableBlobReference resized_blob = + this->storage_manager_->getBlobMutable(resized_blob_id); // Locate data structures inside the new StorageBlob. void *aligned_memory_start = resized_blob->getMemoryMutable(); @@ -1212,12 +1366,12 @@ void FastSeparateChainingHashTablegetMemoryMutable()) { // Again, should be impossible. DEV_WARNING("In SeparateChainingHashTable::resize(), StorageBlob " @@ -1227,59 +1381,63 @@ void FastSeparateChainingHashTable(aligned_memory_start); - aligned_memory_start = static_cast(aligned_memory_start) + sizeof(Header); + Header *resized_header = static_cast
(aligned_memory_start); + aligned_memory_start = + static_cast(aligned_memory_start) + sizeof(Header); available_memory -= sizeof(Header); // As in constructor, recompute the number of slots and buckets using the // actual available memory. - std::size_t resized_num_buckets - = (available_memory - extra_variable_storage) - / (kHashTableLoadFactor * sizeof(std::atomic) - + bucket_size_ - + key_manager_.getEstimatedVariableKeySize()); - resized_num_slots = get_previous_prime_number(resized_num_buckets * kHashTableLoadFactor); + std::size_t resized_num_buckets = + (available_memory - extra_variable_storage) / + (kHashTableLoadFactor * sizeof(std::atomic) + bucket_size_ + + key_manager_.getEstimatedVariableKeySize()); + resized_num_slots = + get_previous_prime_number(resized_num_buckets * kHashTableLoadFactor); resized_num_buckets = resized_num_slots / kHashTableLoadFactor; // Locate slot array. - std::atomic *resized_slots = static_cast*>(aligned_memory_start); - aligned_memory_start = static_cast(aligned_memory_start) - + sizeof(std::atomic) * resized_num_slots; + std::atomic *resized_slots = + static_cast *>(aligned_memory_start); + aligned_memory_start = static_cast(aligned_memory_start) + + sizeof(std::atomic) * resized_num_slots; available_memory -= sizeof(std::atomic) * resized_num_slots; // As in constructor, we will be extra paranoid and use align() to locate the // start of the array of buckets, as well. void *resized_buckets = aligned_memory_start; - if (align(kBucketAlignment, - bucket_size_, - resized_buckets, - available_memory) - == nullptr) { - FATAL_ERROR("StorageBlob used to hold resized SeparateChainingHashTable " - "is too small to meet alignment requirements of buckets."); + if (align( + kBucketAlignment, bucket_size_, resized_buckets, available_memory) == + nullptr) { + FATAL_ERROR( + "StorageBlob used to hold resized SeparateChainingHashTable " + "is too small to meet alignment requirements of buckets."); } else if (resized_buckets != aligned_memory_start) { - DEV_WARNING("Bucket array start position adjusted to meet alignment " - "requirement for SeparateChainingHashTable's value type."); - if (resized_num_buckets * bucket_size_ + variable_storage_required > available_memory) { + DEV_WARNING( + "Bucket array start position adjusted to meet alignment " + "requirement for SeparateChainingHashTable's value type."); + if (resized_num_buckets * bucket_size_ + variable_storage_required > + available_memory) { --resized_num_buckets; } } - aligned_memory_start = static_cast(aligned_memory_start) - + resized_num_buckets * bucket_size_; + aligned_memory_start = static_cast(aligned_memory_start) + + resized_num_buckets * bucket_size_; available_memory -= resized_num_buckets * bucket_size_; void *resized_variable_length_key_storage = aligned_memory_start; const std::size_t resized_variable_length_key_storage_size = available_memory; - const std::size_t original_buckets_used = header_->buckets_allocated.load(std::memory_order_relaxed); + const std::size_t original_buckets_used = + header_->buckets_allocated.load(std::memory_order_relaxed); // Initialize the header. 
resized_header->num_slots = resized_num_slots; resized_header->num_buckets = resized_num_buckets; - resized_header->buckets_allocated.store(original_buckets_used, std::memory_order_relaxed); + resized_header->buckets_allocated.store(original_buckets_used, + std::memory_order_relaxed); resized_header->variable_length_bytes_allocated.store( - original_variable_storage_used, - std::memory_order_relaxed); + original_variable_storage_used, std::memory_order_relaxed); // Bulk-copy buckets. This is safe because: // 1. The "next" pointers will be adjusted when rebuilding chains below. @@ -1298,30 +1456,34 @@ void FastSeparateChainingHashTable(buckets_) + kValueOffset; - void *current_value_resized = static_cast(resized_buckets) + kValueOffset; - for (std::size_t bucket_num = 0; bucket_num < original_buckets_used; ++bucket_num) { + void *current_value_original = static_cast(buckets_) + kValueOffset; + void *current_value_resized = + static_cast(resized_buckets) + kValueOffset; + for (std::size_t bucket_num = 0; bucket_num < original_buckets_used; + ++bucket_num) { // Use a move constructor if available to avoid a deep-copy, since resizes // always succeed. - new (current_value_resized) uint8_t(std::move(*static_cast(current_value_original))); - current_value_original = static_cast(current_value_original) + bucket_size_; - current_value_resized = static_cast(current_value_resized) + bucket_size_; + new (current_value_resized) std::uint8_t( + std::move(*static_cast(current_value_original))); + current_value_original = + static_cast(current_value_original) + bucket_size_; + current_value_resized = + static_cast(current_value_resized) + bucket_size_; } // Copy over variable-length key components, if any. if (original_variable_storage_used > 0) { - DEBUG_ASSERT(original_variable_storage_used - == key_manager_.getNextVariableLengthKeyOffset()); - DEBUG_ASSERT(original_variable_storage_used <= resized_variable_length_key_storage_size); + DEBUG_ASSERT(original_variable_storage_used == + key_manager_.getNextVariableLengthKeyOffset()); + DEBUG_ASSERT(original_variable_storage_used <= + resized_variable_length_key_storage_size); std::memcpy(resized_variable_length_key_storage, key_manager_.getVariableLengthKeyStorage(), original_variable_storage_used); } // Destroy values in the original hash table, if neccesary, - DestroyValues(buckets_, - original_buckets_used, - bucket_size_); + DestroyValues(buckets_, original_buckets_used, bucket_size_); // Make resized structures active. std::swap(this->blob_, resized_blob); @@ -1340,17 +1502,18 @@ void FastSeparateChainingHashTable *next_ptr - = static_cast*>(current_bucket); - const std::size_t hash_code = *reinterpret_cast( - static_cast(current_bucket) + sizeof(std::atomic)); + for (std::size_t bucket_num = 0; bucket_num < original_buckets_used; + ++bucket_num) { + std::atomic *next_ptr = + static_cast *>(current_bucket); + const std::size_t hash_code = *reinterpret_cast( + static_cast(current_bucket) + + sizeof(std::atomic)); const std::size_t slot_number = hash_code % header_->num_slots; std::size_t slot_ptr_value = 0; - if (slots_[slot_number].compare_exchange_strong(slot_ptr_value, - bucket_num + 1, - std::memory_order_relaxed)) { + if (slots_[slot_number].compare_exchange_strong( + slot_ptr_value, bucket_num + 1, std::memory_order_relaxed)) { // This bucket is the first in the chain for this block, so reset its // next pointer to 0. 
next_ptr->store(0, std::memory_order_relaxed); @@ -1360,7 +1523,7 @@ void FastSeparateChainingHashTablestore(slot_ptr_value, std::memory_order_relaxed); slots_[slot_number].store(bucket_num + 1, std::memory_order_relaxed); } - current_bucket = static_cast(current_bucket) + bucket_size_; + current_bucket = static_cast(current_bucket) + bucket_size_; } } @@ -1368,10 +1531,13 @@ template -bool FastSeparateChainingHashTable - ::preallocateForBulkInsert(const std::size_t total_entries, - const std::size_t total_variable_key_size, - HashTablePreallocationState *prealloc_state) { +bool FastSeparateChainingHashTable:: + preallocateForBulkInsert(const std::size_t total_entries, + const std::size_t total_variable_key_size, + HashTablePreallocationState *prealloc_state) { DEBUG_ASSERT(allow_duplicate_keys); if (!key_manager_.allocateVariableLengthKeyStorage(total_variable_key_size)) { return false; @@ -1382,12 +1548,15 @@ bool FastSeparateChainingHashTablenum_buckets', their respective // rollbacks might happen in such an order that some bucket ranges get // skipped, while others might get double-allocated later. - std::size_t original_buckets_allocated = header_->buckets_allocated.load(std::memory_order_relaxed); - std::size_t buckets_post_allocation = original_buckets_allocated + total_entries; - while ((buckets_post_allocation <= header_->num_buckets) - && !header_->buckets_allocated.compare_exchange_weak(original_buckets_allocated, - buckets_post_allocation, - std::memory_order_relaxed)) { + std::size_t original_buckets_allocated = + header_->buckets_allocated.load(std::memory_order_relaxed); + std::size_t buckets_post_allocation = + original_buckets_allocated + total_entries; + while ((buckets_post_allocation <= header_->num_buckets) && + !header_->buckets_allocated.compare_exchange_weak( + original_buckets_allocated, + buckets_post_allocation, + std::memory_order_relaxed)) { buckets_post_allocation = original_buckets_allocated + total_entries; } @@ -1398,8 +1567,9 @@ bool FastSeparateChainingHashTablebucket_position = original_buckets_allocated; if (total_variable_key_size != 0) { - prealloc_state->variable_length_key_position - = key_manager_.incrementNextVariableLengthKeyOffset(total_variable_key_size); + prealloc_state->variable_length_key_position = + key_manager_.incrementNextVariableLengthKeyOffset( + total_variable_key_size); } return true; } @@ -1408,17 +1578,18 @@ template -void FastSeparateChainingHashTable - ::DestroyValues(void *hash_buckets, - const std::size_t num_buckets, - const std::size_t bucket_size) { - if (!std::is_trivially_destructible::value) { - void *value_ptr = static_cast(hash_buckets) + kValueOffset; - for (std::size_t bucket_num = 0; - bucket_num < num_buckets; - ++bucket_num) { - static_cast(value_ptr)->~uint8_t(); - value_ptr = static_cast(value_ptr) + bucket_size; +void FastSeparateChainingHashTable< + resizable, + serializable, + force_key_copy, + allow_duplicate_keys>::DestroyValues(void *hash_buckets, + const std::size_t num_buckets, + const std::size_t bucket_size) { + if (!std::is_trivially_destructible::value) { + void *value_ptr = static_cast(hash_buckets) + kValueOffset; + for (std::size_t bucket_num = 0; bucket_num < num_buckets; ++bucket_num) { + static_cast(value_ptr)->~uint8_t(); + value_ptr = static_cast(value_ptr) + bucket_size; } } } @@ -1427,39 +1598,45 @@ template -inline bool FastSeparateChainingHashTable - ::locateBucketForInsertion(const std::size_t hash_code, - const std::size_t variable_key_allocation_required, - void **bucket, - 
std::atomic **pending_chain_ptr, - std::size_t *pending_chain_ptr_finish_value, - HashTablePreallocationState *prealloc_state) { +inline bool FastSeparateChainingHashTable:: + locateBucketForInsertion(const std::size_t hash_code, + const std::size_t variable_key_allocation_required, + void **bucket, + std::atomic **pending_chain_ptr, + std::size_t *pending_chain_ptr_finish_value, + HashTablePreallocationState *prealloc_state) { DEBUG_ASSERT((prealloc_state == nullptr) || allow_duplicate_keys); if (*bucket == nullptr) { *pending_chain_ptr = &(slots_[hash_code % header_->num_slots]); } else { - *pending_chain_ptr = static_cast*>(*bucket); + *pending_chain_ptr = static_cast *>(*bucket); } for (;;) { std::size_t existing_chain_ptr = 0; - if ((*pending_chain_ptr)->compare_exchange_strong(existing_chain_ptr, -
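
[Editor's note: the archived diff above is a clang-format pass over Quickstep's FastSeparateChainingHashTable. For readers following the bucket layout it reshuffles, the following is a minimal, single-threaded C++ sketch of the conventions visible in the diff: every bucket begins with an atomic std::size_t "next" reference (1-based, 0 terminates the chain), followed by the key's hash code, with the payload starting at kValueOffset and the bucket footprint rounded up to the atomic's alignment as in ComputeBucketSize(). This is NOT the Quickstep API; kPayloadSize, kFixedKeySize, the toy slot/bucket counts, and the insert/find lambdas are illustrative assumptions, and the real table publishes chain pointers with compare_exchange_strong / store(memory_order_release) so concurrent writers cannot lose buckets, which the sketch omits to stay short.]

// Illustrative sketch of the chained-bucket layout used by
// FastSeparateChainingHashTable; single-threaded, assumed sizes and names.
#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <functional>
#include <iostream>
#include <vector>

namespace {

constexpr std::size_t kBucketAlignment = alignof(std::atomic<std::size_t>);
constexpr std::size_t kValueOffset =
    sizeof(std::atomic<std::size_t>) + sizeof(std::size_t);
constexpr std::size_t kPayloadSize = sizeof(std::uint64_t);   // assumed payload
constexpr std::size_t kFixedKeySize = sizeof(std::uint64_t);  // assumed key

// Same rounding rule as the diff's ComputeBucketSize(): round the bucket
// footprint up to a multiple of the "next" pointer's alignment requirement.
constexpr std::size_t ComputeBucketSize(std::size_t fixed_key_size) {
  return (((kValueOffset + kPayloadSize + fixed_key_size - 1) /
           kBucketAlignment) + 1) * kBucketAlignment;
}

}  // namespace

int main() {
  constexpr std::size_t kBucketSize = ComputeBucketSize(kFixedKeySize);
  constexpr std::size_t kNumSlots = 7;    // toy stand-in for a prime slot count
  constexpr std::size_t kNumBuckets = 4;

  // Zero the slot array (0 == empty chain), as the constructor's memset does.
  std::vector<std::atomic<std::size_t>> slots(kNumSlots);
  for (auto &slot : slots) slot.store(0, std::memory_order_relaxed);
  std::vector<char> buckets(kNumBuckets * kBucketSize, 0);
  std::size_t buckets_allocated = 0;

  // Insert: write a bucket, then publish it at the head of its slot's chain.
  auto insert = [&](std::uint64_t key, std::uint64_t value) {
    assert(buckets_allocated < kNumBuckets);
    const std::size_t hash = std::hash<std::uint64_t>{}(key);
    char *bucket = buckets.data() + buckets_allocated * kBucketSize;
    const std::size_t bucket_ref = ++buckets_allocated;  // references are 1-based

    auto *next = reinterpret_cast<std::atomic<std::size_t>*>(bucket);
    next->store(slots[hash % kNumSlots].load(std::memory_order_relaxed),
                std::memory_order_relaxed);
    std::memcpy(bucket + sizeof(std::atomic<std::size_t>), &hash, sizeof(hash));
    std::memcpy(bucket + kValueOffset, &value, sizeof(value));
    std::memcpy(bucket + kValueOffset + kPayloadSize, &key, sizeof(key));
    slots[hash % kNumSlots].store(bucket_ref, std::memory_order_release);
  };

  // Lookup mirrors getSingle(): hash to a slot, then walk the chain of
  // 1-based bucket references until both the stored hash and the key match.
  auto find = [&](std::uint64_t key) -> const std::uint64_t* {
    const std::size_t hash = std::hash<std::uint64_t>{}(key);
    std::size_t bucket_ref =
        slots[hash % kNumSlots].load(std::memory_order_relaxed);
    while (bucket_ref != 0) {
      const char *bucket = buckets.data() + (bucket_ref - 1) * kBucketSize;
      std::size_t bucket_hash;
      std::memcpy(&bucket_hash, bucket + sizeof(std::atomic<std::size_t>),
                  sizeof(bucket_hash));
      std::uint64_t bucket_key;
      std::memcpy(&bucket_key, bucket + kValueOffset + kPayloadSize,
                  sizeof(bucket_key));
      if (bucket_hash == hash && bucket_key == key) {
        return reinterpret_cast<const std::uint64_t*>(bucket + kValueOffset);
      }
      bucket_ref = reinterpret_cast<const std::atomic<std::size_t>*>(bucket)
                       ->load(std::memory_order_relaxed);
    }
    return nullptr;  // end of chain, no match
  };

  insert(42, 1000);
  insert(7, 2000);
  if (const std::uint64_t *v = find(42)) {
    std::cout << "key 42 -> " << *v << "\n";
  }
  return 0;
}

Storing the chain reference as a 1-based bucket index rather than a raw pointer is what lets the zeroed slot array double as the "empty chain" marker; that is why the loops in the diff compare bucket_ref against 0 and subtract 1 when computing the bucket address.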