@@ -1639,7 +1639,7 @@ HashTable<Key, Value, Extractor, Traits, KeyTraits, Allocator>::ExpandBuffer(
    }
  }
  table_ = temporary_table;
-  Allocator::template BackingWriteBarrier(&table_);
+  Allocator::BackingWriteBarrier(&table_);

  HashTableBucketInitializer<Traits, Allocator, Value>::InitializeTable(
      original_table, new_table_size);
@@ -1693,7 +1693,7 @@ Value* HashTable<Key, Value, Extractor, Traits, KeyTraits, Allocator>::RehashTo(
  // This swaps the newly allocated buffer with the current one. The store to
  // the current table has to be atomic to prevent races with concurrent marker.
  AsAtomicPtr(&table_)->store(new_hash_table.table_, std::memory_order_relaxed);
-  Allocator::template BackingWriteBarrier(&table_);
+  Allocator::BackingWriteBarrier(&table_);
  table_size_ = new_table_size;

  new_hash_table.table_ = old_table;
@@ -1845,8 +1845,8 @@ void HashTable<Key, Value, Extractor, Traits, KeyTraits, Allocator>::swap(
  // on the mutator thread, which is also the only one that writes to them, so
  // there is *no* risk of data races when reading.
  AtomicWriteSwap(table_, other.table_);
-  Allocator::template BackingWriteBarrier(&table_);
-  Allocator::template BackingWriteBarrier(&other.table_);
+  Allocator::BackingWriteBarrier(&table_);
+  Allocator::BackingWriteBarrier(&other.table_);
  if (IsWeak<ValueType>::value) {
    // Weak processing is omitted when no backing store is present. In case such
    // an empty table is later on used it needs to be strongified.
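The change itself is mechanical: `BackingWriteBarrier` is named through the dependent type `Allocator`, and the `template` disambiguator is only the right spelling when the member being named is itself a template. Below is a minimal standalone sketch (not Blink code; `PlainAllocator`, `Table`, and `Touch` are hypothetical names) of the call-site spelling for a plain static member:

    // Minimal sketch, not Blink code: PlainAllocator and Table are hypothetical.
    #include <iostream>

    struct PlainAllocator {
      // A non-template static member. Naming it through a dependent type needs
      // no `template` keyword; some compilers reject the keyword here outright.
      static void BackingWriteBarrier(void** slot) {
        std::cout << "write barrier for slot " << static_cast<void*>(slot) << "\n";
      }
    };

    template <typename Allocator>
    struct Table {
      void* table_ = nullptr;

      void Touch() {
        // `Allocator` is a dependent type, but because BackingWriteBarrier is
        // not a member template, the plain qualified call is the correct form:
        Allocator::BackingWriteBarrier(&table_);
      }
    };

    int main() {
      Table<PlainAllocator> t;
      t.Touch();
    }

If `BackingWriteBarrier` were a member function template called with explicit template arguments, the `Allocator::template BackingWriteBarrier<...>(...)` spelling on the removed lines would be required; dropping `template` in these hunks suggests the function is (or has become) a non-template member.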
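The context lines around the second and third hunks also spell out the concurrency contract: `table_` is written only on the mutator thread, but a concurrent marker may read it, so updates are published with an atomic store. Here is a minimal sketch of that pattern, assuming `std::atomic<Bucket*>` is layout-compatible with `Bucket*` (the real WTF code verifies this separately; `AsAtomicPtr` below is a local stand-in, not the actual implementation):

    // Minimal sketch of publishing a swapped backing pointer atomically.
    #include <atomic>

    template <typename T>
    std::atomic<T*>* AsAtomicPtr(T** slot) {
      // Treat a plain pointer slot as an atomic, mirroring the AsAtomicPtr
      // call in the diff; assumes std::atomic<T*> has the same layout as T*.
      return reinterpret_cast<std::atomic<T*>*>(slot);
    }

    struct Bucket {};

    struct Table {
      Bucket* table_ = nullptr;

      void Publish(Bucket* new_table) {
        // The store is atomic so a concurrent marker never reads a torn
        // value; relaxed ordering matches the memory_order_relaxed used in
        // the diff, where only the mutator thread writes this slot.
        AsAtomicPtr(&table_)->store(new_table, std::memory_order_relaxed);
      }
    };

    int main() {
      Table t;
      Bucket b;
      t.Publish(&b);
    }

In the real code the atomic store is only half of the protocol: the `Allocator::BackingWriteBarrier(&table_)` call that follows each store is what notifies the garbage collector about the new backing allocation.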