// File: /trunk/3rdparty/google/sparsehash/sparsehashtable.h  (pcsx2_0.9.7)
// Revision 8 - Mon Sep 6 11:19:43 2010 UTC - william
// Exported ./upstream/trunk @r3730 from http://pcsx2.googlecode.com/svn/trunk/
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Craig Silverstein
//
// A sparse hashtable is a particular implementation of
// a hashtable: one that is meant to minimize memory use.
// It does this by using a *sparse table* (cf sparsetable.h),
// which uses between 1 and 2 bits to store empty buckets
// (we may need another bit for hashtables that support deletion).
//
// When empty buckets are so cheap, an appealing hashtable
// implementation is internal probing, in which the hashtable
// is a single table, and collisions are resolved by trying
// to insert again in another bucket.  The most cache-efficient
// internal probing schemes are linear probing (which suffers,
// alas, from clumping) and quadratic probing, which is what
// we implement by default.
//
// Deleted buckets are a bit of a pain.  We have to somehow mark
// deleted buckets (the probing must distinguish them from empty
// buckets).  The most principled way is to have another bitmap,
// but that's annoying and takes up space.  Instead we let the
// user specify an "impossible" key.  We set deleted buckets
// to have the impossible key.
//
// Note it is possible to change the value of the delete key
// on the fly; you can even remove it, though after that point
// the hashtable is insert-only until you set it again.
//
// You probably shouldn't use this code directly.  Use
// <google/sparse_hash_map> or <google/sparse_hash_set> instead.
//
// You can modify the following, below:
// HT_OCCUPANCY_FLT            -- how full before we double size
// HT_EMPTY_FLT                -- how empty before we halve size
// HT_MIN_BUCKETS              -- smallest bucket size
// HT_DEFAULT_STARTING_BUCKETS -- default bucket size at construct-time
//
// You can also change enlarge_resize_percent (which defaults to
// HT_OCCUPANCY_FLT), and shrink_resize_percent (which defaults to
// HT_EMPTY_FLT) with set_resizing_parameters().
//
// How to decide what values to use?
// shrink_resize_percent's default of .4 * HT_OCCUPANCY_FLT is probably good.
// HT_MIN_BUCKETS is probably unnecessary since you can specify
// (indirectly) the starting number of buckets at construct-time.
// For enlarge_resize_percent, you can use this chart to trade off
// expected lookup time against the space taken up.  By default, this
// code uses quadratic probing, though you can change it to linear
// via JUMP_ below if you really want to.
//
// From http://www.augustana.ca/~mohrj/courses/1999.fall/csc210/lecture_notes/hashing.html
// NUMBER OF PROBES / LOOKUP       Successful            Unsuccessful
// Quadratic collision resolution  1 - ln(1-L) - L/2     1/(1-L) - L - ln(1-L)
// Linear collision resolution     [1+1/(1-L)]/2         [1+1/(1-L)^2]/2
//
// -- enlarge_resize_percent --    0.10  0.50  0.60  0.75  0.80  0.90  0.99
// QUADRATIC COLLISION RES.
//    probes/successful lookup     1.05  1.44  1.62  2.01  2.21  2.85  5.11
//    probes/unsuccessful lookup   1.11  2.19  2.82  4.64  5.81  11.4  103.6
// LINEAR COLLISION RES.
//    probes/successful lookup     1.06  1.5   1.75  2.5   3.0   5.5   50.5
//    probes/unsuccessful lookup   1.12  2.5   3.6   8.5   13.0  50.0  5000.0
//
// The value type is required to be copy constructible and default
// constructible, but it need not be (and commonly isn't) assignable.

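// Illustrative usage sketch (editor's note, not part of the original header):
// the deleted-key mechanism described above is normally driven through the
// public wrappers.  This assumes the usual <google/sparse_hash_set> API and a
// hypothetical sentinel value of -1.
//
//   #include <google/sparse_hash_set>
//   google::sparse_hash_set<int> s;
//   s.set_deleted_key(-1);    // -1 must never be inserted as a real element
//   s.insert(42);
//   s.erase(42);              // bucket is now marked with the "impossible" key
//   s.clear_deleted_key();    // allowed again once deleted entries are purged
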
#ifndef _SPARSEHASHTABLE_H_
#define _SPARSEHASHTABLE_H_

#ifndef SPARSEHASH_STAT_UPDATE
#define SPARSEHASH_STAT_UPDATE(x) ((void) 0)
#endif

// The probing method
// Linear probing
// #define JUMP_(key, num_probes)    ( 1 )
// Quadratic-ish probing
#define JUMP_(key, num_probes)    ( num_probes )

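// Worked example (explanatory comment, not original): with the quadratic-ish
// JUMP_ above, successive probes from a home bucket h in a 16-bucket table
// visit h, h+1, h+3, h+6, h+10, ... (mod 16) -- the offset grows by
// num_probes at each step.  With the commented-out linear JUMP_ the sequence
// would instead be h, h+1, h+2, h+3, ...
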

// Hashtable class, used to implement the hashed associative containers
// hash_set and hash_map.

#include <google/sparsehash/sparseconfig.h>
#include <assert.h>
#include <algorithm>            // For swap(), eg
#include <iterator>             // for facts about iterator tags
#include <utility>              // for pair<>
#include <google/sparsetable>   // Since that's basically what we are

_START_GOOGLE_NAMESPACE_

using STL_NAMESPACE::pair;

// Alloc is completely ignored.  It is present as a template parameter only
// for the sake of being compatible with the old SGI hashtable interface.
// TODO(csilvers): is that the right thing to do?

template <class Value, class Key, class HashFcn,
          class ExtractKey, class EqualKey, class Alloc>
class sparse_hashtable;

template <class V, class K, class HF, class ExK, class EqK, class A>
struct sparse_hashtable_iterator;

template <class V, class K, class HF, class ExK, class EqK, class A>
struct sparse_hashtable_const_iterator;

// As far as iterating, we're basically just a sparsetable
// that skips over deleted elements.
template <class V, class K, class HF, class ExK, class EqK, class A>
struct sparse_hashtable_iterator {
 public:
  typedef sparse_hashtable_iterator<V,K,HF,ExK,EqK,A>       iterator;
  typedef sparse_hashtable_const_iterator<V,K,HF,ExK,EqK,A> const_iterator;
  typedef typename sparsetable<V>::nonempty_iterator        st_iterator;

  typedef STL_NAMESPACE::forward_iterator_tag iterator_category;
  typedef V value_type;
  typedef ptrdiff_t difference_type;
  typedef size_t size_type;
  typedef V& reference;               // Value
  typedef V* pointer;

  // "Real" constructor and default constructor
  sparse_hashtable_iterator(const sparse_hashtable<V,K,HF,ExK,EqK,A> *h,
                            st_iterator it, st_iterator it_end)
    : ht(h), pos(it), end(it_end)  { advance_past_deleted(); }
  sparse_hashtable_iterator() { }     // not ever used internally
  // The default destructor is fine; we don't define one
  // The default operator= is fine; we don't define one

  // Happy dereferencer
  reference operator*() const { return *pos; }
  pointer operator->() const  { return &(operator*()); }

  // Arithmetic.  The only hard part is making sure that
  // we're not on a marked-deleted array element
  void advance_past_deleted() {
    while ( pos != end && ht->test_deleted(*this) )
      ++pos;
  }
  iterator& operator++() {
    assert(pos != end); ++pos; advance_past_deleted(); return *this;
  }
  iterator operator++(int) { iterator tmp(*this); ++*this; return tmp; }

  // Comparison.
  bool operator==(const iterator& it) const { return pos == it.pos; }
  bool operator!=(const iterator& it) const { return pos != it.pos; }


  // The actual data
  const sparse_hashtable<V,K,HF,ExK,EqK,A> *ht;
  st_iterator pos, end;
};

// Now do it all again, but with const-ness!
template <class V, class K, class HF, class ExK, class EqK, class A>
struct sparse_hashtable_const_iterator {
 public:
  typedef sparse_hashtable_iterator<V,K,HF,ExK,EqK,A>       iterator;
  typedef sparse_hashtable_const_iterator<V,K,HF,ExK,EqK,A> const_iterator;
  typedef typename sparsetable<V>::const_nonempty_iterator  st_iterator;

  typedef STL_NAMESPACE::forward_iterator_tag iterator_category;
  typedef V value_type;
  typedef ptrdiff_t difference_type;
  typedef size_t size_type;
  typedef const V& reference;         // Value
  typedef const V* pointer;

  // "Real" constructor and default constructor
  sparse_hashtable_const_iterator(const sparse_hashtable<V,K,HF,ExK,EqK,A> *h,
                                  st_iterator it, st_iterator it_end)
    : ht(h), pos(it), end(it_end)  { advance_past_deleted(); }
  // This lets us convert regular iterators to const iterators
  sparse_hashtable_const_iterator() { }   // never used internally
  sparse_hashtable_const_iterator(const iterator &it)
    : ht(it.ht), pos(it.pos), end(it.end) { }
  // The default destructor is fine; we don't define one
  // The default operator= is fine; we don't define one

  // Happy dereferencer
  reference operator*() const { return *pos; }
  pointer operator->() const  { return &(operator*()); }

  // Arithmetic.  The only hard part is making sure that
  // we're not on a marked-deleted array element
  void advance_past_deleted() {
    while ( pos != end && ht->test_deleted(*this) )
      ++pos;
  }
  const_iterator& operator++() {
    assert(pos != end); ++pos; advance_past_deleted(); return *this;
  }
  const_iterator operator++(int) { const_iterator tmp(*this); ++*this; return tmp; }

  // Comparison.
  bool operator==(const const_iterator& it) const { return pos == it.pos; }
  bool operator!=(const const_iterator& it) const { return pos != it.pos; }


  // The actual data
  const sparse_hashtable<V,K,HF,ExK,EqK,A> *ht;
  st_iterator pos, end;
};

// And once again, but this time freeing up memory as we iterate
template <class V, class K, class HF, class ExK, class EqK, class A>
struct sparse_hashtable_destructive_iterator {
 public:
  typedef sparse_hashtable_destructive_iterator<V,K,HF,ExK,EqK,A> iterator;
  typedef typename sparsetable<V>::destructive_iterator           st_iterator;

  typedef STL_NAMESPACE::forward_iterator_tag iterator_category;
  typedef V value_type;
  typedef ptrdiff_t difference_type;
  typedef size_t size_type;
  typedef V& reference;               // Value
  typedef V* pointer;

  // "Real" constructor and default constructor
  sparse_hashtable_destructive_iterator(const
                                        sparse_hashtable<V,K,HF,ExK,EqK,A> *h,
                                        st_iterator it, st_iterator it_end)
    : ht(h), pos(it), end(it_end)  { advance_past_deleted(); }
  sparse_hashtable_destructive_iterator() { }   // never used internally
  // The default destructor is fine; we don't define one
  // The default operator= is fine; we don't define one

  // Happy dereferencer
  reference operator*() const { return *pos; }
  pointer operator->() const  { return &(operator*()); }

  // Arithmetic.  The only hard part is making sure that
  // we're not on a marked-deleted array element
  void advance_past_deleted() {
    while ( pos != end && ht->test_deleted(*this) )
      ++pos;
  }
  iterator& operator++() {
    assert(pos != end); ++pos; advance_past_deleted(); return *this;
  }
  iterator operator++(int) { iterator tmp(*this); ++*this; return tmp; }

  // Comparison.
  bool operator==(const iterator& it) const { return pos == it.pos; }
  bool operator!=(const iterator& it) const { return pos != it.pos; }


  // The actual data
  const sparse_hashtable<V,K,HF,ExK,EqK,A> *ht;
  st_iterator pos, end;
};


template <class Value, class Key, class HashFcn,
          class ExtractKey, class EqualKey, class Alloc>
class sparse_hashtable {
 public:
  typedef Key key_type;
  typedef Value value_type;
  typedef HashFcn hasher;
  typedef EqualKey key_equal;

  typedef size_t            size_type;
  typedef ptrdiff_t         difference_type;
  typedef value_type*       pointer;
  typedef const value_type* const_pointer;
  typedef value_type&       reference;
  typedef const value_type& const_reference;
  typedef sparse_hashtable_iterator<Value, Key, HashFcn,
                                    ExtractKey, EqualKey, Alloc>
      iterator;

  typedef sparse_hashtable_const_iterator<Value, Key, HashFcn,
                                          ExtractKey, EqualKey, Alloc>
      const_iterator;

  typedef sparse_hashtable_destructive_iterator<Value, Key, HashFcn,
                                                ExtractKey, EqualKey, Alloc>
      destructive_iterator;


  // How full we let the table get before we resize.  Knuth says .8 is
  // good -- higher causes us to probe too much, though saves memory
  static const float HT_OCCUPANCY_FLT;  // = 0.8f;

  // How empty we let the table get before we resize lower.
  // It should be less than OCCUPANCY_FLT / 2 or we thrash resizing
  static const float HT_EMPTY_FLT;      // = 0.4 * HT_OCCUPANCY_FLT;

  // Minimum size we're willing to let hashtables be.
  // Must be a power of two, and at least 4.
  // Note, however, that for a given hashtable, the minimum size is
  // determined by the first constructor arg, and may be >HT_MIN_BUCKETS.
  static const size_t HT_MIN_BUCKETS = 4;

  // By default, if you don't specify a hashtable size at
  // construction-time, we use this size.  Must be a power of two, and
  // at least HT_MIN_BUCKETS.
  static const size_t HT_DEFAULT_STARTING_BUCKETS = 32;

  // ITERATOR FUNCTIONS
  iterator begin()             { return iterator(this, table.nonempty_begin(),
                                                 table.nonempty_end()); }
  iterator end()               { return iterator(this, table.nonempty_end(),
                                                 table.nonempty_end()); }
  const_iterator begin() const { return const_iterator(this,
                                                       table.nonempty_begin(),
                                                       table.nonempty_end()); }
  const_iterator end() const   { return const_iterator(this,
                                                       table.nonempty_end(),
                                                       table.nonempty_end()); }

  // This is used when resizing
  destructive_iterator destructive_begin() {
    return destructive_iterator(this, table.destructive_begin(),
                                table.destructive_end());
  }
  destructive_iterator destructive_end() {
    return destructive_iterator(this, table.destructive_end(),
                                table.destructive_end());
  }


  // ACCESSOR FUNCTIONS for the things we templatize on, basically
  hasher hash_funct() const { return hash; }
  key_equal key_eq() const  { return equals; }

  // We need to copy values when we set the special marker for deleted
  // elements, but, annoyingly, we can't just use the copy assignment
  // operator because value_type might not be assignable (it's often
  // pair<const X, Y>).  We use explicit destructor invocation and
  // placement new to get around this.  Arg.
 private:
  void set_value(value_type* dst, const value_type src) {
    dst->~value_type();             // delete the old value, if any
    new(dst) value_type(src);
  }
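
  // Illustrative note (editor's addition, not original): for a map,
  // value_type is typically pair<const Key, Data>, whose copy assignment is
  // unusable because of the const member.  The destroy-then-placement-new
  // dance above sidesteps that, e.g. (sketch only):
  //
  //   std::pair<const int, std::string> v(1, "a"), w(2, "b");
  //   // v = w;                                    // would not compile
  //   v.~pair();                                   // what set_value() does
  //   new (&v) std::pair<const int, std::string>(w);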

  // This is used as a tag for the copy constructor, saying to destroy its
  // arg.  We have two ways of destructively copying: with potentially growing
  // the hashtable as we copy, and without.  To make sure the outside world
  // can't do a destructive copy, we make the typename private.
  enum MoveDontCopyT {MoveDontCopy, MoveDontGrow};


  // DELETE HELPER FUNCTIONS
  // This lets the user describe a key that will indicate deleted
  // table entries.  This key should be an "impossible" entry --
  // if you try to insert it for real, you won't be able to retrieve it!
  // (NB: while you pass in an entire value, only the key part is looked
  // at.  This is just because I don't know how to assign just a key.)
 private:
  void squash_deleted() {           // gets rid of any deleted entries we have
    if ( num_deleted ) {            // get rid of deleted before writing
      sparse_hashtable tmp(MoveDontGrow, *this);
      swap(tmp);                    // now we are tmp
    }
    assert(num_deleted == 0);
  }

 public:
  void set_deleted_key(const value_type &val) {
    // It's only safe to change what "deleted" means if we purge deleted guys
    squash_deleted();
    use_deleted = true;
    set_value(&delval, val);        // save the key (and rest of val too)
  }
  void clear_deleted_key() {
    squash_deleted();
    use_deleted = false;
  }

  // These are public so the iterators can use them
  // True if the item at position bucknum is the "deleted" marker
  bool test_deleted(size_type bucknum) const {
    // The num_deleted test is crucial for read(): after read(), the ht values
    // are garbage, and we don't want to think some of them are deleted.
    return (use_deleted && num_deleted > 0 && table.test(bucknum) &&
            equals(get_key(delval), get_key(table.get(bucknum))));
  }
  bool test_deleted(const iterator &it) const {
    return (use_deleted && num_deleted > 0 &&
            equals(get_key(delval), get_key(*it)));
  }
  bool test_deleted(const const_iterator &it) const {
    return (use_deleted && num_deleted > 0 &&
            equals(get_key(delval), get_key(*it)));
  }
  bool test_deleted(const destructive_iterator &it) const {
    return (use_deleted && num_deleted > 0 &&
            equals(get_key(delval), get_key(*it)));
  }
  // Set it so test_deleted is true.  Returns true if the object was not
  // already deleted.
  // See below (at erase()) for why we allow const_iterators
  bool set_deleted(const_iterator &it) {
    assert(use_deleted);            // bad if set_deleted_key() wasn't called
    bool retval = !test_deleted(it);
    // &* converts from iterator to value-type
    set_value(const_cast<value_type*>(&(*it)), delval);
    return retval;
  }
  // Set it so test_deleted is false.  Returns true if the object used to be
  // deleted.
  bool clear_deleted(const_iterator &it) {
    assert(use_deleted);            // bad if set_deleted_key() wasn't called
    // happens automatically when we assign something else in its place
    return test_deleted(it);
  }


  // FUNCTIONS CONCERNING SIZE
  size_type size() const      { return table.num_nonempty() - num_deleted; }
  // Buckets are always a power of 2
  size_type max_size() const  { return (size_type(-1) >> 1U) + 1; }
  bool empty() const          { return size() == 0; }
  size_type bucket_count() const      { return table.size(); }
  size_type max_bucket_count() const  { return max_size(); }

 private:
  // Because of the above, size_type(-1) is never legal; use it for errors
  static const size_type ILLEGAL_BUCKET = size_type(-1);

 private:
  // This is the smallest size a hashtable can be without being too crowded
  // If you like, you can give a min #buckets as well as a min #elts
  size_type min_size(size_type num_elts, size_type min_buckets_wanted) {
    size_type sz = HT_MIN_BUCKETS;
    while ( sz < min_buckets_wanted || num_elts >= sz * enlarge_resize_percent )
      sz *= 2;
    return sz;
  }
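
  // Worked example (editor's note, not original): with the default
  // enlarge_resize_percent of 0.8, min_size(100, 0) starts at 4 and keeps
  // doubling while 100 >= sz * 0.8, i.e. through 4, 8, ..., 64; it stops at
  // sz = 128 (since 100 < 102.4), so 100 expected elements get 128 buckets.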

  // Used after a string of deletes
  void maybe_shrink() {
    assert(table.num_nonempty() >= num_deleted);
    assert((bucket_count() & (bucket_count()-1)) == 0);   // is a power of two
    assert(bucket_count() >= HT_MIN_BUCKETS);

    // If you construct a hashtable with < HT_DEFAULT_STARTING_BUCKETS,
    // we'll never shrink until you get relatively big, and we'll never
    // shrink below HT_DEFAULT_STARTING_BUCKETS.  Otherwise, something
    // like "dense_hash_set<int> x; x.insert(4); x.erase(4);" will
    // shrink us down to HT_MIN_BUCKETS buckets, which is too small.
    if (shrink_threshold > 0
        && (table.num_nonempty()-num_deleted) < shrink_threshold &&
        bucket_count() > HT_DEFAULT_STARTING_BUCKETS ) {
      size_type sz = bucket_count() / 2;   // find how much we should shrink
      while ( sz > HT_DEFAULT_STARTING_BUCKETS &&
              (table.num_nonempty() - num_deleted) <= sz *
              shrink_resize_percent )
        sz /= 2;                           // stay a power of 2
      sparse_hashtable tmp(MoveDontCopy, *this, sz);
      swap(tmp);                           // now we are tmp
    }
    consider_shrink = false;               // because we just considered it
  }

  // We'll let you resize a hashtable -- though this makes us copy all!
  // When you resize, you say, "make it big enough for this many more elements"
  void resize_delta(size_type delta) {
    if ( consider_shrink )                 // see if lots of deletes happened
      maybe_shrink();
    if ( bucket_count() >= HT_MIN_BUCKETS &&
         (table.num_nonempty() + delta) <= enlarge_threshold )
      return;                              // we're ok as we are

    // Sometimes, we need to resize just to get rid of all the
    // "deleted" buckets that are clogging up the hashtable.  So when
    // deciding whether to resize, count the deleted buckets (which
    // are currently taking up room).  But later, when we decide what
    // size to resize to, *don't* count deleted buckets, since they
    // get discarded during the resize.
    const size_type needed_size = min_size(table.num_nonempty() + delta, 0);
    if ( needed_size > bucket_count() ) {  // we don't have enough buckets
      const size_type resize_to = min_size(table.num_nonempty() - num_deleted
                                           + delta, 0);
      sparse_hashtable tmp(MoveDontCopy, *this, resize_to);
      swap(tmp);                           // now we are tmp
    }
  }

  // Used to actually do the rehashing when we grow/shrink a hashtable
  void copy_from(const sparse_hashtable &ht, size_type min_buckets_wanted) {
    clear();            // clear table, set num_deleted to 0

    // If we need to change the size of our table, do it now
    const size_type resize_to = min_size(ht.size(), min_buckets_wanted);
    if ( resize_to > bucket_count() ) {    // we don't have enough buckets
      table.resize(resize_to);             // sets the number of buckets
      reset_thresholds();
    }

    // We use a normal iterator to get non-deleted bcks from ht
    // We could use insert() here, but since we know there are
    // no duplicates and no deleted items, we can be more efficient
    assert( (bucket_count() & (bucket_count()-1)) == 0);   // a power of two
    for ( const_iterator it = ht.begin(); it != ht.end(); ++it ) {
      size_type num_probes = 0;            // how many times we've probed
      size_type bucknum;
      const size_type bucket_count_minus_one = bucket_count() - 1;
      for (bucknum = hash(get_key(*it)) & bucket_count_minus_one;
           table.test(bucknum);            // not empty
           bucknum = (bucknum + JUMP_(key, num_probes)) & bucket_count_minus_one) {
        ++num_probes;
        assert(num_probes < bucket_count());  // or else the hashtable is full
      }
      table.set(bucknum, *it);             // copies the value to here
    }
  }

  // Implementation is like copy_from, but it destroys the table of the
  // "from" guy by freeing sparsetable memory as we iterate.  This is
  // useful in resizing, since we're throwing away the "from" guy anyway.
  void move_from(MoveDontCopyT mover, sparse_hashtable &ht,
                 size_type min_buckets_wanted) {
    clear();            // clear table, set num_deleted to 0

    // If we need to change the size of our table, do it now
    size_t resize_to;
    if ( mover == MoveDontGrow )
      resize_to = ht.bucket_count();       // keep same size as old ht
    else                                   // MoveDontCopy
      resize_to = min_size(ht.size(), min_buckets_wanted);
    if ( resize_to > bucket_count() ) {    // we don't have enough buckets
      table.resize(resize_to);             // sets the number of buckets
      reset_thresholds();
    }

    // We use a normal iterator to get non-deleted bcks from ht
    // We could use insert() here, but since we know there are
    // no duplicates and no deleted items, we can be more efficient
    assert( (bucket_count() & (bucket_count()-1)) == 0);   // a power of two
    // THIS IS THE MAJOR LINE THAT DIFFERS FROM COPY_FROM():
    for ( destructive_iterator it = ht.destructive_begin();
          it != ht.destructive_end(); ++it ) {
      size_type num_probes = 0;            // how many times we've probed
      size_type bucknum;
      for ( bucknum = hash(get_key(*it)) & (bucket_count()-1);  // h % buck_cnt
            table.test(bucknum);           // not empty
            bucknum = (bucknum + JUMP_(key, num_probes)) & (bucket_count()-1) ) {
        ++num_probes;
        assert(num_probes < bucket_count());  // or else the hashtable is full
      }
      table.set(bucknum, *it);             // copies the value to here
    }
  }


  // Required by the spec for hashed associative container
 public:
  // Though the docs say this should be num_buckets, I think it's much
  // more useful as num_elements.  As a special feature, calling with
  // req_elements==0 will cause us to shrink if we can, saving space.
  void resize(size_type req_elements) {    // resize to this or larger
    if ( consider_shrink || req_elements == 0 )
      maybe_shrink();
    if ( req_elements > table.num_nonempty() )  // we only grow
      resize_delta(req_elements - table.num_nonempty());
  }

  // Change the value of shrink_resize_percent and
  // enlarge_resize_percent.  The description at the beginning of this
  // file explains how to choose the values.  Setting the shrink
  // parameter to 0.0 ensures that the table never shrinks.
  void set_resizing_parameters(float shrink, float grow) {
    assert(shrink >= 0.0);
    assert(grow <= 1.0);
    assert(shrink <= grow/2.0);
    shrink_resize_percent = shrink;
    enlarge_resize_percent = grow;
    reset_thresholds();
  }
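
  // Usage sketch (editor's note, not original): make a table (here a
  // hypothetical instance "ht") tolerate being 90% full and never shrink
  // automatically.  The asserts above require shrink <= grow/2.
  //
  //   ht.set_resizing_parameters(0.0f, 0.9f);  // shrink at 0%, grow at 90%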

  // CONSTRUCTORS -- as required by the specs, we take a size,
  // but also let you specify a hashfunction, key comparator,
  // and key extractor.  We also define a copy constructor and =.
  // DESTRUCTOR -- the default is fine, surprisingly.
  explicit sparse_hashtable(size_type expected_max_items_in_table = 0,
                            const HashFcn& hf = HashFcn(),
                            const EqualKey& eql = EqualKey(),
                            const ExtractKey& ext = ExtractKey())
    : hash(hf), equals(eql), get_key(ext), num_deleted(0), use_deleted(false),
      delval(), enlarge_resize_percent(HT_OCCUPANCY_FLT),
      shrink_resize_percent(HT_EMPTY_FLT),
      table(expected_max_items_in_table == 0
            ? HT_DEFAULT_STARTING_BUCKETS
            : min_size(expected_max_items_in_table, 0)) {
    reset_thresholds();
  }

  // As a convenience for resize(), we allow an optional second argument
  // which lets you make this new hashtable a different size than ht.
  // We also provide a mechanism of saying you want to "move" the ht argument
  // into us instead of copying.
  sparse_hashtable(const sparse_hashtable& ht,
                   size_type min_buckets_wanted = HT_DEFAULT_STARTING_BUCKETS)
    : hash(ht.hash), equals(ht.equals), get_key(ht.get_key),
      num_deleted(0), use_deleted(ht.use_deleted), delval(ht.delval),
      enlarge_resize_percent(ht.enlarge_resize_percent),
      shrink_resize_percent(ht.shrink_resize_percent),
      table() {
    reset_thresholds();
    copy_from(ht, min_buckets_wanted);   // copy_from() ignores deleted entries
  }
  sparse_hashtable(MoveDontCopyT mover, sparse_hashtable& ht,
                   size_type min_buckets_wanted = HT_DEFAULT_STARTING_BUCKETS)
    : hash(ht.hash), equals(ht.equals), get_key(ht.get_key),
      num_deleted(0), use_deleted(ht.use_deleted), delval(ht.delval),
      enlarge_resize_percent(ht.enlarge_resize_percent),
      shrink_resize_percent(ht.shrink_resize_percent),
      table() {
    reset_thresholds();
    move_from(mover, ht, min_buckets_wanted);  // ignores deleted entries
  }

  sparse_hashtable& operator= (const sparse_hashtable& ht) {
    if (&ht == this)  return *this;      // don't copy onto ourselves
    clear();
    hash = ht.hash;
    equals = ht.equals;
    get_key = ht.get_key;
    use_deleted = ht.use_deleted;
    set_value(&delval, ht.delval);
    copy_from(ht, HT_MIN_BUCKETS);       // sets num_deleted to 0 too
    return *this;
  }

  // Many STL algorithms use swap instead of copy constructors
  void swap(sparse_hashtable& ht) {
    STL_NAMESPACE::swap(hash, ht.hash);
    STL_NAMESPACE::swap(equals, ht.equals);
    STL_NAMESPACE::swap(get_key, ht.get_key);
    STL_NAMESPACE::swap(num_deleted, ht.num_deleted);
    STL_NAMESPACE::swap(use_deleted, ht.use_deleted);
    STL_NAMESPACE::swap(enlarge_resize_percent, ht.enlarge_resize_percent);
    STL_NAMESPACE::swap(shrink_resize_percent, ht.shrink_resize_percent);
    { value_type tmp;     // for annoying reasons, swap() doesn't work
      set_value(&tmp, delval);
      set_value(&delval, ht.delval);
      set_value(&ht.delval, tmp);
    }
    table.swap(ht.table);
    reset_thresholds();
    ht.reset_thresholds();
  }

  // It's always nice to be able to clear a table without deallocating it
  void clear() {
    table.clear();
    reset_thresholds();
    num_deleted = 0;
  }


  // LOOKUP ROUTINES
 private:
  // Returns a pair of positions: 1st where the object is, 2nd where
  // it would go if you wanted to insert it.  1st is ILLEGAL_BUCKET
  // if object is not found; 2nd is ILLEGAL_BUCKET if it is.
  // Note: because of deletions where-to-insert is not trivial: it's the
  // first deleted bucket we see, as long as we don't find the key later
  pair<size_type, size_type> find_position(const key_type &key) const {
    size_type num_probes = 0;              // how many times we've probed
    const size_type bucket_count_minus_one = bucket_count() - 1;
    size_type bucknum = hash(key) & bucket_count_minus_one;
    size_type insert_pos = ILLEGAL_BUCKET; // where we would insert
    SPARSEHASH_STAT_UPDATE(total_lookups += 1);
    while ( 1 ) {                          // probe until something happens
      if ( !table.test(bucknum) ) {        // bucket is empty
        SPARSEHASH_STAT_UPDATE(total_probes += num_probes);
        if ( insert_pos == ILLEGAL_BUCKET )  // found no prior place to insert
          return pair<size_type,size_type>(ILLEGAL_BUCKET, bucknum);
        else
          return pair<size_type,size_type>(ILLEGAL_BUCKET, insert_pos);

      } else if ( test_deleted(bucknum) ) {  // keep searching, but mark to insert
        if ( insert_pos == ILLEGAL_BUCKET )
          insert_pos = bucknum;

      } else if ( equals(key, get_key(table.get(bucknum))) ) {
        SPARSEHASH_STAT_UPDATE(total_probes += num_probes);
        return pair<size_type,size_type>(bucknum, ILLEGAL_BUCKET);
      }
      ++num_probes;                        // we're doing another probe
      bucknum = (bucknum + JUMP_(key, num_probes)) & bucket_count_minus_one;
      assert(num_probes < bucket_count()); // don't probe too many times!
    }
  }
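
  // Worked example (editor's note, not original): suppose a key hashes to
  // bucket 5 in a 16-bucket table, bucket 5 holds the deleted marker, and
  // bucket 6 is empty.  The loop records insert_pos = 5 on the first probe,
  // hits the empty bucket 6 on the second, and returns (ILLEGAL_BUCKET, 5):
  // the key is not present, and bucket 5 is where an insert would go.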

 public:
  iterator find(const key_type& key) {
    if ( size() == 0 ) return end();
    pair<size_type, size_type> pos = find_position(key);
    if ( pos.first == ILLEGAL_BUCKET )     // alas, not there
      return end();
    else
      return iterator(this, table.get_iter(pos.first), table.nonempty_end());
  }

  const_iterator find(const key_type& key) const {
    if ( size() == 0 ) return end();
    pair<size_type, size_type> pos = find_position(key);
    if ( pos.first == ILLEGAL_BUCKET )     // alas, not there
      return end();
    else
      return const_iterator(this,
                            table.get_iter(pos.first), table.nonempty_end());
  }

  // Counts how many elements have key key.  For maps, it's either 0 or 1.
  size_type count(const key_type &key) const {
    pair<size_type, size_type> pos = find_position(key);
    return pos.first == ILLEGAL_BUCKET ? 0 : 1;
  }

  // Likewise, equal_range doesn't really make sense for us.  Oh well.
  pair<iterator,iterator> equal_range(const key_type& key) {
    const iterator pos = find(key);        // either an iterator or end
    return pair<iterator,iterator>(pos, pos);
  }
  pair<const_iterator,const_iterator> equal_range(const key_type& key) const {
    const const_iterator pos = find(key);  // either an iterator or end
    return pair<const_iterator,const_iterator>(pos, pos);
  }


  // INSERTION ROUTINES
 private:
  // If you know *this is big enough to hold obj, use this routine
  pair<iterator, bool> insert_noresize(const value_type& obj) {
    // First, double-check we're not inserting delval
    assert(!use_deleted || !equals(get_key(obj), get_key(delval)));
    const pair<size_type,size_type> pos = find_position(get_key(obj));
    if ( pos.first != ILLEGAL_BUCKET) {    // object was already there
      return pair<iterator,bool>(iterator(this, table.get_iter(pos.first),
                                          table.nonempty_end()),
                                 false);   // false: we didn't insert
    } else {                               // pos.second says where to put it
      if ( test_deleted(pos.second) ) {    // just replace if it's been del.
        // The set() below will undelete this object.  We just worry about stats
        assert(num_deleted > 0);
        --num_deleted;                     // used to be, now it isn't
      }
      table.set(pos.second, obj);
      return pair<iterator,bool>(iterator(this, table.get_iter(pos.second),
                                          table.nonempty_end()),
                                 true);    // true: we did insert
    }
  }

 public:
  // This is the normal insert routine, used by the outside world
  pair<iterator, bool> insert(const value_type& obj) {
    resize_delta(1);                       // adding an object, grow if need be
    return insert_noresize(obj);
  }

  // When inserting a lot at a time, we specialize on the type of iterator
  template <class InputIterator>
  void insert(InputIterator f, InputIterator l) {
    // specializes on iterator type
    insert(f, l, typename STL_NAMESPACE::iterator_traits<InputIterator>::iterator_category());
  }

  // Iterator supports operator-, resize before inserting
  template <class ForwardIterator>
  void insert(ForwardIterator f, ForwardIterator l,
              STL_NAMESPACE::forward_iterator_tag) {
    size_type n = STL_NAMESPACE::distance(f, l);   // TODO(csilvers): standard?
    resize_delta(n);
    for ( ; n > 0; --n, ++f)
      insert_noresize(*f);
  }

  // Arbitrary iterator, can't tell how much to resize
  template <class InputIterator>
  void insert(InputIterator f, InputIterator l,
              STL_NAMESPACE::input_iterator_tag) {
    for ( ; f != l; ++f)
      insert(*f);
  }
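
  // Usage sketch (editor's note, not original): inserting a range.  With
  // forward iterators (e.g. from a std::vector) the overload above can size
  // the table once up front; with plain input iterators it falls back to
  // one-at-a-time growth.  "v" and "ht" are hypothetical names.
  //
  //   std::vector<value_type> v = ...;   // source of elements
  //   ht.insert(v.begin(), v.end());     // resizes once, then insert_noresize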


  // DELETION ROUTINES
  size_type erase(const key_type& key) {
    // First, double-check we're not erasing delval
    assert(!use_deleted || !equals(key, get_key(delval)));
    const_iterator pos = find(key);        // shrug: shouldn't need to be const
    if ( pos != end() ) {
      assert(!test_deleted(pos));  // or find() shouldn't have returned it
      set_deleted(pos);
      ++num_deleted;
      consider_shrink = true;      // will think about shrink after next insert
      return 1;                    // because we deleted one thing
    } else {
      return 0;                    // because we deleted nothing
    }
  }

  // This is really evil: really it should be iterator, not const_iterator.
  // But...the only reason keys are const is to allow lookup.
  // Since that's a moot issue for deleted keys, we allow const_iterators
  void erase(const_iterator pos) {
    if ( pos == end() ) return;    // sanity check
    if ( set_deleted(pos) ) {      // true if object has been newly deleted
      ++num_deleted;
      consider_shrink = true;      // will think about shrink after next insert
    }
  }

  void erase(const_iterator f, const_iterator l) {
    for ( ; f != l; ++f) {
      if ( set_deleted(f) )        // should always be true
        ++num_deleted;
    }
    consider_shrink = true;        // will think about shrink after next insert
  }


  // COMPARISON
  bool operator==(const sparse_hashtable& ht) const {
    // We really want to check that the hash functions are the same
    // but alas there's no way to do this.  We just hope.
    return ( num_deleted == ht.num_deleted && table == ht.table );
  }
  bool operator!=(const sparse_hashtable& ht) const {
    return !(*this == ht);
  }


  // I/O
  // We support reading and writing hashtables to disk.  NOTE that
  // this only stores the hashtable metadata, not the stuff you've
  // actually put in the hashtable!  Alas, since I don't know how to
  // write a hasher or key_equal, you have to make sure everything
  // but the table is the same.  We compact before writing.
  bool write_metadata(FILE *fp) {
    squash_deleted();              // so we don't have to worry about delkey
    return table.write_metadata(fp);
  }

  bool read_metadata(FILE *fp) {
    num_deleted = 0;               // since we got rid before writing
    bool result = table.read_metadata(fp);
    reset_thresholds();
    return result;
  }

  // Only meaningful if value_type is a POD.
  bool write_nopointer_data(FILE *fp) {
    return table.write_nopointer_data(fp);
  }

  // Only meaningful if value_type is a POD.
  bool read_nopointer_data(FILE *fp) {
    return table.read_nopointer_data(fp);
  }
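
  // Usage sketch (editor's note, not original): one plausible write/read
  // pairing for the routines above, valid only when value_type is a POD and
  // the hasher, key_equal, and deleted key are set up identically on both
  // sides.  File name and the second table "ht2" are hypothetical.
  //
  //   FILE* fp = fopen("table.bin", "wb");
  //   ht.write_metadata(fp);          // compacts, then writes bucket layout
  //   ht.write_nopointer_data(fp);    // raw bits of each stored value
  //   fclose(fp);
  //   // ... later, on a table constructed with the same parameters:
  //   fp = fopen("table.bin", "rb");
  //   ht2.read_metadata(fp);
  //   ht2.read_nopointer_data(fp);
  //   fclose(fp);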

 private:
  // The actual data
  hasher hash;                    // required by hashed_associative_container
  key_equal equals;
  ExtractKey get_key;
  size_type num_deleted;       // how many occupied buckets are marked deleted
  bool use_deleted;                         // false until delval has been set
  value_type delval;                        // which key marks deleted entries
  float enlarge_resize_percent;             // how full before resize
  float shrink_resize_percent;              // how empty before resize
  size_type shrink_threshold;          // table.size() * shrink_resize_percent
  size_type enlarge_threshold;         // table.size() * enlarge_resize_percent
  sparsetable<value_type> table;      // holds num_buckets and num_elements too
  bool consider_shrink;  // true if we should try to shrink before next insert

  void reset_thresholds() {
    enlarge_threshold = static_cast<size_type>(table.size()
                                               * enlarge_resize_percent);
    shrink_threshold = static_cast<size_type>(table.size()
                                              * shrink_resize_percent);
    consider_shrink = false;  // whatever caused us to reset already considered
  }
};

// We need a global swap as well
template <class V, class K, class HF, class ExK, class EqK, class A>
inline void swap(sparse_hashtable<V,K,HF,ExK,EqK,A> &x,
                 sparse_hashtable<V,K,HF,ExK,EqK,A> &y) {
  x.swap(y);
}

#undef JUMP_

template <class V, class K, class HF, class ExK, class EqK, class A>
const typename sparse_hashtable<V,K,HF,ExK,EqK,A>::size_type
  sparse_hashtable<V,K,HF,ExK,EqK,A>::ILLEGAL_BUCKET;

// How full we let the table get before we resize.  Knuth says .8 is
// good -- higher causes us to probe too much, though saves memory
template <class V, class K, class HF, class ExK, class EqK, class A>
const float sparse_hashtable<V,K,HF,ExK,EqK,A>::HT_OCCUPANCY_FLT = 0.8f;

// How empty we let the table get before we resize lower.
// It should be less than OCCUPANCY_FLT / 2 or we thrash resizing
template <class V, class K, class HF, class ExK, class EqK, class A>
const float sparse_hashtable<V,K,HF,ExK,EqK,A>::HT_EMPTY_FLT = 0.4f *
  sparse_hashtable<V,K,HF,ExK,EqK,A>::HT_OCCUPANCY_FLT;

_END_GOOGLE_NAMESPACE_

#endif /* _SPARSEHASHTABLE_H_ */
