Update Files

This commit is contained in:
2025-01-22 16:18:30 +01:00
parent ed4603cf95
commit a36294b518
16718 changed files with 2960346 additions and 0 deletions

View File

@ -0,0 +1,523 @@
/*
* Copyright (C)2005-2019 Haxe Foundation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package haxe.ds;
import java.NativeArray;
/*
* This IntMap implementation is based on khash (https://github.com/attractivechaos/klib/blob/master/khash.h)
* Copyright goes to Attractive Chaos <attractor@live.co.uk> and his contributors
*
* Thanks also to Jonas Malaco Filho for his Haxe-written IntMap code inspired by Python tables.
* (https://jonasmalaco.com/fossil/test/jonas-haxe/artifact/887b53126e237d6c68951111d594033403889304)
*/
// Open-addressing hash table specialised for Int keys (khash layout).
// Bucket state lives in a separate `flags` bit-array with 2 bits per
// bucket: bit0 = deleted, bit1 = empty (see getFlag and friends below).
@:coreApi class IntMap<T> implements haxe.Constraints.IMap<Int, T> {
// Load factor: set() triggers a resize once occupancy reaches 70% of nBuckets.
private static inline var HASH_UPPER = 0.7;
// 2-bit-per-bucket state array; each Int stores the flags of 16 buckets.
private var flags:NativeArray<Int>;
private var _keys:NativeArray<Int>;
private var vals:NativeArray<T>;
// Allocated bucket count; always a power of two so `hash & (nBuckets - 1)` is a valid index.
private var nBuckets:Int;
// Number of live entries.
private var size:Int;
// Live entries plus deleted-but-not-reclaimed entries; drives resize decisions.
private var nOccupied:Int;
// Occupancy threshold (nBuckets * HASH_UPPER, rounded) at which set() resizes.
private var upperBound:Int;
#if !no_map_cache
// One-slot cache: last key successfully looked up and its bucket index (-1 = invalid).
private var cachedKey:Int;
private var cachedIndex:Int;
#end
// Creates an empty map; arrays are allocated lazily by the first set() via resize().
public function new():Void {
#if !no_map_cache
cachedIndex = -1;
#end
}
// Inserts or overwrites `key`. Rehashes when too occupied; recycles the
// first deleted (tombstone) slot met on the probe path when possible.
public function set(key:Int, value:T):Void {
var targetIndex:Int;
if (nOccupied >= upperBound) {
if (nBuckets > (size << 1)) {
resize(nBuckets - 1); // clear "deleted" elements
} else {
resize(nBuckets + 1);
}
}
var flags = flags, _keys = _keys;
{
var mask = nBuckets - 1,
hashedKey = hash(key),
curIndex = hashedKey & mask;
var delKey = -1, curFlag = 0;
// to speed things up, don't loop if the first bucket is already free
if (isEmpty(getFlag(flags, curIndex))) {
targetIndex = curIndex;
} else {
var inc = getInc(hashedKey, mask), last = curIndex;
// Probe until we find the key itself or an empty bucket, remembering
// the first tombstone so it can be reused instead of a fresh slot.
while (!(_keys[curIndex] == key || isEmpty(curFlag = getFlag(flags, curIndex)))) {
if (delKey == -1 && isDel(curFlag)) {
delKey = curIndex;
}
curIndex = (curIndex + inc) & mask;
#if debug
assert(curIndex != last);
#end
}
if (delKey != -1 && isEmpty(getFlag(flags, curIndex))) {
targetIndex = delKey;
} else {
targetIndex = curIndex;
}
}
}
var flag = getFlag(flags, targetIndex);
if (isEmpty(flag)) {
// Fresh slot: a new entry increases both size and occupancy.
_keys[targetIndex] = key;
vals[targetIndex] = value;
setIsBothFalse(flags, targetIndex);
size++;
nOccupied++;
} else if (isDel(flag)) {
// Recycled tombstone: its occupancy was already counted.
_keys[targetIndex] = key;
vals[targetIndex] = value;
setIsBothFalse(flags, targetIndex);
size++;
} else {
#if debug
assert(_keys[targetIndex] == key);
#end
// Key already live: just overwrite the value.
vals[targetIndex] = value;
}
}
// Returns the bucket index holding a live entry for `key`, or -1 if absent.
// Note: an empty bucket with a matching (default 0) key does not stop the
// probe — see the #6457 comment in remove() for why keys are never reset.
private final function lookup(key:Int):Int {
if (nBuckets != 0) {
var flags = flags, _keys = _keys;
var mask = nBuckets - 1,
k = hash(key),
index = k & mask,
curFlag = -1,
inc = getInc(k, mask), /* inc == 1 for linear probing */
last = index;
do {
if (_keys[index] == key) {
if (isEmpty(curFlag = getFlag(flags, index))) {
index = (index + inc) & mask;
continue;
} else if (isDel(curFlag)) {
return -1;
} else {
return index;
}
} else {
index = (index + inc) & mask;
}
} while (index != last);
}
return -1;
}
// Returns the value bound to `key`, or null when absent; refreshes the cache on hit.
public function get(key:Int):Null<T> {
var idx = -1;
#if !no_map_cache
if (cachedKey == key && ((idx = cachedIndex) != -1)) {
return vals[idx];
}
#end
idx = lookup(key);
if (idx != -1) {
#if !no_map_cache
cachedKey = key;
cachedIndex = idx;
#end
return vals[idx];
}
return null;
}
// Like get(), but returns `def` instead of null when the key is absent.
private function getDefault(key:Int, def:T):T {
var idx = -1;
#if !no_map_cache
if (cachedKey == key && ((idx = cachedIndex) != -1)) {
return vals[idx];
}
#end
idx = lookup(key);
if (idx != -1) {
#if !no_map_cache
cachedKey = key;
cachedIndex = idx;
#end
return vals[idx];
}
return def;
}
// True when `key` has a live entry in the map.
public function exists(key:Int):Bool {
var idx = -1;
#if !no_map_cache
if (cachedKey == key && ((idx = cachedIndex) != -1)) {
return true;
}
#end
idx = lookup(key);
if (idx != -1) {
#if !no_map_cache
cachedKey = key;
cachedIndex = idx;
#end
return true;
}
return false;
}
// Removes `key`, marking its bucket deleted (tombstone). Returns whether it existed.
public function remove(key:Int):Bool {
var idx = -1;
#if !no_map_cache
if (!(cachedKey == key && ((idx = cachedIndex) != -1)))
#end
{
idx = lookup(key);
}
if (idx == -1) {
return false;
} else {
#if !no_map_cache
if (cachedKey == key) {
cachedIndex = -1;
}
#end
if (!isEither(getFlag(flags, idx))) {
setIsDelTrue(flags, idx);
--size;
vals[idx] = null;
// we do NOT reset the keys here, as unlike StringMap, we check for keys equality
// and stop if we find a key that is equal to the one we're looking for
// setting this to 0 will allow the hash to contain duplicate `0` keys
// (see #6457)
// _keys[idx] = 0;
}
return true;
}
}
// Grows or shrinks the table to roundUp(newNBuckets) (min 4) and rehashes all
// live entries in place using a khash-style kick-out pass.
private final function resize(newNBuckets:Int):Void {
// This function uses 0.25*n_bucktes bytes of working space instead of [sizeof(key_t+val_t)+.25]*n_buckets.
var newFlags = null;
var j = 1;
{
newNBuckets = roundUp(newNBuckets);
if (newNBuckets < 4)
newNBuckets = 4;
if (size >= (newNBuckets * HASH_UPPER + 0.5))
/* requested size is too small */ {
j = 0;
} else { /* hash table size to be changed (shrink or expand); rehash */
var nfSize = flagsSize(newNBuckets);
newFlags = new NativeArray(nfSize);
for (i in 0...nfSize) {
newFlags[i] = 0xaaaaaaaa; // isEmpty = true; isDel = false
}
if (nBuckets < newNBuckets) // expand
{
var k = new NativeArray(newNBuckets);
if (_keys != null) {
arrayCopy(_keys, 0, k, 0, nBuckets);
}
_keys = k;
var v = new NativeArray(newNBuckets);
if (vals != null) {
arrayCopy(vals, 0, v, 0, nBuckets);
}
vals = v;
} // otherwise shrink
}
}
if (j != 0) { // rehashing is required
#if !no_map_cache
// resetting cache
cachedKey = 0;
cachedIndex = -1;
#end
j = -1;
var nBuckets = nBuckets, _keys = _keys, vals = vals, flags = flags;
var newMask = newNBuckets - 1;
while (++j < nBuckets) {
if (!isEither(getFlag(flags, j))) {
var key = _keys[j];
var val = vals[j];
// do not set keys as 0 - see comment about #6457
// _keys[j] = 0;
vals[j] = cast null;
setIsDelTrue(flags, j);
while (true)
/* kick-out process; sort of like in Cuckoo hashing */ {
var k = hash(key);
var inc = getInc(k, newMask);
var i = k & newMask;
while (!isEmpty(getFlag(newFlags, i))) {
i = (i + inc) & newMask;
}
setIsEmptyFalse(newFlags, i);
if (i < nBuckets && !isEither(getFlag(flags, i)))
/* kick out the existing element */ {
{
var tmp = _keys[i];
_keys[i] = key;
key = tmp;
} {
var tmp = vals[i];
vals[i] = val;
val = tmp;
}
setIsDelTrue(flags, i); /* mark it as deleted in the old hash table */
} else { /* write the element and jump out of the loop */
_keys[i] = key;
vals[i] = val;
break;
}
}
}
}
if (nBuckets > newNBuckets)
/* shrink the hash table */ {
{
var k = new NativeArray(newNBuckets);
arrayCopy(_keys, 0, k, 0, newNBuckets);
this._keys = k;
} {
var v = new NativeArray(newNBuckets);
arrayCopy(vals, 0, v, 0, newNBuckets);
this.vals = v;
}
}
this.flags = newFlags;
this.nBuckets = newNBuckets;
this.nOccupied = size;
this.upperBound = Std.int(newNBuckets * HASH_UPPER + .5);
}
}
// Iterator over the keys currently in the map.
public inline function keys():Iterator<Int> {
return new IntMapKeyIterator(this);
}
// Iterator over the values currently in the map.
public inline function iterator():Iterator<T> {
return new IntMapValueIterator(this);
}
// Iterator over key/value pairs currently in the map.
@:runtime public inline function keyValueIterator():KeyValueIterator<Int, T> {
return new haxe.iterators.MapKeyValueIterator(this);
}
// Shallow copy: keys and values are shared, structure is rebuilt via set().
public function copy():IntMap<T> {
var copied = new IntMap();
for (key in keys())
copied.set(key, get(key));
return copied;
}
// Renders the map as "{k1 => v1, k2 => v2}" in iteration order.
public function toString():String {
var s = new StringBuf();
s.add("{");
var it = keys();
for (i in it) {
s.add(i);
s.add(" => ");
s.add(Std.string(get(i)));
if (it.hasNext())
s.add(", ");
}
s.add("}");
return s.toString();
}
// Drops all entries and releases the backing arrays.
public function clear():Void {
flags = null;
_keys = null;
vals = null;
nBuckets = 0;
size = 0;
nOccupied = 0;
upperBound = 0;
#if !no_map_cache
cachedKey = 0;
cachedIndex = -1;
#end
}
// Debug-only invariant check; compiles to nothing outside -debug builds.
private static inline function assert(x:Bool):Void {
#if debug
if (!x)
throw "assert failed";
#end
}
private static inline function defaultK():Int
return 0;
// Thin wrapper over java.lang.System.arraycopy.
private static inline function arrayCopy(sourceArray:Dynamic, sourceIndex:Int, destinationArray:Dynamic, destinationIndex:Int, length:Int):Void
java.lang.System.arraycopy(sourceArray, sourceIndex, destinationArray, destinationIndex, length);
// Probe increment derived from the hash; always odd, so with a power-of-two
// table every bucket is eventually visited.
private static inline function getInc(k:Int, mask:Int):Int
return (((k) >> 3 ^ (k) << 3) | 1) & (mask);
// Identity hash: Int keys are used directly.
private static inline function hash(i:Int):Int
return i;
// flags represents a bit array with 2 significant bits for each index
// one bit for deleted (1), one for empty (2)
// so what this function does is:
// * gets the integer with (flags / 16)
// * shifts those bits to the right ((flags % 16) * 2) places
// * masks it with 0b11
private static inline function getFlag(flags:NativeArray<Int>, i:Int):Int {
return ((flags[i >> 4] >>> ((i & 0xf) << 1)) & 3);
}
private static inline function isDel(flag:Int):Bool {
return (flag & 1) != 0;
}
private static inline function isEmpty(flag:Int):Bool {
return (flag & 2) != 0;
}
// True when the bucket is either empty or deleted (i.e. not live).
private static inline function isEither(flag:Int):Bool {
return flag != 0;
}
private static inline function setIsDelFalse(flags:NativeArray<Int>, i:Int):Void {
flags[i >> 4] &= ~(1 << ((i & 0xf) << 1));
}
private static inline function setIsEmptyFalse(flags:NativeArray<Int>, i:Int):Void {
flags[i >> 4] &= ~(2 << ((i & 0xf) << 1));
}
// Marks bucket i as live (neither empty nor deleted).
private static inline function setIsBothFalse(flags:NativeArray<Int>, i:Int):Void {
flags[i >> 4] &= ~(3 << ((i & 0xf) << 1));
}
// Marks bucket i as deleted (tombstone).
private static inline function setIsDelTrue(flags:NativeArray<Int>, i:Int):Void {
flags[i >> 4] |= 1 << ((i & 0xf) << 1);
}
// Rounds x up to the next power of two (bit-smearing trick).
private static inline function roundUp(x:Int):Int {
--x;
x |= (x) >>> 1;
x |= (x) >>> 2;
x |= (x) >>> 4;
x |= (x) >>> 8;
x |= (x) >>> 16;
return ++x;
}
// Number of Ints needed to store 2 flag bits for each of m buckets (16 per Int).
private static inline function flagsSize(m:Int):Int
return ((m) < 16 ? 1 : (m) >> 4);
}
@:access(haxe.ds.IntMap)
private final class IntMapKeyIterator<T> {
	var m:IntMap<T>;
	var i:Int;
	var len:Int;

	public function new(m:IntMap<T>) {
		this.m = m;
		this.i = 0;
		this.len = m.nBuckets;
	}

	// Skip forward over empty/deleted buckets; true while a live entry remains.
	public function hasNext():Bool {
		var idx = i;
		while (idx < len) {
			if (!IntMap.isEither(IntMap.getFlag(m.flags, idx))) {
				i = idx;
				return true;
			}
			idx++;
		}
		return false;
	}

	// Returns the key at the current bucket and refreshes the map's lookup cache.
	public function next():Int {
		var key = m._keys[i];
		#if !no_map_cache
		m.cachedIndex = i;
		m.cachedKey = key;
		#end
		i++;
		return key;
	}
}
@:access(haxe.ds.IntMap)
private final class IntMapValueIterator<T> {
	var m:IntMap<T>;
	var i:Int;
	var len:Int;

	public function new(m:IntMap<T>) {
		this.m = m;
		this.i = 0;
		this.len = m.nBuckets;
	}

	// Skip forward over empty/deleted buckets; true while a live entry remains.
	public function hasNext():Bool {
		var idx = i;
		while (idx < len) {
			if (!IntMap.isEither(IntMap.getFlag(m.flags, idx))) {
				i = idx;
				return true;
			}
			idx++;
		}
		return false;
	}

	// Returns the value at the current bucket and advances.
	public inline function next():T {
		var v = m.vals[i];
		i++;
		return v;
	}
}

View File

@ -0,0 +1,538 @@
/*
* Copyright (C)2005-2019 Haxe Foundation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package haxe.ds;
import java.NativeArray;
// Open-addressing hash table for object keys (khash layout with quadratic
// probing). Key identity is `hashCode()`/`equals()` of the underlying
// java.lang.Object. Fixes vs. previous revision: removed a duplicate
// `hashes` declaration and an unused `site` local in set(), and made the
// debug assert use equals() like the probe loop (it previously used
// reference equality and could fail spuriously for equal keys).
@:coreApi class ObjectMap<K:{}, V> implements haxe.Constraints.IMap<K, V> {
	// Load factor: set() resizes once occupancy reaches 77% of nBuckets.
	extern private static inline var HASH_UPPER = 0.77;
	// Reserved hash values marking bucket state (see HashType).
	extern private static inline var FLAG_EMPTY = 0;
	extern private static inline var FLAG_DEL = 1;

	/**
	 * This is the most important structure here and the reason why it's so fast.
	 * It's an array of all the hashes contained in the table. These hashes cannot be 0 nor 1,
	 * which stand for "empty" and "deleted" states.
	 *
	 * The lookup algorithm will keep looking until a 0 or the key wanted is found;
	 * The insertion algorithm will do the same but will also break when FLAG_DEL is found;
	 */
	private var hashes:NativeArray<HashType>;
	private var _keys:NativeArray<K>;
	private var vals:NativeArray<V>;

	// Allocated bucket count (power of two) and bookkeeping counters.
	private var nBuckets:Int;
	private var size:Int;
	private var nOccupied:Int;
	private var upperBound:Int;

	#if !no_map_cache
	// One-slot cache for the last successful lookup (-1 index = invalid).
	private var cachedKey:K;
	private var cachedIndex:Int;
	#end

	#if DEBUG_HASHTBL
	private var totalProbes:Int;
	private var probeTimes:Int;
	private var sameHash:Int;
	private var maxProbe:Int;
	#end

	// Creates an empty map; storage is allocated lazily by the first set().
	public function new():Void {
		#if !no_map_cache
		cachedIndex = -1;
		#end
	}

	// Inserts or overwrites `key`, growing/rehashing when too occupied and
	// recycling the first deleted slot met while probing.
	public function set(key:K, value:V):Void {
		var x:Int, k:Int;
		if (nOccupied >= upperBound) {
			if (nBuckets > (size << 1))
				resize(nBuckets - 1); // clear "deleted" elements
			else
				resize(nBuckets + 2);
		}
		var hashes = hashes, keys = _keys;
		{
			var mask = (nBuckets == 0) ? 0 : nBuckets - 1;
			x = nBuckets;
			k = hash(key);
			var i = k & mask, nProbes = 0;
			var delKey = -1;
			// to speed things up, don't loop if the first bucket is already free
			if (isEmpty(hashes[i])) {
				x = i;
			} else {
				var last = i, flag;
				// Probe (quadratic: step grows by 1 each iteration) until we hit
				// the key itself or an empty bucket, remembering the first tombstone.
				while (!(isEmpty(flag = hashes[i]) || (flag == k && (cast keys[i] : java.lang.Object).equals(key)))) {
					if (isDel(flag) && delKey == -1)
						delKey = i;
					i = (i + ++nProbes) & mask;
					#if DEBUG_HASHTBL
					probeTimes++;
					if (i == last)
						throw "assert";
					#end
				}
				if (isEmpty(flag) && delKey != -1)
					x = delKey;
				else
					x = i;
			}
			#if DEBUG_HASHTBL
			if (nProbes > maxProbe)
				maxProbe = nProbes;
			totalProbes++;
			#end
		}
		var flag = hashes[x];
		if (isEmpty(flag)) {
			// Fresh slot: a new entry increases both size and occupancy.
			keys[x] = key;
			vals[x] = value;
			hashes[x] = k;
			size++;
			nOccupied++;
		} else if (isDel(flag)) {
			// Recycled tombstone: its occupancy was already counted.
			keys[x] = key;
			vals[x] = value;
			hashes[x] = k;
			size++;
		} else {
			// Consistent with the probe loop: key equality means equals(), not ==.
			assert((cast keys[x] : java.lang.Object).equals(key));
			vals[x] = value;
		}
		#if !no_map_cache
		cachedIndex = x;
		cachedKey = key;
		#end
	}

	// Returns the bucket index holding a live entry for `key`, or -1 if absent.
	private final function lookup(key:K):Int {
		if (nBuckets != 0) {
			var hashes = hashes, keys = _keys;
			var mask = nBuckets - 1, hash = hash(key), k = hash, nProbes = 0;
			var i = k & mask;
			var last = i, flag;
			// An empty bucket terminates the probe; hash equality is checked
			// before the (more expensive) equals() call.
			while (!isEmpty(flag = hashes[i]) && (isDel(flag) || flag != k || !((cast keys[i] : java.lang.Object).equals(key)))) {
				i = (i + ++nProbes) & mask;
				#if DEBUG_HASHTBL
				probeTimes++;
				if (i == last)
					throw "assert";
				#end
			}
			#if DEBUG_HASHTBL
			if (nProbes > maxProbe)
				maxProbe = nProbes;
			totalProbes++;
			#end
			return isEither(flag) ? -1 : i;
		}
		return -1;
	}

	// Grows or shrinks the table to roundUp(newNBuckets) (min 4) and rehashes
	// all live entries in place using a khash-style kick-out pass.
	@:private final function resize(newNBuckets:Int):Void {
		// This function uses 0.25*n_bucktes bytes of working space instead of [sizeof(key_t+val_t)+.25]*n_buckets.
		var newHash = null;
		var j = 1;
		{
			newNBuckets = roundUp(newNBuckets);
			if (newNBuckets < 4)
				newNBuckets = 4;
			if (size >= (newNBuckets * HASH_UPPER + 0.5))
				/* requested size is too small */ {
				j = 0;
			} else { /* hash table size to be changed (shrink or expand); rehash */
				var nfSize = newNBuckets;
				newHash = new NativeArray(nfSize); // all-zero == all FLAG_EMPTY
				if (nBuckets < newNBuckets) // expand
				{
					var k = new NativeArray(newNBuckets);
					if (_keys != null)
						arrayCopy(_keys, 0, k, 0, nBuckets);
					_keys = k;
					var v = new NativeArray(newNBuckets);
					if (vals != null)
						arrayCopy(vals, 0, v, 0, nBuckets);
					vals = v;
				} // otherwise shrink
			}
		}
		if (j != 0) { // rehashing is required
			// resetting cache
			#if !no_map_cache
			cachedKey = null;
			cachedIndex = -1;
			#end
			j = -1;
			var nBuckets = nBuckets,
				_keys = _keys,
				vals = vals,
				hashes = hashes;
			var newMask = newNBuckets - 1;
			while (++j < nBuckets) {
				var k;
				if (!isEither(k = hashes[j])) {
					var key = _keys[j];
					var val = vals[j];
					_keys[j] = null;
					vals[j] = cast null;
					hashes[j] = FLAG_DEL;
					while (true)
						/* kick-out process; sort of like in Cuckoo hashing */ {
						var nProbes = 0;
						var i = k & newMask;
						while (!isEmpty(newHash[i]))
							i = (i + ++nProbes) & newMask;
						newHash[i] = k;
						if (i < nBuckets && !isEither(k = hashes[i]))
							/* kick out the existing element */ {
							{
								var tmp = _keys[i];
								_keys[i] = key;
								key = tmp;
							} {
								var tmp = vals[i];
								vals[i] = val;
								val = tmp;
							}
							hashes[i] = FLAG_DEL; /* mark it as deleted in the old hash table */
						} else { /* write the element and jump out of the loop */
							_keys[i] = key;
							vals[i] = val;
							break;
						}
					}
				}
			}
			if (nBuckets > newNBuckets)
				/* shrink the hash table */ {
				{
					var k = new NativeArray(newNBuckets);
					arrayCopy(_keys, 0, k, 0, newNBuckets);
					this._keys = k;
				} {
					var v = new NativeArray(newNBuckets);
					arrayCopy(vals, 0, v, 0, newNBuckets);
					this.vals = v;
				}
			}
			this.hashes = newHash;
			this.nBuckets = newNBuckets;
			this.nOccupied = size;
			this.upperBound = Std.int(newNBuckets * HASH_UPPER + .5);
		}
	}

	// Returns the value bound to `key`, or null when absent; refreshes the cache on hit.
	public function get(key:K):Null<V> {
		var idx = -1;
		#if !no_map_cache
		if (cachedKey == key && ((idx = cachedIndex) != -1)) {
			return vals[idx];
		}
		#end
		idx = lookup(key);
		if (idx != -1) {
			#if !no_map_cache
			cachedKey = key;
			cachedIndex = idx;
			#end
			return vals[idx];
		}
		return null;
	}

	// Like get(), but returns `def` instead of null when the key is absent.
	private function getDefault(key:K, def:V):V {
		var idx = -1;
		#if !no_map_cache
		if (cachedKey == key && ((idx = cachedIndex) != -1)) {
			return vals[idx];
		}
		#end
		idx = lookup(key);
		if (idx != -1) {
			#if !no_map_cache
			cachedKey = key;
			cachedIndex = idx;
			#end
			return vals[idx];
		}
		return def;
	}

	// True when `key` has a live entry in the map.
	public function exists(key:K):Bool {
		var idx = -1;
		#if !no_map_cache
		if (cachedKey == key && ((idx = cachedIndex) != -1)) {
			return true;
		}
		#end
		idx = lookup(key);
		if (idx != -1) {
			#if !no_map_cache
			cachedKey = key;
			cachedIndex = idx;
			#end
			return true;
		}
		return false;
	}

	// Removes `key`, marking its bucket deleted and releasing key/value refs.
	// Returns whether the key existed.
	public function remove(key:K):Bool {
		var idx = -1;
		#if !no_map_cache
		if (!(cachedKey == key && ((idx = cachedIndex) != -1)))
		#end
		{
			idx = lookup(key);
		}
		if (idx == -1) {
			return false;
		} else {
			#if !no_map_cache
			if (cachedKey == key)
				cachedIndex = -1;
			#end
			hashes[idx] = FLAG_DEL;
			_keys[idx] = null;
			vals[idx] = null;
			--size;
			return true;
		}
	}

	// Iterator over the keys currently in the map.
	public function keys():Iterator<K> {
		return new ObjectMapKeyIterator(this);
	}

	// Iterator over the values currently in the map.
	public function iterator():Iterator<V> {
		return new ObjectMapValueIterator(this);
	}

	// Iterator over key/value pairs currently in the map.
	@:runtime public inline function keyValueIterator():KeyValueIterator<K, V> {
		return new haxe.iterators.MapKeyValueIterator(this);
	}

	// Shallow copy: keys and values are shared, structure is rebuilt via set().
	public function copy():ObjectMap<K, V> {
		var copied = new ObjectMap();
		for (key in keys())
			copied.set(key, get(key));
		return copied;
	}

	// Renders the map as "{k1 => v1, k2 => v2}" in iteration order.
	public function toString():String {
		var s = new StringBuf();
		s.add("{");
		var it = keys();
		for (i in it) {
			s.add(Std.string(i));
			s.add(" => ");
			s.add(Std.string(get(i)));
			if (it.hasNext())
				s.add(", ");
		}
		s.add("}");
		return s.toString();
	}

	// Drops all entries, releases the backing arrays and resets counters.
	public function clear():Void {
		hashes = null;
		_keys = null;
		vals = null;
		nBuckets = 0;
		size = 0;
		nOccupied = 0;
		upperBound = 0;
		#if !no_map_cache
		cachedKey = null;
		cachedIndex = -1;
		#end
		#if DEBUG_HASHTBL
		totalProbes = 0;
		probeTimes = 0;
		sameHash = 0;
		maxProbe = 0;
		#end
	}

	// Rounds x up to the next power of two (bit-smearing trick).
	extern private static inline function roundUp(x:Int):Int {
		--x;
		x |= (x) >>> 1;
		x |= (x) >>> 2;
		x |= (x) >>> 4;
		x |= (x) >>> 8;
		x |= (x) >>> 16;
		return ++x;
	}

	// Kept for reference: quadratic probing (i += ++nProbes) is used instead.
	extern private static inline function getInc(k:Int, mask:Int):Int // return 1 for linear probing
		return (((k) >> 3 ^ (k) << 3) | 1) & (mask);

	// True when v is FLAG_EMPTY (0) or FLAG_DEL (1), i.e. the bucket is not live.
	extern private static inline function isEither(v:HashType):Bool
		return (v & 0xFFFFFFFE) == 0;

	extern private static inline function isEmpty(v:HashType):Bool
		return v == FLAG_EMPTY;

	extern private static inline function isDel(v:HashType):Bool
		return v == FLAG_DEL;

	// guarantee: Whatever this function is, it will never return 0 nor 1
	extern private static inline function hash(s:Dynamic):HashType {
		var k:Int = (cast s : java.lang.Object).hashCode();
		// avalanche mix over hashCode() to spread poorly-distributed hashes
		k = (k + 0x7ed55d16) + (k << 12);
		k = (k ^ 0xc761c23c) ^ (k >> 19);
		k = (k + 0x165667b1) + (k << 5);
		k = (k + 0xd3a2646c) ^ (k << 9);
		k = (k + 0xfd7046c5) + (k << 3);
		k = (k ^ 0xb55a4f09) ^ (k >> 16);
		var ret = k;
		if (isEither(ret)) {
			// remap the two reserved state values to valid hashes
			if (ret == 0)
				ret = 2;
			else
				ret = 0xFFFFFFFF;
		}
		return ret;
	}

	// Thin wrapper over java.lang.System.arraycopy.
	extern private static inline function arrayCopy(sourceArray:Dynamic, sourceIndex:Int, destinationArray:Dynamic, destinationIndex:Int, length:Int):Void
		java.lang.System.arraycopy(sourceArray, sourceIndex, destinationArray, destinationIndex, length);

	// Debug-only invariant check; compiles to nothing without -D DEBUG_HASHTBL.
	extern private static inline function assert(x:Bool):Void {
		#if DEBUG_HASHTBL
		if (!x)
			throw "assert failed";
		#end
	}
}
@:access(haxe.ds.ObjectMap)
private final class ObjectMapKeyIterator<T:{}, V> {
	var m:ObjectMap<T, V>;
	var i:Int;
	var len:Int;

	public function new(m:ObjectMap<T, V>) {
		this.m = m;
		this.i = 0;
		this.len = m.nBuckets;
	}

	// Skip forward over empty/deleted buckets; true while a live entry remains.
	public function hasNext():Bool {
		var idx = i;
		while (idx < len) {
			if (!ObjectMap.isEither(m.hashes[idx])) {
				i = idx;
				return true;
			}
			idx++;
		}
		return false;
	}

	// Returns the key at the current bucket and refreshes the map's lookup cache.
	public function next():T {
		var key = m._keys[i];
		#if !no_map_cache
		m.cachedIndex = i;
		m.cachedKey = key;
		#end
		i++;
		return key;
	}
}
@:access(haxe.ds.ObjectMap)
private final class ObjectMapValueIterator<K:{}, T> {
	var m:ObjectMap<K, T>;
	var i:Int;
	var len:Int;

	public function new(m:ObjectMap<K, T>) {
		this.m = m;
		this.i = 0;
		this.len = m.nBuckets;
	}

	// Skip forward over empty/deleted buckets; true while a live entry remains.
	public function hasNext():Bool {
		var idx = i;
		while (idx < len) {
			if (!ObjectMap.isEither(m.hashes[idx])) {
				i = idx;
				return true;
			}
			idx++;
		}
		return false;
	}

	// Returns the value at the current bucket and advances.
	public inline function next():T {
		var v = m.vals[i];
		i++;
		return v;
	}
}
// Stored hash of a bucket. Values 0 (FLAG_EMPTY) and 1 (FLAG_DEL) are
// reserved bucket states; hash() never returns them.
private typedef HashType = Int;

View File

@ -0,0 +1,533 @@
/*
* Copyright (C)2005-2019 Haxe Foundation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package haxe.ds;
import java.NativeArray;
// Open-addressing hash table for String keys (khash layout with quadratic
// probing), backed by java.lang.String.hashCode(). Fixes vs. previous
// revision: removed a duplicate `hashes` declaration and an unused `site`
// local in set().
@:coreApi class StringMap<T> implements haxe.Constraints.IMap<String, T> {
	// Load factor: set() resizes once occupancy reaches 77% of nBuckets.
	extern private static inline var HASH_UPPER = 0.77;
	// Reserved hash values marking bucket state (see HashType).
	extern private static inline var FLAG_EMPTY = 0;
	extern private static inline var FLAG_DEL = 1;

	/**
	 * This is the most important structure here and the reason why it's so fast.
	 * It's an array of all the hashes contained in the table. These hashes cannot be 0 nor 1,
	 * which stand for "empty" and "deleted" states.
	 *
	 * The lookup algorithm will keep looking until a 0 or the key wanted is found;
	 * The insertion algorithm will do the same but will also break when FLAG_DEL is found;
	 */
	private var hashes:NativeArray<HashType>;
	private var _keys:NativeArray<String>;
	private var vals:NativeArray<T>;

	// Allocated bucket count (power of two) and bookkeeping counters.
	private var nBuckets:Int;
	private var size:Int;
	private var nOccupied:Int;
	private var upperBound:Int;

	#if !no_map_cache
	// One-slot cache for the last successful lookup (-1 index = invalid).
	private var cachedKey:String;
	private var cachedIndex:Int;
	#end

	#if DEBUG_HASHTBL
	private var totalProbes:Int;
	private var probeTimes:Int;
	private var sameHash:Int;
	private var maxProbe:Int;
	#end

	// Creates an empty map; storage is allocated lazily by the first set().
	public function new():Void {
		#if !no_map_cache
		cachedIndex = -1;
		#end
	}

	// Inserts or overwrites `key`, growing/rehashing when too occupied and
	// recycling the first deleted slot met while probing.
	public function set(key:String, value:T):Void {
		var x:Int, k:Int;
		if (nOccupied >= upperBound) {
			if (nBuckets > (size << 1)) {
				resize(nBuckets - 1); // clear "deleted" elements
			} else {
				resize(nBuckets + 2);
			}
		}
		var hashes = hashes, keys = _keys;
		{
			var mask = (nBuckets == 0) ? 0 : nBuckets - 1;
			x = nBuckets;
			k = hash(key);
			var i = k & mask, nProbes = 0;
			var delKey = -1;
			// to speed things up, don't loop if the first bucket is already free
			if (isEmpty(hashes[i])) {
				x = i;
			} else {
				var last = i, flag;
				// Probe (quadratic: step grows by 1 each iteration) until we hit
				// the key itself or an empty bucket, remembering the first tombstone.
				while (!(isEmpty(flag = hashes[i]) || (flag == k && _keys[i] == key))) {
					if (isDel(flag) && delKey == -1) {
						delKey = i;
					}
					i = (i + ++nProbes) & mask;
					#if DEBUG_HASHTBL
					probeTimes++;
					if (i == last)
						throw "assert";
					#end
				}
				if (isEmpty(flag) && delKey != -1) {
					x = delKey;
				} else {
					x = i;
				}
			}
			#if DEBUG_HASHTBL
			if (nProbes > maxProbe)
				maxProbe = nProbes;
			totalProbes++;
			#end
		}
		var flag = hashes[x];
		if (isEmpty(flag)) {
			// Fresh slot: a new entry increases both size and occupancy.
			keys[x] = key;
			vals[x] = value;
			hashes[x] = k;
			size++;
			nOccupied++;
		} else if (isDel(flag)) {
			// Recycled tombstone: its occupancy was already counted.
			keys[x] = key;
			vals[x] = value;
			hashes[x] = k;
			size++;
		} else {
			assert(_keys[x] == key);
			vals[x] = value;
		}
		#if !no_map_cache
		cachedIndex = x;
		cachedKey = key;
		#end
	}

	// Returns the bucket index holding a live entry for `key`, or -1 if absent.
	private final function lookup(key:String):Int {
		if (nBuckets != 0) {
			var hashes = hashes, keys = _keys;
			var mask = nBuckets - 1, hash = hash(key), k = hash, nProbes = 0;
			var i = k & mask;
			var last = i, flag;
			// if we hit an empty bucket, it means we're done
			while (!isEmpty(flag = hashes[i]) && (isDel(flag) || flag != k || keys[i] != key)) {
				i = (i + ++nProbes) & mask;
				#if DEBUG_HASHTBL
				probeTimes++;
				if (i == last)
					throw "assert";
				#end
			}
			#if DEBUG_HASHTBL
			if (nProbes > maxProbe)
				maxProbe = nProbes;
			totalProbes++;
			#end
			return isEither(flag) ? -1 : i;
		}
		return -1;
	}

	// Grows or shrinks the table to roundUp(newNBuckets) (min 4) and rehashes
	// all live entries in place using a khash-style kick-out pass.
	@:private final function resize(newNBuckets:Int):Void {
		// This function uses 0.25*n_bucktes bytes of working space instead of [sizeof(key_t+val_t)+.25]*n_buckets.
		var newHash = null;
		var j = 1;
		{
			newNBuckets = roundUp(newNBuckets);
			if (newNBuckets < 4)
				newNBuckets = 4;
			if (size >= (newNBuckets * HASH_UPPER + 0.5))
				/* requested size is too small */ {
				j = 0;
			} else { /* hash table size to be changed (shrink or expand); rehash */
				var nfSize = newNBuckets;
				newHash = new NativeArray(nfSize); // all-zero == all FLAG_EMPTY
				if (nBuckets < newNBuckets) // expand
				{
					var k = new NativeArray(newNBuckets);
					if (_keys != null)
						arrayCopy(_keys, 0, k, 0, nBuckets);
					_keys = k;
					var v = new NativeArray(newNBuckets);
					if (vals != null)
						arrayCopy(vals, 0, v, 0, nBuckets);
					vals = v;
				} // otherwise shrink
			}
		}
		if (j != 0) { // rehashing is required
			// resetting cache
			#if !no_map_cache
			cachedKey = null;
			cachedIndex = -1;
			#end
			j = -1;
			var nBuckets = nBuckets,
				_keys = _keys,
				vals = vals,
				hashes = hashes;
			var newMask = newNBuckets - 1;
			while (++j < nBuckets) {
				var k;
				if (!isEither(k = hashes[j])) {
					var key = _keys[j];
					var val = vals[j];
					_keys[j] = null;
					vals[j] = cast null;
					hashes[j] = FLAG_DEL;
					while (true)
						/* kick-out process; sort of like in Cuckoo hashing */ {
						var nProbes = 0;
						var i = k & newMask;
						while (!isEmpty(newHash[i])) {
							i = (i + ++nProbes) & newMask;
						}
						newHash[i] = k;
						if (i < nBuckets && !isEither(k = hashes[i]))
							/* kick out the existing element */ {
							{
								var tmp = _keys[i];
								_keys[i] = key;
								key = tmp;
							} {
								var tmp = vals[i];
								vals[i] = val;
								val = tmp;
							}
							hashes[i] = FLAG_DEL; /* mark it as deleted in the old hash table */
						} else { /* write the element and jump out of the loop */
							_keys[i] = key;
							vals[i] = val;
							break;
						}
					}
				}
			}
			if (nBuckets > newNBuckets)
				/* shrink the hash table */ {
				{
					var k = new NativeArray(newNBuckets);
					arrayCopy(_keys, 0, k, 0, newNBuckets);
					this._keys = k;
				} {
					var v = new NativeArray(newNBuckets);
					arrayCopy(vals, 0, v, 0, newNBuckets);
					this.vals = v;
				}
			}
			this.hashes = newHash;
			this.nBuckets = newNBuckets;
			this.nOccupied = size;
			this.upperBound = Std.int(newNBuckets * HASH_UPPER + .5);
		}
	}

	// Returns the value bound to `key`, or null when absent; refreshes the cache on hit.
	public function get(key:String):Null<T> {
		var idx = -1;
		#if !no_map_cache
		if (cachedKey == key && ((idx = cachedIndex) != -1)) {
			return vals[idx];
		}
		#end
		idx = lookup(key);
		if (idx != -1) {
			#if !no_map_cache
			cachedKey = key;
			cachedIndex = idx;
			#end
			return vals[idx];
		}
		return null;
	}

	// Like get(), but returns `def` instead of null when the key is absent.
	private function getDefault(key:String, def:T):T {
		var idx = -1;
		#if !no_map_cache
		if (cachedKey == key && ((idx = cachedIndex) != -1)) {
			return vals[idx];
		}
		#end
		idx = lookup(key);
		if (idx != -1) {
			#if !no_map_cache
			cachedKey = key;
			cachedIndex = idx;
			#end
			return vals[idx];
		}
		return def;
	}

	// True when `key` has a live entry in the map.
	public function exists(key:String):Bool {
		var idx = -1;
		#if !no_map_cache
		if (cachedKey == key && ((idx = cachedIndex) != -1)) {
			return true;
		}
		#end
		idx = lookup(key);
		if (idx != -1) {
			#if !no_map_cache
			cachedKey = key;
			cachedIndex = idx;
			#end
			return true;
		}
		return false;
	}

	// Removes `key`, marking its bucket deleted and releasing key/value refs.
	// Returns whether the key existed.
	public function remove(key:String):Bool {
		var idx = -1;
		#if !no_map_cache
		if (!(cachedKey == key && ((idx = cachedIndex) != -1)))
		#end
		{
			idx = lookup(key);
		}
		if (idx == -1) {
			return false;
		} else {
			#if !no_map_cache
			if (cachedKey == key) {
				cachedIndex = -1;
			}
			#end
			hashes[idx] = FLAG_DEL;
			_keys[idx] = null;
			vals[idx] = null;
			--size;
			return true;
		}
	}

	// Iterator over the keys currently in the map.
	public inline function keys():Iterator<String> {
		return new StringMapKeyIterator(this);
	}

	// Iterator over key/value pairs currently in the map.
	@:runtime public inline function keyValueIterator():KeyValueIterator<String, T> {
		return new haxe.iterators.MapKeyValueIterator(this);
	}

	// Iterator over the values currently in the map.
	public inline function iterator():Iterator<T> {
		return new StringMapValueIterator(this);
	}

	// Shallow copy: keys and values are shared, structure is rebuilt via set().
	public function copy():StringMap<T> {
		var copied = new StringMap();
		for (key in keys())
			copied.set(key, get(key));
		return copied;
	}

	// Renders the map as "{k1 => v1, k2 => v2}" in iteration order.
	public function toString():String {
		var s = new StringBuf();
		s.add("{");
		var it = keys();
		for (i in it) {
			s.add(i);
			s.add(" => ");
			s.add(Std.string(get(i)));
			if (it.hasNext())
				s.add(", ");
		}
		s.add("}");
		return s.toString();
	}

	// Drops all entries, releases the backing arrays and resets counters.
	public function clear():Void {
		hashes = null;
		_keys = null;
		vals = null;
		nBuckets = 0;
		size = 0;
		nOccupied = 0;
		upperBound = 0;
		#if !no_map_cache
		cachedKey = null;
		cachedIndex = -1;
		#end
		#if DEBUG_HASHTBL
		totalProbes = 0;
		probeTimes = 0;
		sameHash = 0;
		maxProbe = 0;
		#end
	}

	// Rounds x up to the next power of two (bit-smearing trick).
	extern private static inline function roundUp(x:Int):Int {
		--x;
		x |= (x) >>> 1;
		x |= (x) >>> 2;
		x |= (x) >>> 4;
		x |= (x) >>> 8;
		x |= (x) >>> 16;
		return ++x;
	}

	// Kept for reference: quadratic probing (i += ++nProbes) is used instead.
	extern private static inline function getInc(k:Int, mask:Int):Int // return 1 for linear probing
		return (((k) >> 3 ^ (k) << 3) | 1) & (mask);

	// True when v is FLAG_EMPTY (0) or FLAG_DEL (1), i.e. the bucket is not live.
	extern private static inline function isEither(v:HashType):Bool
		return (v & 0xFFFFFFFE) == 0;

	extern private static inline function isEmpty(v:HashType):Bool
		return v == FLAG_EMPTY;

	extern private static inline function isDel(v:HashType):Bool
		return v == FLAG_DEL;

	// guarantee: Whatever this function is, it will never return 0 nor 1
	extern private static inline function hash(s:String):HashType {
		var k:Int = (cast s : java.NativeString).hashCode();
		// avalanche mix over hashCode() to spread poorly-distributed hashes
		k = (k + 0x7ed55d16) + (k << 12);
		k = (k ^ 0xc761c23c) ^ (k >> 19);
		k = (k + 0x165667b1) + (k << 5);
		k = (k + 0xd3a2646c) ^ (k << 9);
		k = (k + 0xfd7046c5) + (k << 3);
		k = (k ^ 0xb55a4f09) ^ (k >> 16);
		var ret = k;
		if (isEither(ret)) {
			// remap the two reserved state values to valid hashes
			if (ret == 0)
				ret = 2;
			else
				ret = 0xFFFFFFFF;
		}
		return ret;
	}

	// Thin wrapper over java.lang.System.arraycopy.
	extern private static inline function arrayCopy(sourceArray:Dynamic, sourceIndex:Int, destinationArray:Dynamic, destinationIndex:Int, length:Int):Void
		java.lang.System.arraycopy(sourceArray, sourceIndex, destinationArray, destinationIndex, length);

	// Debug-only invariant check; compiles to nothing without -D DEBUG_HASHTBL.
	extern private static inline function assert(x:Bool):Void {
		#if DEBUG_HASHTBL
		if (!x)
			throw "assert failed";
		#end
	}
}
// Stored hash of a bucket. Values 0 (FLAG_EMPTY) and 1 (FLAG_DEL) are
// reserved bucket states; hash() never returns them.
private typedef HashType = Int;
@:access(haxe.ds.StringMap)
// Iterator over the keys of a StringMap: scans the slot array, skipping
// empty and deleted slots.
private final class StringMapKeyIterator<T> {
	var m:StringMap<T>;
	var i:Int;
	var len:Int;

	public function new(m:StringMap<T>) {
		this.m = m;
		this.i = 0;
		this.len = m.nBuckets;
	}

	// Advances a local cursor to the next live slot; the field `i` is only
	// updated on success, matching the original scan-and-park behavior.
	public function hasNext():Bool {
		var j = i;
		while (j < len) {
			if (!StringMap.isEither(m.hashes[j])) {
				i = j;
				return true;
			}
			j++;
		}
		return false;
	}

	// Returns the key at the current slot and refreshes the map's one-entry
	// lookup cache so an immediate get()/remove() of this key is O(1).
	public function next():String {
		var key = m._keys[i];
		#if !no_map_cache
		m.cachedIndex = i;
		m.cachedKey = key;
		#end
		i++;
		return key;
	}
}
@:access(haxe.ds.StringMap)
// Iterator over the values of a StringMap: scans the slot array, skipping
// empty and deleted slots.
private final class StringMapValueIterator<T> {
	var m:StringMap<T>;
	var i:Int;
	var len:Int;

	public function new(m:StringMap<T>) {
		this.m = m;
		this.i = 0;
		this.len = m.nBuckets;
	}

	// Advances a local cursor to the next live slot; `i` is only updated
	// when a live slot is found.
	public function hasNext():Bool {
		var j = i;
		while (j < len) {
			if (!StringMap.isEither(m.hashes[j])) {
				i = j;
				return true;
			}
			j++;
		}
		return false;
	}

	public inline function next():T {
		var v = m.vals[i];
		i++;
		return v;
	}
}

View File

@ -0,0 +1,582 @@
/*
* Copyright (C)2005-2019 Haxe Foundation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package haxe.ds;
import java.NativeArray;
import java.lang.ref.WeakReference;
import java.lang.ref.ReferenceQueue;
@:coreApi class WeakMap<K:{}, V> implements haxe.Constraints.IMap<K, V> {
extern private static inline var HASH_UPPER = 0.77;
extern private static inline var FLAG_EMPTY = 0;
extern private static inline var FLAG_DEL = 1;
/**
* This is the most important structure here and the reason why it's so fast.
* It's an array of all the hashes contained in the table. These hashes cannot be 0 nor 1,
* which stand for "empty" and "deleted" states.
*
* The lookup algorithm will keep looking until a 0 or the key wanted is found;
* The insertion algorithm will do the same but will also break when FLAG_DEL is found;
*/
private var hashes:NativeArray<HashType>;
private var entries:NativeArray<Entry<K, V>>;
// weak map specific
private var queue:ReferenceQueue<K>;
private var nBuckets:Int;
private var size:Int;
private var nOccupied:Int;
private var upperBound:Int;
#if !no_map_cache
private var cachedEntry:Entry<K, V>;
private var cachedIndex:Int;
#end
#if DEBUG_HASHTBL
private var totalProbes:Int;
private var probeTimes:Int;
private var sameHash:Int;
private var maxProbe:Int;
#end
// Creates an empty weak map. The ReferenceQueue receives the keys the GC
// has reclaimed, so their slots can be purged lazily by cleanupRefs().
public function new():Void {
#if !no_map_cache
cachedIndex = -1;
#end
queue = new ReferenceQueue();
}
// Drains the ReferenceQueue: for every key the GC has reclaimed, releases
// the associated value and turns its hash slot into a tombstone.
// Called at the start of every public operation.
@:analyzer(ignore)
private function cleanupRefs():Void {
var x:Dynamic = null, nOccupied = nOccupied;
while ((x = queue.poll()) != null) {
// even if not found on hashtable (already removed), release value
var x:Entry<K, V> = cast x;
x.value = null;
// lookup index
if (nOccupied != 0) {
var mask = nBuckets - 1, hash = x.hash, nProbes = 0;
var i = hash & mask;
var last = i, flag;
// probe for this exact Entry object (identity compare), skipping tombstones
while (!isEmpty(flag = hashes[i]) && (isDel(flag) || flag != hash || entries[i] != x)) {
i = (i + ++nProbes) & mask;
}
if (entries[i] == x) {
#if !no_map_cache
// invalidate the one-entry cache if it points at the slot being purged
if (cachedIndex == i) {
cachedIndex = -1;
cachedEntry = null;
}
#end
entries[i] = null;
// tombstone (not empty) so other keys' probe chains stay intact
hashes[i] = FLAG_DEL;
--size;
}
}
}
}
/**
	Inserts or replaces the value for `key`. A fresh Entry (a WeakReference
	registered with `queue`) is allocated on every call, so the slot can be
	purged once the key is garbage-collected.
**/
public function set(key:K, value:V):Void {
cleanupRefs();
var x:Int, k:Int;
// grow — or re-pack tombstones — before inserting if the load bound is hit
if (nOccupied >= upperBound) {
if (nBuckets > (size << 1))
resize(nBuckets - 1); // clear "deleted" elements
else
resize(nBuckets + 2);
}
k = hash(key);
var hashes = hashes, entries = entries;
{
var mask = (nBuckets == 0) ? 0 : nBuckets - 1;
var site = x = nBuckets;
var i = k & mask, nProbes = 0;
// first tombstone seen on the probe path; reusable as the insert site
var delKey = -1;
// for speed up
if (isEmpty(hashes[i])) {
x = i;
} else {
// var inc = getInc(k, mask);
var last = i, flag;
// triangular probing until a free slot or a matching live key is found
while (!(isEmpty(flag = hashes[i]) || (flag == k && entries[i].keyEquals(key)))) {
if (delKey == -1 && isDel(flag))
delKey = i;
i = (i + ++nProbes) & mask;
#if DEBUG_HASHTBL
probeTimes++;
if (i == last)
throw "assert";
#end
}
// prefer recycling a tombstone over consuming a fresh empty slot
if (isEmpty(flag) && delKey != -1)
x = delKey;
else
x = i;
}
#if DEBUG_HASHTBL
if (nProbes > maxProbe)
maxProbe = nProbes;
totalProbes++;
#end
}
var flag = hashes[x], entry = new Entry(key, value, k, queue);
if (isEmpty(flag)) {
// brand-new slot: both size and occupancy grow
entries[x] = entry;
hashes[x] = k;
size++;
nOccupied++;
} else if (isDel(flag)) {
// reused tombstone: occupancy was already counted
entries[x] = entry;
hashes[x] = k;
size++;
} else {
// same key already present: replace the entry in place
assert(entries[x].keyEquals(key));
entries[x] = entry;
}
#if !no_map_cache
cachedIndex = x;
cachedEntry = entry;
#end
}
/**
	Finds the slot index holding `key`, or -1 when absent. Probing skips
	tombstones and compares the stored hash before the (potentially more
	expensive) key-equality check.
**/
private final function lookup(key:K):Int {
if (nBuckets != 0) {
var hashes = hashes, entries = entries;
var mask = nBuckets - 1, hash = hash(key), k = hash, nProbes = 0;
var i = k & mask;
var last = i, flag;
// var inc = getInc(k, mask);
while (!isEmpty(flag = hashes[i]) && (isDel(flag) || flag != k || !entries[i].keyEquals(key))) {
i = (i + ++nProbes) & mask;
#if DEBUG_HASHTBL
probeTimes++;
if (i == last)
throw "assert";
#end
}
#if DEBUG_HASHTBL
if (nProbes > maxProbe)
maxProbe = nProbes;
totalProbes++;
#end
// the loop only stops on an empty slot (key absent) or a live match
return isEither(flag) ? -1 : i;
}
return -1;
}
/**
	Resizes the table to `newNBuckets` (rounded up to a power of two, minimum
	4) and rehashes every live entry in place with a kick-out scheme, so only
	the new hash array — not a full new table — is allocated up front.
**/
@:private final function resize(newNBuckets:Int):Void {
// This function uses 0.25*n_bucktes bytes of working space instead of [sizeof(key_t+val_t)+.25]*n_buckets.
var newHash = null;
var j = 1;
{
newNBuckets = roundUp(newNBuckets);
if (newNBuckets < 4)
newNBuckets = 4;
if (size >= (newNBuckets * HASH_UPPER + 0.5))
/* requested size is too small */ {
j = 0;
} else { /* hash table size to be changed (shrink or expand); rehash */
var nfSize = newNBuckets;
newHash = new NativeArray(nfSize);
if (nBuckets < newNBuckets) // expand
{
var e = new NativeArray(newNBuckets);
if (entries != null)
arrayCopy(entries, 0, e, 0, nBuckets);
entries = e;
} // otherwise shrink
}
}
if (j != 0) { // rehashing is required
// resetting cache
#if !no_map_cache
cachedEntry = null;
cachedIndex = -1;
#end
j = -1;
var nBuckets = nBuckets, entries = entries, hashes = hashes;
var newMask = newNBuckets - 1;
while (++j < nBuckets) {
var k;
if (!isEither(k = hashes[j])) {
// evict this live entry from the old table, then re-seat it
var entry = entries[j];
entries[j] = null;
hashes[j] = FLAG_DEL;
while (true)
/* kick-out process; sort of like in Cuckoo hashing */ {
var nProbes = 0;
var i = k & newMask;
// probe the NEW hash array for a free slot
while (!isEmpty(newHash[i]))
i = (i + ++nProbes) & newMask;
newHash[i] = k;
if (i < nBuckets && !isEither(k = hashes[i]))
/* kick out the existing element */ {
{
var tmp = entries[i];
entries[i] = entry;
entry = tmp;
}
hashes[i] = FLAG_DEL; /* mark it as deleted in the old hash table */
} else { /* write the element and jump out of the loop */
entries[i] = entry;
break;
}
}
}
}
if (nBuckets > newNBuckets)
/* shrink the hash table */ {
{
var e = new NativeArray(newNBuckets);
arrayCopy(entries, 0, e, 0, newNBuckets);
this.entries = e;
}
}
this.hashes = newHash;
this.nBuckets = newNBuckets;
// tombstones are gone after a rehash, so occupancy equals size again
this.nOccupied = size;
this.upperBound = Std.int(newNBuckets * HASH_UPPER + .5);
}
}
/**
	Returns the value bound to `key`, or null when the key is absent
	(including when it has already been garbage-collected).
**/
public function get(key:K):Null<V> {
	cleanupRefs();
	var idx = -1;
	#if !no_map_cache
	// fast path: the last slot touched may already hold this key
	if (cachedEntry != null && cachedEntry.keyEquals(key) && ((idx = cachedIndex) != -1))
		return cachedEntry.value;
	#end
	idx = lookup(key);
	if (idx == -1)
		return null;
	var entry = entries[idx];
	#if !no_map_cache
	cachedEntry = entry;
	cachedIndex = idx;
	#end
	return entry.value;
}
/**
	Like get(), but returns `def` instead of null when the key is absent.
**/
private function getDefault(key:K, def:V):V {
	cleanupRefs();
	var idx = -1;
	#if !no_map_cache
	// fast path via the one-entry cache
	if (cachedEntry != null && cachedEntry.keyEquals(key) && ((idx = cachedIndex) != -1))
		return cachedEntry.value;
	#end
	idx = lookup(key);
	if (idx == -1)
		return def;
	var entry = entries[idx];
	#if !no_map_cache
	cachedEntry = entry;
	cachedIndex = idx;
	#end
	return entry.value;
}
/**
	Tells whether `key` is currently present (and not yet collected).
**/
public function exists(key:K):Bool {
	cleanupRefs();
	var idx = -1;
	#if !no_map_cache
	// fast path via the one-entry cache
	if (cachedEntry != null && cachedEntry.keyEquals(key) && ((idx = cachedIndex) != -1))
		return true;
	#end
	idx = lookup(key);
	if (idx == -1)
		return false;
	#if !no_map_cache
	cachedEntry = entries[idx];
	cachedIndex = idx;
	#end
	return true;
}
/**
	Removes `key` from the map; returns true when an entry was actually
	deleted. The slot becomes a tombstone (FLAG_DEL) so probe chains for
	other keys are not broken.
**/
public function remove(key:K):Bool {
cleanupRefs();
var idx = -1;
#if !no_map_cache
// try the one-entry cache first; fall through to a full lookup on miss
if (!(cachedEntry != null && cachedEntry.keyEquals(key) && ((idx = cachedIndex) != -1)))
#end
{
idx = lookup(key);
}
if (idx == -1) {
return false;
} else {
#if !no_map_cache
// the cache would now point at a dead slot; invalidate it
if (cachedEntry != null && cachedEntry.keyEquals(key)) {
cachedIndex = -1;
cachedEntry = null;
}
#end
hashes[idx] = FLAG_DEL;
entries[idx] = null;
--size;
return true;
}
}
// Iterates over keys that are still alive; dead references are purged first.
public inline function keys():Iterator<K> {
cleanupRefs();
return new WeakMapKeyIterator(this);
}

// Iterates over values whose keys are still alive; dead refs purged first.
public inline function iterator():Iterator<V> {
cleanupRefs();
return new WeakMapValueIterator(this);
}

// Key/value iteration, built on top of keys() + get().
public inline function keyValueIterator():KeyValueIterator<K, V> {
return new haxe.iterators.MapKeyValueIterator(this);
}
/**
	Returns a shallow copy with the same key/value bindings; keys remain
	weakly referenced in the copy as well.
**/
public function copy():WeakMap<K, V> {
	var result = new WeakMap();
	for (pair in keyValueIterator())
		result.set(pair.key, pair.value);
	return result;
}
/**
	Renders the live entries as "{k1 => v1, k2 => v2}".
**/
public function toString():String {
	var out = new StringBuf();
	out.add("{");
	var it = keys();
	for (k in it) {
		out.add(Std.string(k));
		out.add(" => ");
		out.add(Std.string(get(k)));
		// separator only when another live key follows
		if (it.hasNext())
			out.add(", ");
	}
	out.add("}");
	return out.toString();
}
/**
	Empties the map, releasing the backing arrays and starting a fresh
	ReferenceQueue (references already enqueued are simply abandoned).
**/
public function clear():Void {
hashes = null;
entries = null;
queue = new ReferenceQueue();
nBuckets = 0;
size = 0;
nOccupied = 0;
upperBound = 0;
#if !no_map_cache
cachedEntry = null;
cachedIndex = -1;
#end
#if DEBUG_HASHTBL
// reset the probe-statistics counters used for diagnostics
totalProbes = 0;
probeTimes = 0;
sameHash = 0;
maxProbe = 0;
#end
}
// Rounds `x` up to the next power of two (a power of two maps to itself;
// 0 stays 0). Classic bit-smearing trick: spread the top bit right, then +1.
extern private static inline function roundUp(x:Int):Int {
--x;
x |= (x) >>> 1;
x |= (x) >>> 2;
x |= (x) >>> 4;
x |= (x) >>> 8;
x |= (x) >>> 16;
return ++x;
}

// Alternative probe increment (currently unused; callers use triangular probing).
extern private static inline function getInc(k:Int, mask:Int):Int // return 1 for linear probing
return (((k) >> 3 ^ (k) << 3) | 1) & (mask);

// True when the slot state is one of the two reserved markers
// (FLAG_EMPTY == 0 or FLAG_DEL == 1), i.e. the slot holds no live entry.
extern private static inline function isEither(v:HashType):Bool
return (v & 0xFFFFFFFE) == 0;

// True when the slot has never held an entry (probe chains stop here).
extern private static inline function isEmpty(v:HashType):Bool
return v == FLAG_EMPTY;

// True when the slot is a tombstone left behind by a removal.
extern private static inline function isDel(v:HashType):Bool
return v == FLAG_DEL;
// Computes the table hash for a key. Guaranteed never to return
// FLAG_EMPTY (0) or FLAG_DEL (1), which are reserved as slot-state markers.
extern private static inline function hash(s:Dynamic):HashType {
	// start from the object's own hashCode(), then apply integer mixing
	// rounds to break up clustered hash codes before masking
	var h:Int = untyped s.hashCode();
	h = (h + 0x7ed55d16) + (h << 12);
	h = (h ^ 0xc761c23c) ^ (h >> 19);
	h = (h + 0x165667b1) + (h << 5);
	h = (h + 0xd3a2646c) ^ (h << 9);
	h = (h + 0xfd7046c5) + (h << 3);
	h = (h ^ 0xb55a4f09) ^ (h >> 16);
	// remap the two reserved values onto ordinary hash codes
	if (isEither(h))
		h = (h == 0) ? 2 : 0xFFFFFFFF;
	return h;
}
// Thin typed wrapper over java.lang.System.arraycopy, used when growing or
// shrinking the backing arrays.
extern private static inline function arrayCopy(sourceArray:Dynamic, sourceIndex:Int, destinationArray:Dynamic, destinationIndex:Int, length:Int):Void
java.lang.System.arraycopy(sourceArray, sourceIndex, destinationArray, destinationIndex, length);

// Debug-only assertion; compiles to nothing unless -D DEBUG_HASHTBL is set.
extern private static inline function assert(x:Bool):Void {
#if DEBUG_HASHTBL
if (!x)
throw "assert failed";
#end
}
}
// One hash-table slot: weakly references the key (registered with the map's
// ReferenceQueue) while strongly holding the value and the key's hash, so
// the slot can still be located after the key has been collected.
private class Entry<K, V> extends WeakReference<K> {
public var value:V;
// cached hash of the key; needed by cleanupRefs() once get() returns null
public var hash(default, null):Int;

public function new(key:K, value:V, hash:Int, queue:ReferenceQueue<K>) {
super(key, queue);
this.value = value;
this.hash = hash;
}

// Equality against a candidate key. NOTE(review): once the referent is
// collected get() is null, so this presumably yields false — assumes
// k.equals(null) is false per the usual Object.equals contract; verify.
final inline public function keyEquals(k:K):Bool {
return k != null && untyped k.equals(get());
}
}
@:access(haxe.ds.WeakMap)
// Iterator over the live keys of a WeakMap: scans the slot array, skipping
// empty slots, tombstones, and entries whose key was already collected.
private final class WeakMapKeyIterator<T:{}, V> {
	var m:WeakMap<T, V>;
	var i:Int;
	var len:Int;
	// Strong reference to the key found by hasNext(), so the GC cannot
	// collect it between hasNext() and next().
	var lastKey:T;

	public function new(m:WeakMap<T, V>) {
		this.i = 0;
		this.m = m;
		this.len = m.nBuckets;
	}

	public function hasNext():Bool {
		for (j in i...len) {
			if (!WeakMap.isEither(m.hashes[j])) {
				var entry = m.entries[j], last = entry.get();
				if (last != null) {
					#if !no_map_cache
					// BUGFIX: cache the slot actually found (j), not the stale
					// cursor i. The original wrote `cachedIndex = i` before
					// `i = j`, pairing cachedEntry (slot j) with index i — a
					// subsequent remove() hitting that cache would tombstone
					// the wrong slot.
					m.cachedIndex = j;
					m.cachedEntry = entry;
					#end
					lastKey = last; // keep a strong reference to the key while iterating, so it doesn't get collected
					i = j;
					return true;
				}
			}
		}
		lastKey = null;
		return false;
	}

	public function next():T {
		i = i + 1;
		return lastKey;
	}
}
@:access(haxe.ds.WeakMap)
// Iterator over the values of a WeakMap whose keys are still alive.
private final class WeakMapValueIterator<K:{}, T> {
	var m:WeakMap<K, T>;
	var i:Int;
	var len:Int;

	public function new(m:WeakMap<K, T>) {
		this.m = m;
		this.i = 0;
		this.len = m.nBuckets;
	}

	// Advances a local cursor past empty slots, tombstones, and entries
	// whose key has been collected; `i` is only updated on success.
	public function hasNext():Bool {
		var j = i;
		while (j < len) {
			if (!WeakMap.isEither(m.hashes[j]) && m.entries[j].get() != null) {
				i = j;
				return true;
			}
			j++;
		}
		return false;
	}

	public inline function next():T {
		var entry = m.entries[i];
		i = i + 1;
		return entry.value;
	}
}
private typedef HashType = Int;