Commit d5e4c406 authored by Keith Randall

runtime: remove size argument from hash and equal algorithms

The equal algorithm used to take the size:
   equal(p, q *T, size uintptr) bool
With this change, it does not:
   equal(p, q *T) bool
Similarly for the hash algorithm.

The size is rarely used, as most equal functions know the size
of the thing they are comparing.  For instance f32equal already
knows its inputs are 4 bytes in size.

For cases where the size is not known, we allocate a closure
(one for each size needed) that points to an assembly stub that
reads the size out of the closure and calls generic code that
has a size argument.

Reduces the size of the go binary by 0.07%.  Performance impact
is not measurable.

Change-Id: I6e00adf3dde7ad2974adbcff0ee91e86d2194fec
Reviewed-on: https://go-review.googlesource.com/2392
Reviewed-by: Russ Cox <rsc@golang.org>
parent 60801c48
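A small sketch of the idea above, in ordinary Go (assuming Go 1.17+ for unsafe.Slice). The names memequalSized and memequalVarlen are invented for illustration only; in the actual change the per-size wrapper is a compiler-emitted closure whose code is an assembly stub (memequal_varlen / memhash_varlen) that reads the size out of the closure, not a Go closure.

package main

import (
	"fmt"
	"unsafe"
)

// Old shape: the generic routine needs an explicit size argument.
func memequalSized(p, q unsafe.Pointer, size uintptr) bool {
	a := unsafe.Slice((*byte)(p), size)
	b := unsafe.Slice((*byte)(q), size)
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}

// New shape: fixed-size variants already know their size...
func memequal8(p, q unsafe.Pointer) bool  { return *(*int8)(p) == *(*int8)(q) }
func memequal32(p, q unsafe.Pointer) bool { return *(*int32)(p) == *(*int32)(q) }

// ...and any other size gets a per-size wrapper that bakes the size in,
// so every equal function ends up with the same equal(p, q) bool shape.
func memequalVarlen(size uintptr) func(p, q unsafe.Pointer) bool {
	return func(p, q unsafe.Pointer) bool { return memequalSized(p, q, size) }
}

func main() {
	x := [3]byte{1, 2, 3}
	y := [3]byte{1, 2, 3}
	eq3 := memequalVarlen(3) // one wrapper per distinct size needed
	fmt.Println(eq3(unsafe.Pointer(&x), unsafe.Pointer(&y))) // true

	a, b := int32(7), int32(7)
	fmt.Println(memequal32(unsafe.Pointer(&a), unsafe.Pointer(&b))) // true
}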
......@@ -127,11 +127,11 @@ char *runtimeimport =
"func @\"\".growslice (@\"\".typ·2 *byte, @\"\".old·3 []any, @\"\".n·4 int64) (@\"\".ary·1 []any)\n"
"func @\"\".memmove (@\"\".to·1 *any, @\"\".frm·2 *any, @\"\".length·3 uintptr)\n"
"func @\"\".memequal (@\"\".x·2 *any, @\"\".y·3 *any, @\"\".size·4 uintptr) (? bool)\n"
"func @\"\".memequal8 (@\"\".x·2 *any, @\"\".y·3 *any, @\"\".size·4 uintptr) (? bool)\n"
"func @\"\".memequal16 (@\"\".x·2 *any, @\"\".y·3 *any, @\"\".size·4 uintptr) (? bool)\n"
"func @\"\".memequal32 (@\"\".x·2 *any, @\"\".y·3 *any, @\"\".size·4 uintptr) (? bool)\n"
"func @\"\".memequal64 (@\"\".x·2 *any, @\"\".y·3 *any, @\"\".size·4 uintptr) (? bool)\n"
"func @\"\".memequal128 (@\"\".x·2 *any, @\"\".y·3 *any, @\"\".size·4 uintptr) (? bool)\n"
"func @\"\".memequal8 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n"
"func @\"\".memequal16 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n"
"func @\"\".memequal32 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n"
"func @\"\".memequal64 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n"
"func @\"\".memequal128 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n"
"func @\"\".int64div (? int64, ? int64) (? int64)\n"
"func @\"\".uint64div (? uint64, ? uint64) (? uint64)\n"
"func @\"\".int64mod (? int64, ? int64) (? int64)\n"
......
......@@ -384,6 +384,7 @@ enum
SymUniq = 1<<3,
SymSiggen = 1<<4,
SymAsm = 1<<5,
SymAlgGen = 1<<6,
};
struct Sym
......
......@@ -731,7 +731,7 @@ dcommontype(Sym *s, int ot, Type *t)
dowidth(t);
alg = algtype(t);
algsym = S;
if(alg < 0)
if(alg < 0 || alg == AMEM)
algsym = dalgsym(t);
if(t->sym != nil && !isptr[t->etype])
......@@ -791,7 +791,7 @@ dcommontype(Sym *s, int ot, Type *t)
if(gcprog)
i |= KindGCProg;
ot = duint8(s, ot, i); // kind
if(alg >= 0)
if(algsym == S)
ot = dsymptr(s, ot, algarray, alg*sizeofAlg);
else
ot = dsymptr(s, ot, algsym, 0);
......@@ -1311,29 +1311,58 @@ dalgsym(Type *t)
{
int ot;
Sym *s, *hash, *hashfunc, *eq, *eqfunc;
char *p;
// dalgsym is only called for a type that needs an algorithm table,
// which implies that the type is comparable (or else it would use ANOEQ).
s = typesymprefix(".alg", t);
hash = typesymprefix(".hash", t);
genhash(hash, t);
eq = typesymprefix(".eq", t);
geneq(eq, t);
// make Go funcs (closures) for calling hash and equal from Go
hashfunc = typesymprefix(".hashfunc", t);
dsymptr(hashfunc, 0, hash, 0);
ggloblsym(hashfunc, widthptr, DUPOK|RODATA);
eqfunc = typesymprefix(".eqfunc", t);
dsymptr(eqfunc, 0, eq, 0);
ggloblsym(eqfunc, widthptr, DUPOK|RODATA);
if(algtype(t) == AMEM) {
// we use one algorithm table for all AMEM types of a given size
p = smprint(".alg%lld", t->width);
s = pkglookup(p, typepkg);
free(p);
if(s->flags & SymAlgGen)
return s;
s->flags |= SymAlgGen;
// make hash closure
p = smprint(".hashfunc%lld", t->width);
hashfunc = pkglookup(p, typepkg);
free(p);
ot = 0;
ot = dsymptr(hashfunc, ot, pkglookup("memhash_varlen", runtimepkg), 0);
ot = duintxx(hashfunc, ot, t->width, widthptr); // size encoded in closure
ggloblsym(hashfunc, ot, DUPOK|RODATA);
// make equality closure
p = smprint(".eqfunc%lld", t->width);
eqfunc = pkglookup(p, typepkg);
free(p);
ot = 0;
ot = dsymptr(eqfunc, ot, pkglookup("memequal_varlen", runtimepkg), 0);
ot = duintxx(eqfunc, ot, t->width, widthptr);
ggloblsym(eqfunc, ot, DUPOK|RODATA);
} else {
// generate an alg table specific to this type
s = typesymprefix(".alg", t);
hash = typesymprefix(".hash", t);
eq = typesymprefix(".eq", t);
hashfunc = typesymprefix(".hashfunc", t);
eqfunc = typesymprefix(".eqfunc", t);
genhash(hash, t);
geneq(eq, t);
// make Go funcs (closures) for calling hash and equal from Go
dsymptr(hashfunc, 0, hash, 0);
ggloblsym(hashfunc, widthptr, DUPOK|RODATA);
dsymptr(eqfunc, 0, eq, 0);
ggloblsym(eqfunc, widthptr, DUPOK|RODATA);
}
// ../../runtime/alg.go:/typeAlg
ot = 0;
ot = dsymptr(s, ot, hashfunc, 0);
ot = dsymptr(s, ot, eqfunc, 0);
ggloblsym(s, ot, DUPOK|RODATA);
return s;
}
......
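The AMEM branch of dalgsym above lays each wrapper out as two pointer-sized words: the code pointer (memhash_varlen or memequal_varlen) followed by t->width, and the assembly stubs later in this commit read the width back out of the closure register at offset 4 (32-bit) or 8 (64-bit). A minimal stand-in in plain Go; the struct and field names here are illustrative, not the runtime's real funcval, and unsafe.Add assumes Go 1.17+:

package main

import (
	"fmt"
	"unsafe"
)

// sizedClosure stands in for the .hashfunc<N>/.eqfunc<N> symbols emitted
// above: word 0 is the code pointer, word 1 is the size baked in by dalgsym.
type sizedClosure struct {
	fn   uintptr // would hold the address of memhash_varlen / memequal_varlen
	size uintptr // t->width, written by the duintxx call in dalgsym
}

func main() {
	c := sizedClosure{fn: 0x1234, size: 24}
	// What the stub does with its closure register: load the word that sits
	// one pointer width past the start of the closure.
	sz := *(*uintptr)(unsafe.Add(unsafe.Pointer(&c), unsafe.Sizeof(c.fn)))
	fmt.Println(sz) // 24
}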
......@@ -162,11 +162,11 @@ func growslice(typ *byte, old []any, n int64) (ary []any)
func memmove(to *any, frm *any, length uintptr)
func memequal(x, y *any, size uintptr) bool
func memequal8(x, y *any, size uintptr) bool
func memequal16(x, y *any, size uintptr) bool
func memequal32(x, y *any, size uintptr) bool
func memequal64(x, y *any, size uintptr) bool
func memequal128(x, y *any, size uintptr) bool
func memequal8(x, y *any) bool
func memequal16(x, y *any) bool
func memequal32(x, y *any) bool
func memequal64(x, y *any) bool
func memequal128(x, y *any) bool
// only used on 32-bit
func int64div(int64, int64) int64
......
......@@ -2627,7 +2627,7 @@ hashmem(Type *t)
{
Node *tfn, *n;
Sym *sym;
sym = pkglookup("memhash", runtimepkg);
n = newname(sym);
......@@ -2652,7 +2652,7 @@ hashfor(Type *t)
a = algtype1(t, nil);
switch(a) {
case AMEM:
return hashmem(t);
fatal("hashfor with AMEM type");
case AINTER:
sym = pkglookup("interhash", runtimepkg);
break;
......@@ -2684,7 +2684,6 @@ hashfor(Type *t)
tfn = nod(OTFUNC, N, N);
tfn->list = list(tfn->list, nod(ODCLFIELD, N, typenod(ptrto(t))));
tfn->list = list(tfn->list, nod(ODCLFIELD, N, typenod(types[TUINTPTR])));
tfn->list = list(tfn->list, nod(ODCLFIELD, N, typenod(types[TUINTPTR])));
tfn->rlist = list(tfn->rlist, nod(ODCLFIELD, N, typenod(types[TUINTPTR])));
typecheck(&tfn, Etype);
n->type = tfn->type;
......@@ -2710,7 +2709,7 @@ genhash(Sym *sym, Type *t)
dclcontext = PEXTERN;
markdcl();
// func sym(p *T, s uintptr, h uintptr) uintptr
// func sym(p *T, h uintptr) uintptr
fn = nod(ODCLFUNC, N, N);
fn->nname = newname(sym);
fn->nname->class = PFUNC;
......@@ -2720,8 +2719,6 @@ genhash(Sym *sym, Type *t)
n = nod(ODCLFIELD, newname(lookup("p")), typenod(ptrto(t)));
tfn->list = list(tfn->list, n);
np = n->left;
n = nod(ODCLFIELD, newname(lookup("s")), typenod(types[TUINTPTR]));
tfn->list = list(tfn->list, n);
n = nod(ODCLFIELD, newname(lookup("h")), typenod(types[TUINTPTR]));
tfn->list = list(tfn->list, n);
nh = n->left;
......@@ -2773,14 +2770,13 @@ genhash(Sym *sym, Type *t)
nh,
nod(OMUL, nh, nodintconst(mul))));
// h = hashel(&p[i], sizeof(p[i]), h)
// h = hashel(&p[i], h)
call = nod(OCALL, hashel, N);
nx = nod(OINDEX, np, ni);
nx->bounded = 1;
na = nod(OADDR, nx, N);
na->etype = 1; // no escape to heap
call->list = list(call->list, na);
call->list = list(call->list, nodintconst(t->type->width));
call->list = list(call->list, nh);
n->nbody = list(n->nbody, nod(OAS, nh, call));
......@@ -2813,8 +2809,8 @@ genhash(Sym *sym, Type *t)
na = nod(OADDR, nx, N);
na->etype = 1; // no escape to heap
call->list = list(call->list, na);
call->list = list(call->list, nodintconst(size));
call->list = list(call->list, nh);
call->list = list(call->list, nodintconst(size));
fn->nbody = list(fn->nbody, nod(OAS, nh, call));
first = T;
......@@ -2825,16 +2821,28 @@ genhash(Sym *sym, Type *t)
continue;
// Run hash for this field.
hashel = hashfor(t1->type);
// h = hashel(&p.t1, size, h)
call = nod(OCALL, hashel, N);
nx = nod(OXDOT, np, newname(t1->sym)); // TODO: fields from other packages?
na = nod(OADDR, nx, N);
na->etype = 1; // no escape to heap
call->list = list(call->list, na);
call->list = list(call->list, nodintconst(t1->type->width));
call->list = list(call->list, nh);
fn->nbody = list(fn->nbody, nod(OAS, nh, call));
if(algtype1(t1->type, nil) == AMEM) {
hashel = hashmem(t1->type);
// h = memhash(&p.t1, h, size)
call = nod(OCALL, hashel, N);
nx = nod(OXDOT, np, newname(t1->sym)); // TODO: fields from other packages?
na = nod(OADDR, nx, N);
na->etype = 1; // no escape to heap
call->list = list(call->list, na);
call->list = list(call->list, nh);
call->list = list(call->list, nodintconst(t1->type->width));
fn->nbody = list(fn->nbody, nod(OAS, nh, call));
} else {
hashel = hashfor(t1->type);
// h = hashel(&p.t1, h)
call = nod(OCALL, hashel, N);
nx = nod(OXDOT, np, newname(t1->sym)); // TODO: fields from other packages?
na = nod(OADDR, nx, N);
na->etype = 1; // no escape to heap
call->list = list(call->list, na);
call->list = list(call->list, nh);
fn->nbody = list(fn->nbody, nod(OAS, nh, call));
}
}
break;
}
......@@ -2881,7 +2889,7 @@ eqfield(Node *p, Node *q, Node *field)
}
static Node*
eqmemfunc(vlong size, Type *type)
eqmemfunc(vlong size, Type *type, int *needsize)
{
char buf[30];
Node *fn;
......@@ -2889,6 +2897,7 @@ eqmemfunc(vlong size, Type *type)
switch(size) {
default:
fn = syslook("memequal", 1);
*needsize = 1;
break;
case 1:
case 2:
......@@ -2897,6 +2906,7 @@ eqmemfunc(vlong size, Type *type)
case 16:
snprint(buf, sizeof buf, "memequal%d", (int)size*8);
fn = syslook(buf, 1);
*needsize = 0;
break;
}
argtype(fn, type);
......@@ -2905,11 +2915,12 @@ eqmemfunc(vlong size, Type *type)
}
// Return node for
// if !memequal(&p.field, &q.field, size) { return false }
// if !memequal(&p.field, &q.field [, size]) { return false }
static Node*
eqmem(Node *p, Node *q, Node *field, vlong size)
{
Node *nif, *nx, *ny, *call, *r;
int needsize;
nx = nod(OADDR, nod(OXDOT, p, field), N);
nx->etype = 1; // does not escape
......@@ -2918,10 +2929,11 @@ eqmem(Node *p, Node *q, Node *field, vlong size)
typecheck(&nx, Erv);
typecheck(&ny, Erv);
call = nod(OCALL, eqmemfunc(size, nx->type->type), N);
call = nod(OCALL, eqmemfunc(size, nx->type->type, &needsize), N);
call->list = list(call->list, nx);
call->list = list(call->list, ny);
call->list = list(call->list, nodintconst(size));
if(needsize)
call->list = list(call->list, nodintconst(size));
nif = nod(OIF, N, N);
nif->ninit = list(nif->ninit, call);
......@@ -2951,7 +2963,7 @@ geneq(Sym *sym, Type *t)
dclcontext = PEXTERN;
markdcl();
// func sym(p, q *T, s uintptr) bool
// func sym(p, q *T) bool
fn = nod(ODCLFUNC, N, N);
fn->nname = newname(sym);
fn->nname->class = PFUNC;
......@@ -2964,8 +2976,6 @@ geneq(Sym *sym, Type *t)
n = nod(ODCLFIELD, newname(lookup("q")), typenod(ptrto(t)));
tfn->list = list(tfn->list, n);
nq = n->left;
n = nod(ODCLFIELD, newname(lookup("s")), typenod(types[TUINTPTR]));
tfn->list = list(tfn->list, n);
n = nod(ODCLFIELD, N, typenod(types[TBOOL]));
tfn->rlist = list(tfn->rlist, n);
......
......@@ -3194,7 +3194,7 @@ sliceany(Node* n, NodeList **init)
}
static Node*
eqfor(Type *t)
eqfor(Type *t, int *needsize)
{
int a;
Node *n;
......@@ -3213,6 +3213,7 @@ eqfor(Type *t)
n = syslook("memequal", 1);
argtype(n, t);
argtype(n, t);
*needsize = 1;
return n;
}
......@@ -3222,10 +3223,10 @@ eqfor(Type *t)
ntype = nod(OTFUNC, N, N);
ntype->list = list(ntype->list, nod(ODCLFIELD, N, typenod(ptrto(t))));
ntype->list = list(ntype->list, nod(ODCLFIELD, N, typenod(ptrto(t))));
ntype->list = list(ntype->list, nod(ODCLFIELD, N, typenod(types[TUINTPTR])));
ntype->rlist = list(ntype->rlist, nod(ODCLFIELD, N, typenod(types[TBOOL])));
typecheck(&ntype, Etype);
n->type = ntype->type;
*needsize = 0;
return n;
}
......@@ -3245,7 +3246,7 @@ static void
walkcompare(Node **np, NodeList **init)
{
Node *n, *l, *r, *call, *a, *li, *ri, *expr, *cmpl, *cmpr;
int andor, i;
int andor, i, needsize;
Type *t, *t1;
n = *np;
......@@ -3333,10 +3334,11 @@ walkcompare(Node **np, NodeList **init)
}
// Chose not to inline. Call equality function directly.
call = nod(OCALL, eqfor(t), N);
call = nod(OCALL, eqfor(t, &needsize), N);
call->list = list(call->list, l);
call->list = list(call->list, r);
call->list = list(call->list, nodintconst(t->width));
if(needsize)
call->list = list(call->list, nodintconst(t->width));
r = call;
if(n->op != OEQ)
r = nod(ONOT, r, N);
......
......@@ -40,21 +40,45 @@ const (
type typeAlg struct {
// function for hashing objects of this type
// (ptr to object, size, seed) -> hash
hash func(unsafe.Pointer, uintptr, uintptr) uintptr
// (ptr to object, seed) -> hash
hash func(unsafe.Pointer, uintptr) uintptr
// function for comparing objects of this type
// (ptr to object A, ptr to object B, size) -> ==?
equal func(unsafe.Pointer, unsafe.Pointer, uintptr) bool
// (ptr to object A, ptr to object B) -> ==?
equal func(unsafe.Pointer, unsafe.Pointer) bool
}
func memhash0(p unsafe.Pointer, h uintptr) uintptr {
return h
}
func memhash8(p unsafe.Pointer, h uintptr) uintptr {
return memhash(p, h, 1)
}
func memhash16(p unsafe.Pointer, h uintptr) uintptr {
return memhash(p, h, 2)
}
func memhash32(p unsafe.Pointer, h uintptr) uintptr {
return memhash(p, h, 4)
}
func memhash64(p unsafe.Pointer, h uintptr) uintptr {
return memhash(p, h, 8)
}
func memhash128(p unsafe.Pointer, h uintptr) uintptr {
return memhash(p, h, 16)
}
// memhash_varlen is defined in assembly because it needs access
// to the closure. It appears here to provide an argument
// signature for the assembly routine.
func memhash_varlen(p unsafe.Pointer, h uintptr) uintptr
var algarray = [alg_max]typeAlg{
alg_MEM: {memhash, memequal},
alg_MEM0: {memhash, memequal0},
alg_MEM8: {memhash, memequal8},
alg_MEM16: {memhash, memequal16},
alg_MEM32: {memhash, memequal32},
alg_MEM64: {memhash, memequal64},
alg_MEM128: {memhash, memequal128},
alg_MEM: {nil, nil}, // not used
alg_MEM0: {memhash0, memequal0},
alg_MEM8: {memhash8, memequal8},
alg_MEM16: {memhash16, memequal16},
alg_MEM32: {memhash32, memequal32},
alg_MEM64: {memhash64, memequal64},
alg_MEM128: {memhash128, memequal128},
alg_NOEQ: {nil, nil},
alg_NOEQ0: {nil, nil},
alg_NOEQ8: {nil, nil},
......@@ -75,14 +99,14 @@ var algarray = [alg_max]typeAlg{
var useAeshash bool
// in asm_*.s
func aeshash(p unsafe.Pointer, s, h uintptr) uintptr
func aeshash32(p unsafe.Pointer, s, h uintptr) uintptr
func aeshash64(p unsafe.Pointer, s, h uintptr) uintptr
func aeshashstr(p unsafe.Pointer, s, h uintptr) uintptr
func aeshash(p unsafe.Pointer, h, s uintptr) uintptr
func aeshash32(p unsafe.Pointer, h uintptr) uintptr
func aeshash64(p unsafe.Pointer, h uintptr) uintptr
func aeshashstr(p unsafe.Pointer, h uintptr) uintptr
func strhash(a unsafe.Pointer, s, h uintptr) uintptr {
func strhash(a unsafe.Pointer, h uintptr) uintptr {
x := (*stringStruct)(a)
return memhash(x.str, uintptr(x.len), h)
return memhash(x.str, h, uintptr(x.len))
}
// NOTE: Because NaN != NaN, a map can contain any
......@@ -90,7 +114,7 @@ func strhash(a unsafe.Pointer, s, h uintptr) uintptr {
// To avoid long hash chains, we assign a random number
// as the hash value for a NaN.
func f32hash(p unsafe.Pointer, s, h uintptr) uintptr {
func f32hash(p unsafe.Pointer, h uintptr) uintptr {
f := *(*float32)(p)
switch {
case f == 0:
......@@ -98,11 +122,11 @@ func f32hash(p unsafe.Pointer, s, h uintptr) uintptr {
case f != f:
return c1 * (c0 ^ h ^ uintptr(fastrand1())) // any kind of NaN
default:
return memhash(p, 4, h)
return memhash(p, h, 4)
}
}
func f64hash(p unsafe.Pointer, s, h uintptr) uintptr {
func f64hash(p unsafe.Pointer, h uintptr) uintptr {
f := *(*float64)(p)
switch {
case f == 0:
......@@ -110,21 +134,21 @@ func f64hash(p unsafe.Pointer, s, h uintptr) uintptr {
case f != f:
return c1 * (c0 ^ h ^ uintptr(fastrand1())) // any kind of NaN
default:
return memhash(p, 8, h)
return memhash(p, h, 8)
}
}
func c64hash(p unsafe.Pointer, s, h uintptr) uintptr {
func c64hash(p unsafe.Pointer, h uintptr) uintptr {
x := (*[2]float32)(p)
return f32hash(unsafe.Pointer(&x[1]), 4, f32hash(unsafe.Pointer(&x[0]), 4, h))
return f32hash(unsafe.Pointer(&x[1]), f32hash(unsafe.Pointer(&x[0]), h))
}
func c128hash(p unsafe.Pointer, s, h uintptr) uintptr {
func c128hash(p unsafe.Pointer, h uintptr) uintptr {
x := (*[2]float64)(p)
return f64hash(unsafe.Pointer(&x[1]), 8, f64hash(unsafe.Pointer(&x[0]), 8, h))
return f64hash(unsafe.Pointer(&x[1]), f64hash(unsafe.Pointer(&x[0]), h))
}
func interhash(p unsafe.Pointer, s, h uintptr) uintptr {
func interhash(p unsafe.Pointer, h uintptr) uintptr {
a := (*iface)(p)
tab := a.tab
if tab == nil {
......@@ -136,13 +160,13 @@ func interhash(p unsafe.Pointer, s, h uintptr) uintptr {
panic(errorString("hash of unhashable type " + *t._string))
}
if isDirectIface(t) {
return c1 * fn(unsafe.Pointer(&a.data), uintptr(t.size), h^c0)
return c1 * fn(unsafe.Pointer(&a.data), h^c0)
} else {
return c1 * fn(a.data, uintptr(t.size), h^c0)
return c1 * fn(a.data, h^c0)
}
}
func nilinterhash(p unsafe.Pointer, s, h uintptr) uintptr {
func nilinterhash(p unsafe.Pointer, h uintptr) uintptr {
a := (*eface)(p)
t := a._type
if t == nil {
......@@ -153,9 +177,9 @@ func nilinterhash(p unsafe.Pointer, s, h uintptr) uintptr {
panic(errorString("hash of unhashable type " + *t._string))
}
if isDirectIface(t) {
return c1 * fn(unsafe.Pointer(&a.data), uintptr(t.size), h^c0)
return c1 * fn(unsafe.Pointer(&a.data), h^c0)
} else {
return c1 * fn(a.data, uintptr(t.size), h^c0)
return c1 * fn(a.data, h^c0)
}
}
......@@ -166,47 +190,47 @@ func memequal(p, q unsafe.Pointer, size uintptr) bool {
return memeq(p, q, size)
}
func memequal0(p, q unsafe.Pointer, size uintptr) bool {
func memequal0(p, q unsafe.Pointer) bool {
return true
}
func memequal8(p, q unsafe.Pointer, size uintptr) bool {
func memequal8(p, q unsafe.Pointer) bool {
return *(*int8)(p) == *(*int8)(q)
}
func memequal16(p, q unsafe.Pointer, size uintptr) bool {
func memequal16(p, q unsafe.Pointer) bool {
return *(*int16)(p) == *(*int16)(q)
}
func memequal32(p, q unsafe.Pointer, size uintptr) bool {
func memequal32(p, q unsafe.Pointer) bool {
return *(*int32)(p) == *(*int32)(q)
}
func memequal64(p, q unsafe.Pointer, size uintptr) bool {
func memequal64(p, q unsafe.Pointer) bool {
return *(*int64)(p) == *(*int64)(q)
}
func memequal128(p, q unsafe.Pointer, size uintptr) bool {
func memequal128(p, q unsafe.Pointer) bool {
return *(*[2]int64)(p) == *(*[2]int64)(q)
}
func f32equal(p, q unsafe.Pointer, size uintptr) bool {
func f32equal(p, q unsafe.Pointer) bool {
return *(*float32)(p) == *(*float32)(q)
}
func f64equal(p, q unsafe.Pointer, size uintptr) bool {
func f64equal(p, q unsafe.Pointer) bool {
return *(*float64)(p) == *(*float64)(q)
}
func c64equal(p, q unsafe.Pointer, size uintptr) bool {
func c64equal(p, q unsafe.Pointer) bool {
return *(*complex64)(p) == *(*complex64)(q)
}
func c128equal(p, q unsafe.Pointer, size uintptr) bool {
func c128equal(p, q unsafe.Pointer) bool {
return *(*complex128)(p) == *(*complex128)(q)
}
func strequal(p, q unsafe.Pointer, size uintptr) bool {
func strequal(p, q unsafe.Pointer) bool {
return *(*string)(p) == *(*string)(q)
}
func interequal(p, q unsafe.Pointer, size uintptr) bool {
func interequal(p, q unsafe.Pointer) bool {
return ifaceeq(*(*interface {
f()
})(p), *(*interface {
f()
})(q))
}
func nilinterequal(p, q unsafe.Pointer, size uintptr) bool {
func nilinterequal(p, q unsafe.Pointer) bool {
return efaceeq(*(*interface{})(p), *(*interface{})(q))
}
func efaceeq(p, q interface{}) bool {
......@@ -224,9 +248,9 @@ func efaceeq(p, q interface{}) bool {
panic(errorString("comparing uncomparable type " + *t._string))
}
if isDirectIface(t) {
return eq(noescape(unsafe.Pointer(&x.data)), noescape(unsafe.Pointer(&y.data)), uintptr(t.size))
return eq(noescape(unsafe.Pointer(&x.data)), noescape(unsafe.Pointer(&y.data)))
}
return eq(x.data, y.data, uintptr(t.size))
return eq(x.data, y.data)
}
func ifaceeq(p, q interface {
f()
......@@ -246,37 +270,37 @@ func ifaceeq(p, q interface {
panic(errorString("comparing uncomparable type " + *t._string))
}
if isDirectIface(t) {
return eq(noescape(unsafe.Pointer(&x.data)), noescape(unsafe.Pointer(&y.data)), uintptr(t.size))
return eq(noescape(unsafe.Pointer(&x.data)), noescape(unsafe.Pointer(&y.data)))
}
return eq(x.data, y.data, uintptr(t.size))
return eq(x.data, y.data)
}
// Testing adapters for hash quality tests (see hash_test.go)
func stringHash(s string, seed uintptr) uintptr {
return algarray[alg_STRING].hash(noescape(unsafe.Pointer(&s)), unsafe.Sizeof(s), seed)
return algarray[alg_STRING].hash(noescape(unsafe.Pointer(&s)), seed)
}
func bytesHash(b []byte, seed uintptr) uintptr {
s := (*sliceStruct)(unsafe.Pointer(&b))
return algarray[alg_MEM].hash(s.array, uintptr(s.len), seed)
return memhash(s.array, seed, uintptr(s.len))
}
func int32Hash(i uint32, seed uintptr) uintptr {
return algarray[alg_MEM32].hash(noescape(unsafe.Pointer(&i)), 4, seed)
return algarray[alg_MEM32].hash(noescape(unsafe.Pointer(&i)), seed)
}
func int64Hash(i uint64, seed uintptr) uintptr {
return algarray[alg_MEM64].hash(noescape(unsafe.Pointer(&i)), 8, seed)
return algarray[alg_MEM64].hash(noescape(unsafe.Pointer(&i)), seed)
}
func efaceHash(i interface{}, seed uintptr) uintptr {
return algarray[alg_NILINTER].hash(noescape(unsafe.Pointer(&i)), unsafe.Sizeof(i), seed)
return algarray[alg_NILINTER].hash(noescape(unsafe.Pointer(&i)), seed)
}
func ifaceHash(i interface {
F()
}, seed uintptr) uintptr {
return algarray[alg_INTER].hash(noescape(unsafe.Pointer(&i)), unsafe.Sizeof(i), seed)
return algarray[alg_INTER].hash(noescape(unsafe.Pointer(&i)), seed)
}
// Testing adapter for memclr
......@@ -301,12 +325,8 @@ func init() {
cpuid_ecx&(1<<9) != 0 && // sse3 (pshufb)
cpuid_ecx&(1<<19) != 0 { // sse4.1 (pinsr{d,q})
useAeshash = true
algarray[alg_MEM].hash = aeshash
algarray[alg_MEM8].hash = aeshash
algarray[alg_MEM16].hash = aeshash
algarray[alg_MEM32].hash = aeshash32
algarray[alg_MEM64].hash = aeshash64
algarray[alg_MEM128].hash = aeshash
algarray[alg_STRING].hash = aeshashstr
// Initialize with random data so hash collisions will be hard to engineer.
getRandomData(aeskeysched[:])
......
......@@ -887,23 +887,42 @@ TEXT runtime·emptyfunc(SB),0,$0-0
TEXT runtime·abort(SB),NOSPLIT,$0-0
INT $0x3
// memhash_varlen(p unsafe.Pointer, h seed) uintptr
// redirects to memhash(p, h, size) using the size
// stored in the closure.
TEXT runtime·memhash_varlen(SB),NOSPLIT,$16-12
GO_ARGS
NO_LOCAL_POINTERS
MOVL p+0(FP), AX
MOVL h+4(FP), BX
MOVL 4(DX), CX
MOVL AX, 0(SP)
MOVL BX, 4(SP)
MOVL CX, 8(SP)
CALL runtime·memhash(SB)
MOVL 12(SP), AX
MOVL AX, ret+8(FP)
RET
// hash function using AES hardware instructions
TEXT runtime·aeshash(SB),NOSPLIT,$0-16
MOVL p+0(FP), AX // ptr to data
MOVL s+4(FP), CX // size
MOVL s+8(FP), CX // size
LEAL ret+12(FP), DX
JMP runtime·aeshashbody(SB)
TEXT runtime·aeshashstr(SB),NOSPLIT,$0-16
TEXT runtime·aeshashstr(SB),NOSPLIT,$0-12
MOVL p+0(FP), AX // ptr to string object
// s+4(FP) is ignored, it is always sizeof(String)
MOVL 4(AX), CX // length of string
MOVL (AX), AX // string data
LEAL ret+8(FP), DX
JMP runtime·aeshashbody(SB)
// AX: data
// CX: length
TEXT runtime·aeshashbody(SB),NOSPLIT,$0-16
MOVL h+8(FP), X6 // seed to low 64 bits of xmm6
// DX: address to put return value
TEXT runtime·aeshashbody(SB),NOSPLIT,$0-0
MOVL h+4(FP), X6 // seed to low 64 bits of xmm6
PINSRD $2, CX, X6 // size to high 64 bits of xmm6
PSHUFHW $0, X6, X6 // replace size with its low 2 bytes repeated 4 times
MOVO runtime·aeskeysched(SB), X7
......@@ -934,7 +953,7 @@ aes0to15:
AESENC X6, X0
AESENC X7, X0
AESENC X7, X0
MOVL X0, ret+12(FP)
MOVL X0, (DX)
RET
endofpage:
......@@ -947,13 +966,13 @@ endofpage:
AESENC X6, X0
AESENC X7, X0
AESENC X7, X0
MOVL X0, ret+12(FP)
MOVL X0, (DX)
RET
aes0:
// return input seed
MOVL h+8(FP), AX
MOVL AX, ret+12(FP)
MOVL h+4(FP), AX
MOVL AX, (DX)
RET
aes16:
......@@ -961,7 +980,7 @@ aes16:
AESENC X6, X0
AESENC X7, X0
AESENC X7, X0
MOVL X0, ret+12(FP)
MOVL X0, (DX)
RET
......@@ -980,7 +999,7 @@ aes17to32:
// combine results
PXOR X1, X0
MOVL X0, ret+12(FP)
MOVL X0, (DX)
RET
aes33to64:
......@@ -1005,7 +1024,7 @@ aes33to64:
PXOR X2, X0
PXOR X3, X1
PXOR X1, X0
MOVL X0, ret+12(FP)
MOVL X0, (DX)
RET
aes65plus:
......@@ -1059,29 +1078,27 @@ aesloop:
PXOR X2, X0
PXOR X3, X1
PXOR X1, X0
MOVL X0, ret+12(FP)
MOVL X0, (DX)
RET
TEXT runtime·aeshash32(SB),NOSPLIT,$0-16
TEXT runtime·aeshash32(SB),NOSPLIT,$0-12
MOVL p+0(FP), AX // ptr to data
// s+4(FP) is ignored, it is always sizeof(int32)
MOVL h+8(FP), X0 // seed
MOVL h+4(FP), X0 // seed
PINSRD $1, (AX), X0 // data
AESENC runtime·aeskeysched+0(SB), X0
AESENC runtime·aeskeysched+16(SB), X0
AESENC runtime·aeskeysched+32(SB), X0
MOVL X0, ret+12(FP)
MOVL X0, ret+8(FP)
RET
TEXT runtime·aeshash64(SB),NOSPLIT,$0-16
TEXT runtime·aeshash64(SB),NOSPLIT,$0-12
MOVL p+0(FP), AX // ptr to data
// s+4(FP) is ignored, it is always sizeof(int64)
MOVQ (AX), X0 // data
PINSRD $2, h+8(FP), X0 // seed
PINSRD $2, h+4(FP), X0 // seed
AESENC runtime·aeskeysched+0(SB), X0
AESENC runtime·aeskeysched+16(SB), X0
AESENC runtime·aeskeysched+32(SB), X0
MOVL X0, ret+12(FP)
MOVL X0, ret+8(FP)
RET
// simple mask to get rid of data in the high part of the register.
......@@ -1260,6 +1277,20 @@ TEXT runtime·memeq(SB),NOSPLIT,$0-13
MOVB AX, ret+12(FP)
RET
// memequal_varlen(a, b unsafe.Pointer) bool
TEXT runtime·memequal_varlen(SB),NOSPLIT,$0-9
MOVL a+0(FP), SI
MOVL b+4(FP), DI
CMPL SI, DI
JEQ eq
MOVL 4(DX), BX // compiler stores size at offset 4 in the closure
CALL runtime·memeqbody(SB)
MOVB AX, ret+8(FP)
RET
eq:
MOVB $1, ret+8(FP)
RET
// eqstring tests whether two strings are equal.
// See runtime_test.go:eqstring_generic for
// equivalent Go code.
......
......@@ -853,23 +853,42 @@ TEXT runtime·cputicks(SB),NOSPLIT,$0-0
MOVQ AX, ret+0(FP)
RET
// memhash_varlen(p unsafe.Pointer, h seed) uintptr
// redirects to memhash(p, h, size) using the size
// stored in the closure.
TEXT runtime·memhash_varlen(SB),NOSPLIT,$32-24
GO_ARGS
NO_LOCAL_POINTERS
MOVQ p+0(FP), AX
MOVQ h+8(FP), BX
MOVQ 8(DX), CX
MOVQ AX, 0(SP)
MOVQ BX, 8(SP)
MOVQ CX, 16(SP)
CALL runtime·memhash(SB)
MOVQ 24(SP), AX
MOVQ AX, ret+16(FP)
RET
// hash function using AES hardware instructions
TEXT runtime·aeshash(SB),NOSPLIT,$0-32
MOVQ p+0(FP), AX // ptr to data
MOVQ s+8(FP), CX // size
MOVQ s+16(FP), CX // size
LEAQ ret+24(FP), DX
JMP runtime·aeshashbody(SB)
TEXT runtime·aeshashstr(SB),NOSPLIT,$0-32
TEXT runtime·aeshashstr(SB),NOSPLIT,$0-24
MOVQ p+0(FP), AX // ptr to string struct
// s+8(FP) is ignored, it is always sizeof(String)
MOVQ 8(AX), CX // length of string
MOVQ (AX), AX // string data
LEAQ ret+16(FP), DX
JMP runtime·aeshashbody(SB)
// AX: data
// CX: length
TEXT runtime·aeshashbody(SB),NOSPLIT,$0-32
MOVQ h+16(FP), X6 // seed to low 64 bits of xmm6
// DX: address to put return value
TEXT runtime·aeshashbody(SB),NOSPLIT,$0-0
MOVQ h+8(FP), X6 // seed to low 64 bits of xmm6
PINSRQ $1, CX, X6 // size to high 64 bits of xmm6
PSHUFHW $0, X6, X6 // replace size with its low 2 bytes repeated 4 times
MOVO runtime·aeskeysched(SB), X7
......@@ -903,7 +922,7 @@ aes0to15:
AESENC X6, X0
AESENC X7, X0
AESENC X7, X0
MOVQ X0, ret+24(FP)
MOVQ X0, (DX)
RET
endofpage:
......@@ -917,13 +936,13 @@ endofpage:
AESENC X6, X0
AESENC X7, X0
AESENC X7, X0
MOVQ X0, ret+24(FP)
MOVQ X0, (DX)
RET
aes0:
// return input seed
MOVQ h+16(FP), AX
MOVQ AX, ret+24(FP)
MOVQ h+8(FP), AX
MOVQ AX, (DX)
RET
aes16:
......@@ -931,7 +950,7 @@ aes16:
AESENC X6, X0
AESENC X7, X0
AESENC X7, X0
MOVQ X0, ret+24(FP)
MOVQ X0, (DX)
RET
aes17to32:
......@@ -949,7 +968,7 @@ aes17to32:
// combine results
PXOR X1, X0
MOVQ X0, ret+24(FP)
MOVQ X0, (DX)
RET
aes33to64:
......@@ -974,7 +993,7 @@ aes33to64:
PXOR X2, X0
PXOR X3, X1
PXOR X1, X0
MOVQ X0, ret+24(FP)
MOVQ X0, (DX)
RET
aes65to128:
......@@ -1019,7 +1038,7 @@ aes65to128:
PXOR X2, X0
PXOR X3, X1
PXOR X1, X0
MOVQ X0, ret+24(FP)
MOVQ X0, (DX)
RET
aes129plus:
......@@ -1105,29 +1124,27 @@ aesloop:
PXOR X2, X0
PXOR X3, X1
PXOR X1, X0
MOVQ X0, ret+24(FP)
MOVQ X0, (DX)
RET
TEXT runtime·aeshash32(SB),NOSPLIT,$0-32
TEXT runtime·aeshash32(SB),NOSPLIT,$0-24
MOVQ p+0(FP), AX // ptr to data
// s+8(FP) is ignored, it is always sizeof(int32)
MOVQ h+16(FP), X0 // seed
MOVQ h+8(FP), X0 // seed
PINSRD $2, (AX), X0 // data
AESENC runtime·aeskeysched+0(SB), X0
AESENC runtime·aeskeysched+16(SB), X0
AESENC runtime·aeskeysched+32(SB), X0
MOVQ X0, ret+24(FP)
MOVQ X0, ret+16(FP)
RET
TEXT runtime·aeshash64(SB),NOSPLIT,$0-32
TEXT runtime·aeshash64(SB),NOSPLIT,$0-24
MOVQ p+0(FP), AX // ptr to data
// s+8(FP) is ignored, it is always sizeof(int64)
MOVQ h+16(FP), X0 // seed
MOVQ h+8(FP), X0 // seed
PINSRQ $1, (AX), X0 // data
AESENC runtime·aeskeysched+0(SB), X0
AESENC runtime·aeskeysched+16(SB), X0
AESENC runtime·aeskeysched+32(SB), X0
MOVQ X0, ret+24(FP)
MOVQ X0, ret+16(FP)
RET
// simple mask to get rid of data in the high part of the register.
......@@ -1210,6 +1227,20 @@ TEXT runtime·memeq(SB),NOSPLIT,$0-25
MOVB AX, ret+24(FP)
RET
// memequal_varlen(a, b unsafe.Pointer) bool
TEXT runtime·memequal_varlen(SB),NOSPLIT,$0-17
MOVQ a+0(FP), SI
MOVQ b+8(FP), DI
CMPQ SI, DI
JEQ eq
MOVQ 8(DX), BX // compiler stores size at offset 8 in the closure
CALL runtime·memeqbody(SB)
MOVB AX, ret+16(FP)
RET
eq:
MOVB $1, ret+16(FP)
RET
// eqstring tests whether two strings are equal.
// See runtime_test.go:eqstring_generic for
// equivalent Go code.
......
......@@ -642,6 +642,23 @@ TEXT runtime·cputicks(SB),NOSPLIT,$0-0
MOVQ AX, ret+0(FP)
RET
// memhash_varlen(p unsafe.Pointer, h seed) uintptr
// redirects to memhash(p, h, size) using the size
// stored in the closure.
TEXT runtime·memhash_varlen(SB),NOSPLIT,$20-12
GO_ARGS
NO_LOCAL_POINTERS
MOVL p+0(FP), AX
MOVL h+4(FP), BX
MOVL 4(DX), CX
MOVL AX, 0(SP)
MOVL BX, 4(SP)
MOVL CX, 8(SP)
CALL runtime·memhash(SB)
MOVL 16(SP), AX
MOVL AX, ret+8(FP)
RET
// hash function using AES hardware instructions
// For now, our one amd64p32 system (NaCl) does not
// support using AES instructions, so have not bothered to
......@@ -672,6 +689,20 @@ TEXT runtime·memeq(SB),NOSPLIT,$0-17
MOVB AX, ret+16(FP)
RET
// memequal_varlen(a, b unsafe.Pointer) bool
TEXT runtime·memequal_varlen(SB),NOSPLIT,$0-9
MOVL a+0(FP), SI
MOVL b+4(FP), DI
CMPL SI, DI
JEQ eq
MOVL 4(DX), BX // compiler stores size at offset 4 in the closure
CALL runtime·memeqbody(SB)
MOVB AX, ret+8(FP)
RET
eq:
MOVB $1, ret+8(FP)
RET
// eqstring tests whether two strings are equal.
// See runtime_test.go:eqstring_generic for
// equivalent Go code.
......
......@@ -750,6 +750,23 @@ TEXT runtime·aeshashstr(SB),NOSPLIT,$-4-0
MOVW $0, R0
MOVW (R0), R1
// memhash_varlen(p unsafe.Pointer, h seed) uintptr
// redirects to memhash(p, h, size) using the size
// stored in the closure.
TEXT runtime·memhash_varlen(SB),NOSPLIT,$16-12
GO_ARGS
NO_LOCAL_POINTERS
MOVW p+0(FP), R0
MOVW h+4(FP), R1
MOVW 4(R7), R2
MOVW R0, 4(R13)
MOVW R1, 8(R13)
MOVW R2, 12(R13)
BL runtime·memhash(SB)
MOVW 16(R13), R0
MOVW R0, ret+8(FP)
RET
TEXT runtime·memeq(SB),NOSPLIT,$-4-13
MOVW a+0(FP), R1
MOVW b+4(FP), R2
......@@ -769,6 +786,25 @@ loop:
MOVB R0, ret+12(FP)
RET
// memequal_varlen(a, b unsafe.Pointer) bool
TEXT runtime·memequal_varlen(SB),NOSPLIT,$16-9
MOVW a+0(FP), R0
MOVW b+4(FP), R1
CMP R0, R1
BEQ eq
MOVW 4(R7), R2 // compiler stores size at offset 4 in the closure
MOVW R0, 4(R13)
MOVW R1, 8(R13)
MOVW R2, 12(R13)
BL runtime·memeq(SB)
MOVB 16(R13), R0
MOVB R0, ret+8(FP)
RET
eq:
MOVW $1, R0
MOVB R0, ret+8(FP)
RET
// eqstring tests whether two strings are equal.
// See runtime_test.go:eqstring_generic for
// equivalent Go code.
......
......@@ -900,6 +900,23 @@ TEXT runtime·cputicks(SB),NOSPLIT,$0-8
MOVD R3, ret+0(FP)
RETURN
// memhash_varlen(p unsafe.Pointer, h seed) uintptr
// redirects to memhash(p, h, size) using the size
// stored in the closure.
TEXT runtime·memhash_varlen(SB),NOSPLIT,$40-24
GO_ARGS
NO_LOCAL_POINTERS
MOVD p+0(FP), R3
MOVD h+8(FP), R4
MOVD 8(R11), R5
MOVD R3, 8(R1)
MOVD R4, 16(R1)
MOVD R5, 24(R1)
BL runtime·memhash(SB)
MOVD 32(R1), R3
MOVD R3, ret+16(FP)
RETURN
// AES hashing not implemented for ppc64
TEXT runtime·aeshash(SB),NOSPLIT,$-8-0
MOVW (R0), R1
......@@ -932,6 +949,25 @@ test:
MOVB R0, ret+24(FP)
RETURN
// memequal_varlen(a, b unsafe.Pointer) bool
TEXT runtime·memequal_varlen(SB),NOSPLIT,$40-17
MOVD a+0(FP), R3
MOVD b+8(FP), R4
CMP R3, R4
BEQ eq
MOVD 8(R11), R5 // compiler stores size at offset 8 in the closure
MOVD R3, 8(R1)
MOVD R4, 16(R1)
MOVD R5, 24(R1)
BL runtime·memeq(SB)
MOVBZ 32(R1), R3
MOVB R3, ret+16(FP)
RETURN
eq:
MOVD $1, R3
MOVB R3, ret+16(FP)
RETURN
// eqstring tests whether two strings are equal.
// See runtime_test.go:eqstring_generic for
// equivalent Go code.
......
......@@ -20,9 +20,9 @@ const (
m4 = 2336365089
)
func memhash(p unsafe.Pointer, s, seed uintptr) uintptr {
func memhash(p unsafe.Pointer, seed, s uintptr) uintptr {
if GOARCH == "386" && GOOS != "nacl" && useAeshash {
return aeshash(p, s, seed)
return aeshash(p, seed, s)
}
h := uint32(seed + s*hashkey[0])
tail:
......
......@@ -20,9 +20,9 @@ const (
m4 = 15839092249703872147
)
func memhash(p unsafe.Pointer, s, seed uintptr) uintptr {
func memhash(p unsafe.Pointer, seed, s uintptr) uintptr {
if GOARCH == "amd64" && GOOS != "nacl" && useAeshash {
return aeshash(p, s, seed)
return aeshash(p, seed, s)
}
h := uint64(seed + s*hashkey[0])
tail:
......
......@@ -252,7 +252,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
return unsafe.Pointer(t.elem.zero)
}
alg := t.key.alg
hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
hash := alg.hash(key, uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
......@@ -274,7 +274,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if t.indirectkey {
k = *((*unsafe.Pointer)(k))
}
if alg.equal(key, k, uintptr(t.key.size)) {
if alg.equal(key, k) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
if t.indirectvalue {
v = *((*unsafe.Pointer)(v))
......@@ -300,7 +300,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
return unsafe.Pointer(t.elem.zero), false
}
alg := t.key.alg
hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
hash := alg.hash(key, uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
......@@ -322,7 +322,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
if t.indirectkey {
k = *((*unsafe.Pointer)(k))
}
if alg.equal(key, k, uintptr(t.key.size)) {
if alg.equal(key, k) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
if t.indirectvalue {
v = *((*unsafe.Pointer)(v))
......@@ -343,7 +343,7 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe
return nil, nil
}
alg := t.key.alg
hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
hash := alg.hash(key, uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
......@@ -365,7 +365,7 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe
if t.indirectkey {
k = *((*unsafe.Pointer)(k))
}
if alg.equal(key, k, uintptr(t.key.size)) {
if alg.equal(key, k) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
if t.indirectvalue {
v = *((*unsafe.Pointer)(v))
......@@ -393,7 +393,7 @@ func mapassign1(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
}
alg := t.key.alg
hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
hash := alg.hash(key, uintptr(h.hash0))
if h.buckets == nil {
if checkgc {
......@@ -431,7 +431,7 @@ again:
if t.indirectkey {
k2 = *((*unsafe.Pointer)(k2))
}
if !alg.equal(key, k2, uintptr(t.key.size)) {
if !alg.equal(key, k2) {
continue
}
// already have a mapping for key. Update it.
......@@ -503,7 +503,7 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
return
}
alg := t.key.alg
hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
hash := alg.hash(key, uintptr(h.hash0))
bucket := hash & (uintptr(1)<<h.B - 1)
if h.oldbuckets != nil {
growWork(t, h, bucket)
......@@ -523,7 +523,7 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
if t.indirectkey {
k2 = *((*unsafe.Pointer)(k2))
}
if !alg.equal(key, k2, uintptr(t.key.size)) {
if !alg.equal(key, k2) {
continue
}
memclr(k, uintptr(t.keysize))
......@@ -660,10 +660,10 @@ next:
if t.indirectkey {
k2 = *((*unsafe.Pointer)(k2))
}
if t.reflexivekey || alg.equal(k2, k2, uintptr(t.key.size)) {
if t.reflexivekey || alg.equal(k2, k2) {
// If the item in the oldbucket is not destined for
// the current new bucket in the iteration, skip it.
hash := alg.hash(k2, uintptr(t.key.size), uintptr(h.hash0))
hash := alg.hash(k2, uintptr(h.hash0))
if hash&(uintptr(1)<<it.B-1) != checkBucket {
continue
}
......@@ -697,7 +697,7 @@ next:
if t.indirectkey {
k2 = *((*unsafe.Pointer)(k2))
}
if t.reflexivekey || alg.equal(k2, k2, uintptr(t.key.size)) {
if t.reflexivekey || alg.equal(k2, k2) {
// Check the current hash table for the data.
// This code handles the case where the key
// has been deleted, updated, or deleted and reinserted.
......@@ -804,9 +804,9 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
}
// Compute hash to make our evacuation decision (whether we need
// to send this key/value to bucket x or bucket y).
hash := alg.hash(k2, uintptr(t.key.size), uintptr(h.hash0))
hash := alg.hash(k2, uintptr(h.hash0))
if h.flags&iterator != 0 {
if !t.reflexivekey && !alg.equal(k2, k2, uintptr(t.key.size)) {
if !t.reflexivekey && !alg.equal(k2, k2) {
// If key != key (NaNs), then the hash could be (and probably
// will be) entirely different from the old hash. Moreover,
// it isn't reproducible. Reproducibility is required in the
......
......@@ -21,7 +21,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), 4, uintptr(h.hash0))
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
......@@ -63,7 +63,7 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), 4, uintptr(h.hash0))
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
......@@ -105,7 +105,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), 8, uintptr(h.hash0))
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
......@@ -147,7 +147,7 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), 8, uintptr(h.hash0))
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
......@@ -244,7 +244,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
return unsafe.Pointer(t.elem.zero)
}
dohash:
hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), 2*ptrSize, uintptr(h.hash0))
hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
......@@ -344,7 +344,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
return unsafe.Pointer(t.elem.zero), false
}
dohash:
hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), 2*ptrSize, uintptr(h.hash0))
hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
......
......@@ -503,7 +503,7 @@ func extendRandom(r []byte, n int) {
if w > 16 {
w = 16
}
h := memhash(unsafe.Pointer(&r[n-w]), uintptr(w), uintptr(nanotime()))
h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w))
for i := 0; i < ptrSize && n < len(r); i++ {
r[n] = byte(h)
n++
......