Linux Kernel Hash Functions

A tour of the hash functions scattered through the Linux kernel source: the generic helpers in include/linux/hash.h first, then the ad-hoc hashes that individual subsystems roll for themselves.

No. 0

[/include/linux/hash.h]

#ifndef _LINUX_HASH_H
#define _LINUX_HASH_H
/* Fast hashing routine for ints,  longs and pointers.
   (C) 2002 William Lee Irwin III, IBM */

/*
 * Knuth recommends primes in approximately golden ratio to the maximum
 * integer representable by a machine word for multiplicative hashing.
 * Chuck Lever verified the effectiveness of this technique:
 * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
 *
 * These primes are chosen to be bit-sparse, that is operations on
 * them can use shifts and additions instead of multiplications for
 * machines where multiplications are slow.
 */

#include <asm/types.h>

/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
/*  2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL

#if BITS_PER_LONG == 32
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_32
#define hash_long(val, bits) hash_32(val, bits)
#elif BITS_PER_LONG == 64
#define hash_long(val, bits) hash_64(val, bits)
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_64
#else
#error Wordsize not 32 or 64
#endif

static inline u64 hash_64(u64 val, unsigned int bits)
{
    u64 hash = val;

    /*  Sigh, gcc can't optimise this alone like it does for 32 bits. */
    u64 n = hash;
    n <<= 18;
    hash -= n;
    n <<= 33;
    hash -= n;
    n <<= 3;
    hash += n;
    n <<= 3;
    hash -= n;
    n <<= 4;
    hash += n;
    n <<= 2;
    hash += n;

    /* High bits are more random, so use them. */
    return hash >> (64 - bits);
}

static inline u32 hash_32(u32 val, unsigned int bits)
{
    /* On some cpus multiply is faster, on others gcc will do shifts */
    u32 hash = val * GOLDEN_RATIO_PRIME_32;

    /* High bits are more random, so use them. */
    return hash >> (32 - bits);
}

static inline unsigned long hash_ptr(void *ptr, unsigned int bits)
{
    return hash_long((unsigned long)ptr, bits);
}
#endif /* _LINUX_HASH_H */
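
To see the multiplicative scheme in action, here is a minimal userspace sketch (a hypothetical standalone copy, not kernel code): hash32() re-implements hash_32() above with the same constant, and even sequential keys land in scattered buckets of a 64-entry table.

#include <stdint.h>
#include <stdio.h>

/* Userspace copy of hash_32() above; hash32 is a hypothetical name. */
static inline uint32_t hash32(uint32_t val, unsigned int bits)
{
    uint32_t hash = val * 0x9e370001UL;   /* GOLDEN_RATIO_PRIME_32 */
    return hash >> (32 - bits);           /* keep the well-mixed high bits */
}

int main(void)
{
    for (uint32_t key = 0; key < 8; key++)
        printf("key %u -> bucket %u\n", key, hash32(key, 6));  /* 2^6 = 64 buckets */
    return 0;
}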

No. 1

[/arch/ia64/kernel/unwind.c]

#define UNW_LOG_CACHE_SIZE  7   /* each unw_script is ~256 bytes in size */
#define UNW_CACHE_SIZE      (1 << UNW_LOG_CACHE_SIZE)

#define UNW_LOG_HASH_SIZE   (UNW_LOG_CACHE_SIZE + 1)
#define UNW_HASH_SIZE       (1 << UNW_LOG_HASH_SIZE)

static inline unw_hash_index_t
hash (unsigned long ip)
{
#define hashmagic   0x9e3779b97f4a7c16UL    /* based on ((sqrt(5)-1)/2)*2^64 */

    return (ip >> 4)*hashmagic >> (64 - UNW_LOG_HASH_SIZE);
#undef hashmagic
}
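
hashmagic is the 64-bit golden-ratio constant, approximately ((sqrt(5)-1)/2)*2^64, i.e. 2^64 divided by the golden ratio (Fibonacci hashing). The ip >> 4 drops the low four bits of the instruction pointer, which carry no entropy because IA-64 instruction bundles are 16 bytes long. A hypothetical standalone userspace copy of the same computation:

#include <stdint.h>
#include <stdio.h>

#define UNW_LOG_HASH_SIZE 8   /* UNW_LOG_CACHE_SIZE + 1, from the macros above */

/* Bundle-aligned addresses spread evenly over the 256-slot table. */
static unsigned int unw_hash(uint64_t ip)
{
    return (unsigned int)(((ip >> 4) * 0x9e3779b97f4a7c16ULL)
                          >> (64 - UNW_LOG_HASH_SIZE));
}

int main(void)
{
    for (uint64_t ip = 0xa000000000010000ULL; ip < 0xa000000000010040ULL; ip += 16)
        printf("ip %#llx -> slot %u\n", (unsigned long long)ip, unw_hash(ip));
    return 0;
}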

No. 2

[/fs/block_dev.c]

#define MINORBITS   20
#define MINORMASK   ((1U << MINORBITS) - 1)

#define MAJOR(dev)  ((unsigned int) ((dev) >> MINORBITS))
#define MINOR(dev)  ((unsigned int) ((dev) & MINORMASK))

/*
 * Most likely _very_ bad one - but then it's hardly critical for small
 * /dev and can be fixed when somebody will need really large one.
 * Keep in mind that it will be fed through icache hash function too.
 */
static inline unsigned long hash(dev_t dev)
{
    return MAJOR(dev)+MINOR(dev);
}
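
The self-deprecating comment is honest: major + minor collapses many distinct devices onto the same chain, since any pair with the same sum collides. A quick demonstration (MKDEV built from the macros above):

#include <stdio.h>

#define MINORBITS 20
#define MINORMASK ((1U << MINORBITS) - 1)
#define MKDEV(ma, mi) (((ma) << MINORBITS) | (mi))

static inline unsigned long hash(unsigned int dev)
{
    return (dev >> MINORBITS) + (dev & MINORMASK);   /* MAJOR + MINOR */
}

int main(void)
{
    /* (12,5) and (5,12) have the same sum, so they always collide. */
    printf("hash(12,5) = %lu\n", hash(MKDEV(12, 5)));
    printf("hash(5,12) = %lu\n", hash(MKDEV(5, 12)));
    return 0;
}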

No. 3

[/fs/inode.c]

/*
 * Knuth recommends primes in approximately golden ratio to the maximum
 * integer representable by a machine word for multiplicative hashing.
 * Chuck Lever verified the effectiveness of this technique:
 * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
 *
 * These primes are chosen to be bit-sparse, that is operations on
 * them can use shifts and additions instead of multiplications for
 * machines where multiplications are slow.
 */
#if BITS_PER_LONG == 32
/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
#define GOLDEN_RATIO_PRIME 0x9e370001UL
#elif BITS_PER_LONG == 64
/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
#define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL
#else
#error Define GOLDEN_RATIO_PRIME for your wordsize.
#endif

/* L1 cache line size */
#define L1_CACHE_SHIFT  (CONFIG_X86_L1_CACHE_SHIFT)
#define L1_CACHE_BYTES  (1 << L1_CACHE_SHIFT)

/*
 * Inode lookup is no longer as critical as it used to be:
 * most of the lookups are going to be through the dcache.
 */
#define I_HASHBITS  i_hash_shift
#define I_HASHMASK  i_hash_mask

static unsigned int i_hash_mask;
static unsigned int i_hash_shift;

static inline unsigned long hash(struct super_block *sb, unsigned long hashval)
{
    unsigned long tmp;

    tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
            L1_CACHE_BYTES;
    tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
    return tmp & I_HASHMASK;
}
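
Here the superblock pointer is mixed in so that the same inode number on different filesystems hashes differently, and the final line folds the well-mixed high bits back into the low bits before masking. A standalone sketch with assumed values (the 32-bit GOLDEN_RATIO_PRIME, 64-byte cache lines, a 2^14-bucket table):

#include <stdio.h>

#define GOLDEN_RATIO_PRIME 0x9e370001UL   /* 32-bit constant, assumed */
#define L1_CACHE_BYTES 64                 /* assumed cache line size */

static unsigned int i_hash_shift = 14;              /* assumed: 2^14 buckets */
static unsigned long i_hash_mask = (1UL << 14) - 1;

/* sb stands in for a struct super_block address. */
static unsigned long inode_hash(unsigned long sb, unsigned long hashval)
{
    unsigned long tmp;

    tmp = (hashval * sb) ^ (GOLDEN_RATIO_PRIME + hashval) / L1_CACHE_BYTES;
    tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);  /* fold high bits */
    return tmp & i_hash_mask;
}

int main(void)
{
    /* Same inode number, two different "superblocks": different buckets. */
    printf("%lu\n", inode_hash(0xc0ffee00UL, 42));
    printf("%lu\n", inode_hash(0xdeadb000UL, 42));
    return 0;
}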

No. 4

[/fs/jbd/revoke.c]

/* Borrowed from buffer.c: this is a tried and tested block hash function */
static inline int hash(journal_t *journal, unsigned long block)
{
    struct jbd_revoke_table_s *table = journal->j_revoke;
    int hash_shift = table->hash_shift;

    return ((block << (hash_shift - 6)) ^
        (block >> 13) ^
        (block << (hash_shift - 12))) & (table->hash_size - 1);
}
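
hash_shift is log2 of the revoke table size, so the shift amounts scale with the table. A standalone sketch, assuming hash_shift = 12 (a 4096-entry table) so that both left-shift counts stay non-negative:

#include <stdio.h>

static int block_hash(unsigned long block, int hash_shift)
{
    unsigned long hash_size = 1UL << hash_shift;

    return (int)(((block << (hash_shift - 6)) ^
                  (block >> 13) ^
                  (block << (hash_shift - 12))) & (hash_size - 1));
}

int main(void)
{
    /* Nearby block numbers land in different buckets. */
    for (unsigned long b = 1000; b < 1004; b++)
        printf("block %lu -> bucket %d\n", b, block_hash(b, 12));
    return 0;
}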

No. 5

[/fs/namespace.c]

static int hash_mask, hash_bits;
static kmem_cache_t *mnt_cache; 

static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
    unsigned long tmp = ((unsigned long) mnt / L1_CACHE_BYTES);
    tmp += ((unsigned long) dentry / L1_CACHE_BYTES);
    tmp = tmp + (tmp >> hash_bits);
    return tmp & hash_mask;
}
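
Dividing both pointers by L1_CACHE_BYTES discards low bits that are mostly zero for allocator-aligned objects, and the tmp >> hash_bits term folds higher bits into the bucket index. A standalone sketch with assumed values (64-byte cache lines, 256 buckets; the two addresses stand in for kernel pointers):

#include <stdio.h>

#define L1_CACHE_BYTES 64    /* assumed cache line size */

static int hash_bits = 8;              /* assumed: 2^8 = 256 buckets */
static int hash_mask = (1 << 8) - 1;

static unsigned long mnt_hash(unsigned long mnt, unsigned long dentry)
{
    unsigned long tmp = mnt / L1_CACHE_BYTES;   /* drop alignment zeros */
    tmp += dentry / L1_CACHE_BYTES;
    tmp = tmp + (tmp >> hash_bits);             /* fold in higher bits */
    return tmp & hash_mask;
}

int main(void)
{
    printf("bucket = %lu\n", mnt_hash(0xc1a00040UL, 0xc2b00080UL));
    return 0;
}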

No. 6

[/init/initramfs.c]

static inline int hash(int major, int minor, int ino)
{
    unsigned long tmp = ino + minor + (major << 3);
    tmp += tmp >> 5;
    return tmp & 31;
}
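
initramfs uses this 32-bucket table while unpacking the cpio archive, to recognize hard links by their (major, minor, ino) triple; with so few entries a crude fold is enough. A standalone copy (ini_hash is a hypothetical name):

#include <stdio.h>

static int ini_hash(int major, int minor, int ino)
{
    unsigned long tmp = ino + minor + (major << 3);
    tmp += tmp >> 5;          /* fold bits above the 32-entry range */
    return tmp & 31;
}

int main(void)
{
    printf("bucket = %d\n", ini_hash(8, 1, 12345));
    return 0;
}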

No. 7

[/net/tipc/name_table.c]

static int tipc_nametbl_size = 1024;        /* must be a power of 2 */

static int hash(int x)
{
    return x & (tipc_nametbl_size - 1);
}
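
Masking with size - 1 is only equivalent to reducing modulo the size when the size is a power of two, hence the comment on tipc_nametbl_size. A quick check:

#include <assert.h>

int main(void)
{
    int n = 1024;             /* power of two, as required */
    for (int x = 0; x < 5000; x += 37)
        assert((x & (n - 1)) == (x % n));   /* mask == modulo for x >= 0 */
    return 0;
}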

Reposted from blog.csdn.net/wwchao2012/article/details/80342389