mirror of
https://github.com/FEX-Emu/linux.git
synced 2024-12-22 09:22:37 +00:00
5d52dad27a
The hash for claim and backbone hash in the bridge loop avoidance code receive the same key because they are getting initialized by hash_new with the same key. Lockdep will create a backtrace when they are used recursively. This can be avoided by reinitializing the key directly after the hash_new. Signed-off-by: Sven Eckelmann <sven@narfation.org>
81 lines | 1.9 KiB | C
/*
 * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
 *
 * Simon Wunderlich, Marek Lindner
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */
#include "main.h"
#include "hash.h"
/* clears the hash */
|
|
static void hash_init(struct hashtable_t *hash)
|
|
{
|
|
uint32_t i;
|
|
|
|
for (i = 0 ; i < hash->size; i++) {
|
|
INIT_HLIST_HEAD(&hash->table[i]);
|
|
spin_lock_init(&hash->list_locks[i]);
|
|
}
|
|
}
|
|
|
|
/* free only the hashtable and the hash itself. */
|
|
void hash_destroy(struct hashtable_t *hash)
|
|
{
|
|
kfree(hash->list_locks);
|
|
kfree(hash->table);
|
|
kfree(hash);
|
|
}
|
|
|
|
/* allocates and clears the hash */
|
|
struct hashtable_t *hash_new(uint32_t size)
|
|
{
|
|
struct hashtable_t *hash;
|
|
|
|
hash = kmalloc(sizeof(*hash), GFP_ATOMIC);
|
|
if (!hash)
|
|
return NULL;
|
|
|
|
hash->table = kmalloc(sizeof(*hash->table) * size, GFP_ATOMIC);
|
|
if (!hash->table)
|
|
goto free_hash;
|
|
|
|
hash->list_locks = kmalloc(sizeof(*hash->list_locks) * size,
|
|
GFP_ATOMIC);
|
|
if (!hash->list_locks)
|
|
goto free_table;
|
|
|
|
hash->size = size;
|
|
hash_init(hash);
|
|
return hash;
|
|
|
|
free_table:
|
|
kfree(hash->table);
|
|
free_hash:
|
|
kfree(hash);
|
|
return NULL;
|
|
}
|
|
|
|
void batadv_hash_set_lock_class(struct hashtable_t *hash,
|
|
struct lock_class_key *key)
|
|
{
|
|
uint32_t i;
|
|
|
|
for (i = 0; i < hash->size; i++)
|
|
lockdep_set_class(&hash->list_locks[i], key);
|
|
}
|