2013-11-24 14:54:58 +00:00
|
|
|
/*
|
|
|
|
* fs/kernfs/dir.c - kernfs directory implementation
|
|
|
|
*
|
|
|
|
* Copyright (c) 2001-3 Patrick Mochel
|
|
|
|
* Copyright (c) 2007 SUSE Linux Products GmbH
|
|
|
|
* Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
|
|
|
|
*
|
|
|
|
* This file is released under the GPLv2.
|
|
|
|
*/
|
2013-11-28 19:54:33 +00:00
|
|
|
|
2014-02-03 19:02:55 +00:00
|
|
|
#include <linux/sched.h>
|
2013-11-28 19:54:33 +00:00
|
|
|
#include <linux/fs.h>
|
|
|
|
#include <linux/namei.h>
|
|
|
|
#include <linux/idr.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/security.h>
|
|
|
|
#include <linux/hash.h>
|
|
|
|
|
|
|
|
#include "kernfs-internal.h"
|
|
|
|
|
2013-12-11 19:11:57 +00:00
|
|
|
DEFINE_MUTEX(kernfs_mutex);
|
2013-11-28 19:54:33 +00:00
|
|
|
|
2013-12-11 19:11:54 +00:00
|
|
|
#define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)
|
2013-11-28 19:54:33 +00:00
|
|
|
|
|
|
|
/**
|
2013-12-11 19:11:58 +00:00
|
|
|
* kernfs_name_hash
|
2013-11-28 19:54:33 +00:00
|
|
|
* @name: Null terminated string to hash
|
|
|
|
* @ns: Namespace tag to hash
|
|
|
|
*
|
|
|
|
* Returns 31 bit hash of ns + name (so it fits in an off_t)
|
|
|
|
*/
|
2013-12-11 19:11:58 +00:00
|
|
|
static unsigned int kernfs_name_hash(const char *name, const void *ns)
|
2013-11-28 19:54:33 +00:00
|
|
|
{
|
|
|
|
unsigned long hash = init_name_hash();
|
|
|
|
unsigned int len = strlen(name);
|
|
|
|
while (len--)
|
|
|
|
hash = partial_name_hash(*name++, hash);
|
|
|
|
hash = (end_name_hash(hash) ^ hash_ptr((void *)ns, 31));
|
|
|
|
hash &= 0x7fffffffU;
|
|
|
|
/* Reserve hash numbers 0, 1 and INT_MAX for magic directory entries */
|
|
|
|
if (hash < 1)
|
|
|
|
hash += 2;
|
|
|
|
if (hash >= INT_MAX)
|
|
|
|
hash = INT_MAX - 1;
|
|
|
|
return hash;
|
|
|
|
}
|
|
|
|
|
2013-12-11 19:11:58 +00:00
|
|
|
static int kernfs_name_compare(unsigned int hash, const char *name,
|
|
|
|
const void *ns, const struct kernfs_node *kn)
|
2013-11-28 19:54:33 +00:00
|
|
|
{
|
2013-12-11 19:11:54 +00:00
|
|
|
if (hash != kn->hash)
|
|
|
|
return hash - kn->hash;
|
|
|
|
if (ns != kn->ns)
|
|
|
|
return ns - kn->ns;
|
|
|
|
return strcmp(name, kn->name);
|
2013-11-28 19:54:33 +00:00
|
|
|
}
|
|
|
|
|
2013-12-11 19:11:58 +00:00
|
|
|
/* rbtree ordering callback: order @left relative to the tree node @right */
static int kernfs_sd_compare(const struct kernfs_node *left,
			     const struct kernfs_node *right)
{
	return kernfs_name_compare(left->hash, left->name, left->ns, right);
}
|
|
|
|
|
|
|
|
/**
|
2013-12-11 19:11:58 +00:00
|
|
|
* kernfs_link_sibling - link kernfs_node into sibling rbtree
|
2013-12-11 19:11:53 +00:00
|
|
|
* @kn: kernfs_node of interest
|
2013-11-28 19:54:33 +00:00
|
|
|
*
|
2013-12-11 19:11:53 +00:00
|
|
|
* Link @kn into its sibling rbtree which starts from
|
2013-12-11 19:11:54 +00:00
|
|
|
* @kn->parent->dir.children.
|
2013-11-28 19:54:33 +00:00
|
|
|
*
|
|
|
|
* Locking:
|
2013-12-11 19:11:57 +00:00
|
|
|
* mutex_lock(kernfs_mutex)
|
2013-11-28 19:54:33 +00:00
|
|
|
*
|
|
|
|
* RETURNS:
|
|
|
|
* 0 on success, -EEXIST on failure.
|
|
|
|
*/
|
2013-12-11 19:11:58 +00:00
|
|
|
static int kernfs_link_sibling(struct kernfs_node *kn)
|
2013-11-28 19:54:33 +00:00
|
|
|
{
|
2013-12-11 19:11:54 +00:00
|
|
|
struct rb_node **node = &kn->parent->dir.children.rb_node;
|
2013-11-28 19:54:33 +00:00
|
|
|
struct rb_node *parent = NULL;
|
|
|
|
|
2013-12-11 19:11:56 +00:00
|
|
|
if (kernfs_type(kn) == KERNFS_DIR)
|
2013-12-11 19:11:54 +00:00
|
|
|
kn->parent->dir.subdirs++;
|
2013-11-28 19:54:33 +00:00
|
|
|
|
|
|
|
while (*node) {
|
2013-12-11 19:11:53 +00:00
|
|
|
struct kernfs_node *pos;
|
2013-11-28 19:54:33 +00:00
|
|
|
int result;
|
|
|
|
|
2013-12-11 19:11:53 +00:00
|
|
|
pos = rb_to_kn(*node);
|
2013-11-28 19:54:33 +00:00
|
|
|
parent = *node;
|
2013-12-11 19:11:58 +00:00
|
|
|
result = kernfs_sd_compare(kn, pos);
|
2013-11-28 19:54:33 +00:00
|
|
|
if (result < 0)
|
2013-12-11 19:11:54 +00:00
|
|
|
node = &pos->rb.rb_left;
|
2013-11-28 19:54:33 +00:00
|
|
|
else if (result > 0)
|
2013-12-11 19:11:54 +00:00
|
|
|
node = &pos->rb.rb_right;
|
2013-11-28 19:54:33 +00:00
|
|
|
else
|
|
|
|
return -EEXIST;
|
|
|
|
}
|
|
|
|
/* add new node and rebalance the tree */
|
2013-12-11 19:11:54 +00:00
|
|
|
rb_link_node(&kn->rb, parent, node);
|
|
|
|
rb_insert_color(&kn->rb, &kn->parent->dir.children);
|
2013-11-28 19:54:33 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2013-12-11 19:11:58 +00:00
|
|
|
* kernfs_unlink_sibling - unlink kernfs_node from sibling rbtree
|
2013-12-11 19:11:53 +00:00
|
|
|
* @kn: kernfs_node of interest
|
2013-11-28 19:54:33 +00:00
|
|
|
*
|
kernfs: restructure removal path to fix possible premature return
The recursive nature of kernfs_remove() means that, even if
kernfs_remove() is not allowed to be called multiple times on the same
node, there may be race conditions between removal of parent and its
descendants. While we can claim that kernfs_remove() shouldn't be
called on one of the descendants while the removal of an ancestor is
in progress, such rule is unnecessarily restrictive and very difficult
to enforce. It's better to simply allow invoking kernfs_remove() as
the caller sees fit as long as the caller ensures that the node is
accessible.
The current behavior in such situations is broken. Whoever enters
removal path first takes the node off the hierarchy and then
deactivates. Following removers either return as soon as it notices
that it's not the first one or can't even find the target node as it
has already been removed from the hierarchy. In both cases, the
following removers may finish prematurely while the nodes which should
be removed and drained are still being processed by the first one.
This patch restructures so that multiple removers, whether through
recursion or direction invocation, always follow the following rules.
* When there are multiple concurrent removers, only one puts the base
ref.
* Regardless of which one puts the base ref, all removers are blocked
until the target node is fully deactivated and removed.
To achieve the above, removal path now first marks all descendants
including self REMOVED and then deactivates and unlinks leftmost
descendant one-by-one. kernfs_deactivate() is called directly from
__kernfs_removal() and drops and regrabs kernfs_mutex for each
descendant to drain active refs. As this means that multiple removers
can enter kernfs_deactivate() for the same node, the function is
updated so that it can handle multiple deactivators of the same node -
only one actually deactivates but all wait till drain completion.
The restructured removal path guarantees that a removed node gets
unlinked only after the node is deactivated and drained. Combined
with proper multiple deactivator handling, this guarantees that any
invocation of kernfs_remove() returns only after the node itself and
all its descendants are deactivated, drained and removed.
v2: Draining separated into a separate loop (used to be in the same
loop as unlink) and done from __kernfs_deactivate(). This is to
allow exposing deactivation as a separate interface later.
Root node removal was broken in v1 patch. Fixed.
v3: Revert most of v2 except for root node removal fix and
simplification of KERNFS_REMOVED setting loop.
v4: Refreshed on top of ("kernfs: make kernfs_deactivate() honor
KERNFS_LOCKDEP flag").
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2014-02-03 19:02:56 +00:00
|
|
|
* Try to unlink @kn from its sibling rbtree which starts from
|
|
|
|
* kn->parent->dir.children. Returns %true if @kn was actually
|
|
|
|
* removed, %false if @kn wasn't on the rbtree.
|
2013-11-28 19:54:33 +00:00
|
|
|
*
|
|
|
|
* Locking:
|
2013-12-11 19:11:57 +00:00
|
|
|
* mutex_lock(kernfs_mutex)
|
2013-11-28 19:54:33 +00:00
|
|
|
*/
|
kernfs: restructure removal path to fix possible premature return
The recursive nature of kernfs_remove() means that, even if
kernfs_remove() is not allowed to be called multiple times on the same
node, there may be race conditions between removal of parent and its
descendants. While we can claim that kernfs_remove() shouldn't be
called on one of the descendants while the removal of an ancestor is
in progress, such rule is unnecessarily restrictive and very difficult
to enforce. It's better to simply allow invoking kernfs_remove() as
the caller sees fit as long as the caller ensures that the node is
accessible.
The current behavior in such situations is broken. Whoever enters
removal path first takes the node off the hierarchy and then
deactivates. Following removers either return as soon as it notices
that it's not the first one or can't even find the target node as it
has already been removed from the hierarchy. In both cases, the
following removers may finish prematurely while the nodes which should
be removed and drained are still being processed by the first one.
This patch restructures so that multiple removers, whether through
recursion or direction invocation, always follow the following rules.
* When there are multiple concurrent removers, only one puts the base
ref.
* Regardless of which one puts the base ref, all removers are blocked
until the target node is fully deactivated and removed.
To achieve the above, removal path now first marks all descendants
including self REMOVED and then deactivates and unlinks leftmost
descendant one-by-one. kernfs_deactivate() is called directly from
__kernfs_removal() and drops and regrabs kernfs_mutex for each
descendant to drain active refs. As this means that multiple removers
can enter kernfs_deactivate() for the same node, the function is
updated so that it can handle multiple deactivators of the same node -
only one actually deactivates but all wait till drain completion.
The restructured removal path guarantees that a removed node gets
unlinked only after the node is deactivated and drained. Combined
with proper multiple deactivator handling, this guarantees that any
invocation of kernfs_remove() returns only after the node itself and
all its descendants are deactivated, drained and removed.
v2: Draining separated into a separate loop (used to be in the same
loop as unlink) and done from __kernfs_deactivate(). This is to
allow exposing deactivation as a separate interface later.
Root node removal was broken in v1 patch. Fixed.
v3: Revert most of v2 except for root node removal fix and
simplification of KERNFS_REMOVED setting loop.
v4: Refreshed on top of ("kernfs: make kernfs_deactivate() honor
KERNFS_LOCKDEP flag").
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2014-02-03 19:02:56 +00:00
|
|
|
static bool kernfs_unlink_sibling(struct kernfs_node *kn)
|
2013-11-28 19:54:33 +00:00
|
|
|
{
|
kernfs: restructure removal path to fix possible premature return
The recursive nature of kernfs_remove() means that, even if
kernfs_remove() is not allowed to be called multiple times on the same
node, there may be race conditions between removal of parent and its
descendants. While we can claim that kernfs_remove() shouldn't be
called on one of the descendants while the removal of an ancestor is
in progress, such rule is unnecessarily restrictive and very difficult
to enforce. It's better to simply allow invoking kernfs_remove() as
the caller sees fit as long as the caller ensures that the node is
accessible.
The current behavior in such situations is broken. Whoever enters
removal path first takes the node off the hierarchy and then
deactivates. Following removers either return as soon as it notices
that it's not the first one or can't even find the target node as it
has already been removed from the hierarchy. In both cases, the
following removers may finish prematurely while the nodes which should
be removed and drained are still being processed by the first one.
This patch restructures so that multiple removers, whether through
recursion or direction invocation, always follow the following rules.
* When there are multiple concurrent removers, only one puts the base
ref.
* Regardless of which one puts the base ref, all removers are blocked
until the target node is fully deactivated and removed.
To achieve the above, removal path now first marks all descendants
including self REMOVED and then deactivates and unlinks leftmost
descendant one-by-one. kernfs_deactivate() is called directly from
__kernfs_removal() and drops and regrabs kernfs_mutex for each
descendant to drain active refs. As this means that multiple removers
can enter kernfs_deactivate() for the same node, the function is
updated so that it can handle multiple deactivators of the same node -
only one actually deactivates but all wait till drain completion.
The restructured removal path guarantees that a removed node gets
unlinked only after the node is deactivated and drained. Combined
with proper multiple deactivator handling, this guarantees that any
invocation of kernfs_remove() returns only after the node itself and
all its descendants are deactivated, drained and removed.
v2: Draining separated into a separate loop (used to be in the same
loop as unlink) and done from __kernfs_deactivate(). This is to
allow exposing deactivation as a separate interface later.
Root node removal was broken in v1 patch. Fixed.
v3: Revert most of v2 except for root node removal fix and
simplification of KERNFS_REMOVED setting loop.
v4: Refreshed on top of ("kernfs: make kernfs_deactivate() honor
KERNFS_LOCKDEP flag").
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2014-02-03 19:02:56 +00:00
|
|
|
if (RB_EMPTY_NODE(&kn->rb))
|
|
|
|
return false;
|
|
|
|
|
2013-12-11 19:11:56 +00:00
|
|
|
if (kernfs_type(kn) == KERNFS_DIR)
|
2013-12-11 19:11:54 +00:00
|
|
|
kn->parent->dir.subdirs--;
|
2013-11-28 19:54:33 +00:00
|
|
|
|
2013-12-11 19:11:54 +00:00
|
|
|
rb_erase(&kn->rb, &kn->parent->dir.children);
|
kernfs: restructure removal path to fix possible premature return
The recursive nature of kernfs_remove() means that, even if
kernfs_remove() is not allowed to be called multiple times on the same
node, there may be race conditions between removal of parent and its
descendants. While we can claim that kernfs_remove() shouldn't be
called on one of the descendants while the removal of an ancestor is
in progress, such rule is unnecessarily restrictive and very difficult
to enforce. It's better to simply allow invoking kernfs_remove() as
the caller sees fit as long as the caller ensures that the node is
accessible.
The current behavior in such situations is broken. Whoever enters
removal path first takes the node off the hierarchy and then
deactivates. Following removers either return as soon as it notices
that it's not the first one or can't even find the target node as it
has already been removed from the hierarchy. In both cases, the
following removers may finish prematurely while the nodes which should
be removed and drained are still being processed by the first one.
This patch restructures so that multiple removers, whether through
recursion or direction invocation, always follow the following rules.
* When there are multiple concurrent removers, only one puts the base
ref.
* Regardless of which one puts the base ref, all removers are blocked
until the target node is fully deactivated and removed.
To achieve the above, removal path now first marks all descendants
including self REMOVED and then deactivates and unlinks leftmost
descendant one-by-one. kernfs_deactivate() is called directly from
__kernfs_removal() and drops and regrabs kernfs_mutex for each
descendant to drain active refs. As this means that multiple removers
can enter kernfs_deactivate() for the same node, the function is
updated so that it can handle multiple deactivators of the same node -
only one actually deactivates but all wait till drain completion.
The restructured removal path guarantees that a removed node gets
unlinked only after the node is deactivated and drained. Combined
with proper multiple deactivator handling, this guarantees that any
invocation of kernfs_remove() returns only after the node itself and
all its descendants are deactivated, drained and removed.
v2: Draining separated into a separate loop (used to be in the same
loop as unlink) and done from __kernfs_deactivate(). This is to
allow exposing deactivation as a separate interface later.
Root node removal was broken in v1 patch. Fixed.
v3: Revert most of v2 except for root node removal fix and
simplification of KERNFS_REMOVED setting loop.
v4: Refreshed on top of ("kernfs: make kernfs_deactivate() honor
KERNFS_LOCKDEP flag").
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2014-02-03 19:02:56 +00:00
|
|
|
RB_CLEAR_NODE(&kn->rb);
|
|
|
|
return true;
|
2013-11-28 19:54:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2013-12-11 19:11:58 +00:00
|
|
|
* kernfs_get_active - get an active reference to kernfs_node
|
2013-12-11 19:11:53 +00:00
|
|
|
* @kn: kernfs_node to get an active reference to
|
2013-11-28 19:54:33 +00:00
|
|
|
*
|
2013-12-11 19:11:53 +00:00
|
|
|
* Get an active reference of @kn. This function is noop if @kn
|
2013-11-28 19:54:33 +00:00
|
|
|
* is NULL.
|
|
|
|
*
|
|
|
|
* RETURNS:
|
2013-12-11 19:11:53 +00:00
|
|
|
* Pointer to @kn on success, NULL on failure.
|
2013-11-28 19:54:33 +00:00
|
|
|
*/
|
2013-12-11 19:11:58 +00:00
|
|
|
struct kernfs_node *kernfs_get_active(struct kernfs_node *kn)
|
2013-11-28 19:54:33 +00:00
|
|
|
{
|
2013-12-11 19:11:53 +00:00
|
|
|
if (unlikely(!kn))
|
2013-11-28 19:54:33 +00:00
|
|
|
return NULL;
|
|
|
|
|
2014-01-13 22:13:39 +00:00
|
|
|
if (!atomic_inc_unless_negative(&kn->active))
|
|
|
|
return NULL;
|
2014-01-10 13:57:25 +00:00
|
|
|
|
2014-01-13 22:39:52 +00:00
|
|
|
if (kn->flags & KERNFS_LOCKDEP)
|
2014-01-13 22:13:39 +00:00
|
|
|
rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_);
|
|
|
|
return kn;
|
2013-11-28 19:54:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2013-12-11 19:11:58 +00:00
|
|
|
* kernfs_put_active - put an active reference to kernfs_node
|
2013-12-11 19:11:53 +00:00
|
|
|
* @kn: kernfs_node to put an active reference to
|
2013-11-28 19:54:33 +00:00
|
|
|
*
|
2013-12-11 19:11:53 +00:00
|
|
|
* Put an active reference to @kn. This function is noop if @kn
|
2013-11-28 19:54:33 +00:00
|
|
|
* is NULL.
|
|
|
|
*/
|
2013-12-11 19:11:58 +00:00
|
|
|
void kernfs_put_active(struct kernfs_node *kn)
|
2013-11-28 19:54:33 +00:00
|
|
|
{
|
2014-02-03 19:02:55 +00:00
|
|
|
struct kernfs_root *root = kernfs_root(kn);
|
2013-11-28 19:54:33 +00:00
|
|
|
int v;
|
|
|
|
|
2013-12-11 19:11:53 +00:00
|
|
|
if (unlikely(!kn))
|
2013-11-28 19:54:33 +00:00
|
|
|
return;
|
|
|
|
|
2014-01-13 22:39:52 +00:00
|
|
|
if (kn->flags & KERNFS_LOCKDEP)
|
2013-12-11 19:11:53 +00:00
|
|
|
rwsem_release(&kn->dep_map, 1, _RET_IP_);
|
2013-12-11 19:11:54 +00:00
|
|
|
v = atomic_dec_return(&kn->active);
|
2013-12-11 19:11:56 +00:00
|
|
|
if (likely(v != KN_DEACTIVATED_BIAS))
|
2013-11-28 19:54:33 +00:00
|
|
|
return;
|
|
|
|
|
2014-02-03 19:02:55 +00:00
|
|
|
wake_up_all(&root->deactivate_waitq);
|
2013-11-28 19:54:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2014-01-13 22:36:03 +00:00
|
|
|
* kernfs_deactivate - deactivate kernfs_node
|
|
|
|
* @kn: kernfs_node to deactivate
|
2013-11-28 19:54:33 +00:00
|
|
|
*
|
2014-02-03 19:02:57 +00:00
|
|
|
* Deny new active references, drain existing ones and nuke all
|
|
|
|
* existing mmaps. Multiple removers may invoke this function
|
|
|
|
* concurrently on @kn and all will return after deactivation and
|
|
|
|
* draining are complete.
|
2013-11-28 19:54:33 +00:00
|
|
|
*/
|
2014-01-13 22:36:03 +00:00
|
|
|
static void kernfs_deactivate(struct kernfs_node *kn)
|
kernfs: restructure removal path to fix possible premature return
The recursive nature of kernfs_remove() means that, even if
kernfs_remove() is not allowed to be called multiple times on the same
node, there may be race conditions between removal of parent and its
descendants. While we can claim that kernfs_remove() shouldn't be
called on one of the descendants while the removal of an ancestor is
in progress, such rule is unnecessarily restrictive and very difficult
to enforce. It's better to simply allow invoking kernfs_remove() as
the caller sees fit as long as the caller ensures that the node is
accessible.
The current behavior in such situations is broken. Whoever enters
removal path first takes the node off the hierarchy and then
deactivates. Following removers either return as soon as it notices
that it's not the first one or can't even find the target node as it
has already been removed from the hierarchy. In both cases, the
following removers may finish prematurely while the nodes which should
be removed and drained are still being processed by the first one.
This patch restructures so that multiple removers, whether through
recursion or direction invocation, always follow the following rules.
* When there are multiple concurrent removers, only one puts the base
ref.
* Regardless of which one puts the base ref, all removers are blocked
until the target node is fully deactivated and removed.
To achieve the above, removal path now first marks all descendants
including self REMOVED and then deactivates and unlinks leftmost
descendant one-by-one. kernfs_deactivate() is called directly from
__kernfs_removal() and drops and regrabs kernfs_mutex for each
descendant to drain active refs. As this means that multiple removers
can enter kernfs_deactivate() for the same node, the function is
updated so that it can handle multiple deactivators of the same node -
only one actually deactivates but all wait till drain completion.
The restructured removal path guarantees that a removed node gets
unlinked only after the node is deactivated and drained. Combined
with proper multiple deactivator handling, this guarantees that any
invocation of kernfs_remove() returns only after the node itself and
all its descendants are deactivated, drained and removed.
v2: Draining separated into a separate loop (used to be in the same
loop as unlink) and done from __kernfs_deactivate(). This is to
allow exposing deactivation as a separate interface later.
Root node removal was broken in v1 patch. Fixed.
v3: Revert most of v2 except for root node removal fix and
simplification of KERNFS_REMOVED setting loop.
v4: Refreshed on top of ("kernfs: make kernfs_deactivate() honor
KERNFS_LOCKDEP flag").
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2014-02-03 19:02:56 +00:00
|
|
|
__releases(&kernfs_mutex) __acquires(&kernfs_mutex)
|
2013-11-28 19:54:33 +00:00
|
|
|
{
|
2014-02-03 19:02:55 +00:00
|
|
|
struct kernfs_root *root = kernfs_root(kn);
|
2013-11-28 19:54:33 +00:00
|
|
|
|
kernfs: restructure removal path to fix possible premature return
The recursive nature of kernfs_remove() means that, even if
kernfs_remove() is not allowed to be called multiple times on the same
node, there may be race conditions between removal of parent and its
descendants. While we can claim that kernfs_remove() shouldn't be
called on one of the descendants while the removal of an ancestor is
in progress, such rule is unnecessarily restrictive and very difficult
to enforce. It's better to simply allow invoking kernfs_remove() as
the caller sees fit as long as the caller ensures that the node is
accessible.
The current behavior in such situations is broken. Whoever enters
removal path first takes the node off the hierarchy and then
deactivates. Following removers either return as soon as it notices
that it's not the first one or can't even find the target node as it
has already been removed from the hierarchy. In both cases, the
following removers may finish prematurely while the nodes which should
be removed and drained are still being processed by the first one.
This patch restructures so that multiple removers, whether through
recursion or direction invocation, always follow the following rules.
* When there are multiple concurrent removers, only one puts the base
ref.
* Regardless of which one puts the base ref, all removers are blocked
until the target node is fully deactivated and removed.
To achieve the above, removal path now first marks all descendants
including self REMOVED and then deactivates and unlinks leftmost
descendant one-by-one. kernfs_deactivate() is called directly from
__kernfs_removal() and drops and regrabs kernfs_mutex for each
descendant to drain active refs. As this means that multiple removers
can enter kernfs_deactivate() for the same node, the function is
updated so that it can handle multiple deactivators of the same node -
only one actually deactivates but all wait till drain completion.
The restructured removal path guarantees that a removed node gets
unlinked only after the node is deactivated and drained. Combined
with proper multiple deactivator handling, this guarantees that any
invocation of kernfs_remove() returns only after the node itself and
all its descendants are deactivated, drained and removed.
v2: Draining separated into a separate loop (used to be in the same
loop as unlink) and done from __kernfs_deactivate(). This is to
allow exposing deactivation as a separate interface later.
Root node removal was broken in v1 patch. Fixed.
v3: Revert most of v2 except for root node removal fix and
simplification of KERNFS_REMOVED setting loop.
v4: Refreshed on top of ("kernfs: make kernfs_deactivate() honor
KERNFS_LOCKDEP flag").
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2014-02-03 19:02:56 +00:00
|
|
|
lockdep_assert_held(&kernfs_mutex);
|
2014-01-13 22:36:03 +00:00
|
|
|
BUG_ON(!(kn->flags & KERNFS_REMOVED));
|
|
|
|
|
2014-01-13 22:39:52 +00:00
|
|
|
if (!(kernfs_type(kn) & KERNFS_ACTIVE_REF))
|
|
|
|
return;
|
|
|
|
|
kernfs: restructure removal path to fix possible premature return
The recursive nature of kernfs_remove() means that, even if
kernfs_remove() is not allowed to be called multiple times on the same
node, there may be race conditions between removal of parent and its
descendants. While we can claim that kernfs_remove() shouldn't be
called on one of the descendants while the removal of an ancestor is
in progress, such rule is unnecessarily restrictive and very difficult
to enforce. It's better to simply allow invoking kernfs_remove() as
the caller sees fit as long as the caller ensures that the node is
accessible.
The current behavior in such situations is broken. Whoever enters
removal path first takes the node off the hierarchy and then
deactivates. Following removers either return as soon as it notices
that it's not the first one or can't even find the target node as it
has already been removed from the hierarchy. In both cases, the
following removers may finish prematurely while the nodes which should
be removed and drained are still being processed by the first one.
This patch restructures so that multiple removers, whether through
recursion or direction invocation, always follow the following rules.
* When there are multiple concurrent removers, only one puts the base
ref.
* Regardless of which one puts the base ref, all removers are blocked
until the target node is fully deactivated and removed.
To achieve the above, removal path now first marks all descendants
including self REMOVED and then deactivates and unlinks leftmost
descendant one-by-one. kernfs_deactivate() is called directly from
__kernfs_removal() and drops and regrabs kernfs_mutex for each
descendant to drain active refs. As this means that multiple removers
can enter kernfs_deactivate() for the same node, the function is
updated so that it can handle multiple deactivators of the same node -
only one actually deactivates but all wait till drain completion.
The restructured removal path guarantees that a removed node gets
unlinked only after the node is deactivated and drained. Combined
with proper multiple deactivator handling, this guarantees that any
invocation of kernfs_remove() returns only after the node itself and
all its descendants are deactivated, drained and removed.
v2: Draining separated into a separate loop (used to be in the same
loop as unlink) and done from __kernfs_deactivate(). This is to
allow exposing deactivation as a separate interface later.
Root node removal was broken in v1 patch. Fixed.
v3: Revert most of v2 except for root node removal fix and
simplification of KERNFS_REMOVED setting loop.
v4: Refreshed on top of ("kernfs: make kernfs_deactivate() honor
KERNFS_LOCKDEP flag").
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2014-02-03 19:02:56 +00:00
|
|
|
/* only the first invocation on @kn should deactivate it */
|
|
|
|
if (atomic_read(&kn->active) >= 0)
|
|
|
|
atomic_add(KN_DEACTIVATED_BIAS, &kn->active);
|
2014-01-10 13:57:19 +00:00
|
|
|
|
kernfs: restructure removal path to fix possible premature return
The recursive nature of kernfs_remove() means that, even if
kernfs_remove() is not allowed to be called multiple times on the same
node, there may be race conditions between removal of parent and its
descendants. While we can claim that kernfs_remove() shouldn't be
called on one of the descendants while the removal of an ancestor is
in progress, such rule is unnecessarily restrictive and very difficult
to enforce. It's better to simply allow invoking kernfs_remove() as
the caller sees fit as long as the caller ensures that the node is
accessible.
The current behavior in such situations is broken. Whoever enters
removal path first takes the node off the hierarchy and then
deactivates. Following removers either return as soon as it notices
that it's not the first one or can't even find the target node as it
has already been removed from the hierarchy. In both cases, the
following removers may finish prematurely while the nodes which should
be removed and drained are still being processed by the first one.
This patch restructures so that multiple removers, whether through
recursion or direction invocation, always follow the following rules.
* When there are multiple concurrent removers, only one puts the base
ref.
* Regardless of which one puts the base ref, all removers are blocked
until the target node is fully deactivated and removed.
To achieve the above, removal path now first marks all descendants
including self REMOVED and then deactivates and unlinks leftmost
descendant one-by-one. kernfs_deactivate() is called directly from
__kernfs_removal() and drops and regrabs kernfs_mutex for each
descendant to drain active refs. As this means that multiple removers
can enter kernfs_deactivate() for the same node, the function is
updated so that it can handle multiple deactivators of the same node -
only one actually deactivates but all wait till drain completion.
The restructured removal path guarantees that a removed node gets
unlinked only after the node is deactivated and drained. Combined
with proper multiple deactivator handling, this guarantees that any
invocation of kernfs_remove() returns only after the node itself and
all its descendants are deactivated, drained and removed.
v2: Draining separated into a separate loop (used to be in the same
loop as unlink) and done from __kernfs_deactivate(). This is to
allow exposing deactivation as a separate interface later.
Root node removal was broken in v1 patch. Fixed.
v3: Revert most of v2 except for root node removal fix and
simplification of KERNFS_REMOVED setting loop.
v4: Refreshed on top of ("kernfs: make kernfs_deactivate() honor
KERNFS_LOCKDEP flag").
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2014-02-03 19:02:56 +00:00
|
|
|
mutex_unlock(&kernfs_mutex);
|
2014-02-03 19:02:55 +00:00
|
|
|
|
kernfs: restructure removal path to fix possible premature return
The recursive nature of kernfs_remove() means that, even if
kernfs_remove() is not allowed to be called multiple times on the same
node, there may be race conditions between removal of parent and its
descendants. While we can claim that kernfs_remove() shouldn't be
called on one of the descendants while the removal of an ancestor is
in progress, such rule is unnecessarily restrictive and very difficult
to enforce. It's better to simply allow invoking kernfs_remove() as
the caller sees fit as long as the caller ensures that the node is
accessible.
The current behavior in such situations is broken. Whoever enters
removal path first takes the node off the hierarchy and then
deactivates. Following removers either return as soon as it notices
that it's not the first one or can't even find the target node as it
has already been removed from the hierarchy. In both cases, the
following removers may finish prematurely while the nodes which should
be removed and drained are still being processed by the first one.
This patch restructures so that multiple removers, whether through
recursion or direction invocation, always follow the following rules.
* When there are multiple concurrent removers, only one puts the base
ref.
* Regardless of which one puts the base ref, all removers are blocked
until the target node is fully deactivated and removed.
To achieve the above, removal path now first marks all descendants
including self REMOVED and then deactivates and unlinks leftmost
descendant one-by-one. kernfs_deactivate() is called directly from
__kernfs_removal() and drops and regrabs kernfs_mutex for each
descendant to drain active refs. As this means that multiple removers
can enter kernfs_deactivate() for the same node, the function is
updated so that it can handle multiple deactivators of the same node -
only one actually deactivates but all wait till drain completion.
The restructured removal path guarantees that a removed node gets
unlinked only after the node is deactivated and drained. Combined
with proper multiple deactivator handling, this guarantees that any
invocation of kernfs_remove() returns only after the node itself and
all its descendants are deactivated, drained and removed.
v2: Draining separated into a separate loop (used to be in the same
loop as unlink) and done from __kernfs_deactivate(). This is to
allow exposing deactivation as a separate interface later.
Root node removal was broken in v1 patch. Fixed.
v3: Revert most of v2 except for root node removal fix and
simplification of KERNFS_REMOVED setting loop.
v4: Refreshed on top of ("kernfs: make kernfs_deactivate() honor
KERNFS_LOCKDEP flag").
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2014-02-03 19:02:56 +00:00
|
|
|
if (kn->flags & KERNFS_LOCKDEP) {
|
|
|
|
rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
|
|
|
|
if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS)
|
|
|
|
lock_contended(&kn->dep_map, _RET_IP_);
|
|
|
|
}
|
2014-02-03 19:02:55 +00:00
|
|
|
|
kernfs: restructure removal path to fix possible premature return
The recursive nature of kernfs_remove() means that, even if
kernfs_remove() is not allowed to be called multiple times on the same
node, there may be race conditions between removal of parent and its
descendants. While we can claim that kernfs_remove() shouldn't be
called on one of the descendants while the removal of an ancestor is
in progress, such rule is unnecessarily restrictive and very difficult
to enforce. It's better to simply allow invoking kernfs_remove() as
the caller sees fit as long as the caller ensures that the node is
accessible.
The current behavior in such situations is broken. Whoever enters
removal path first takes the node off the hierarchy and then
deactivates. Following removers either return as soon as it notices
that it's not the first one or can't even find the target node as it
has already been removed from the hierarchy. In both cases, the
following removers may finish prematurely while the nodes which should
be removed and drained are still being processed by the first one.
This patch restructures so that multiple removers, whether through
recursion or direction invocation, always follow the following rules.
* When there are multiple concurrent removers, only one puts the base
ref.
* Regardless of which one puts the base ref, all removers are blocked
until the target node is fully deactivated and removed.
To achieve the above, removal path now first marks all descendants
including self REMOVED and then deactivates and unlinks leftmost
descendant one-by-one. kernfs_deactivate() is called directly from
__kernfs_removal() and drops and regrabs kernfs_mutex for each
descendant to drain active refs. As this means that multiple removers
can enter kernfs_deactivate() for the same node, the function is
updated so that it can handle multiple deactivators of the same node -
only one actually deactivates but all wait till drain completion.
The restructured removal path guarantees that a removed node gets
unlinked only after the node is deactivated and drained. Combined
with proper multiple deactivator handling, this guarantees that any
invocation of kernfs_remove() returns only after the node itself and
all its descendants are deactivated, drained and removed.
v2: Draining separated into a separate loop (used to be in the same
loop as unlink) and done from __kernfs_deactivate(). This is to
allow exposing deactivation as a separate interface later.
Root node removal was broken in v1 patch. Fixed.
v3: Revert most of v2 except for root node removal fix and
simplification of KERNFS_REMOVED setting loop.
v4: Refreshed on top of ("kernfs: make kernfs_deactivate() honor
KERNFS_LOCKDEP flag").
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2014-02-03 19:02:56 +00:00
|
|
|
/* but everyone should wait for draining */
|
2014-02-03 19:02:55 +00:00
|
|
|
wait_event(root->deactivate_waitq,
|
|
|
|
atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);
|
2013-11-28 19:54:33 +00:00
|
|
|
|
2014-02-03 19:02:54 +00:00
|
|
|
if (kn->flags & KERNFS_LOCKDEP) {
|
|
|
|
lock_acquired(&kn->dep_map, _RET_IP_);
|
|
|
|
rwsem_release(&kn->dep_map, 1, _RET_IP_);
|
|
|
|
}
|
kernfs: restructure removal path to fix possible premature return
The recursive nature of kernfs_remove() means that, even if
kernfs_remove() is not allowed to be called multiple times on the same
node, there may be race conditions between removal of parent and its
descendants. While we can claim that kernfs_remove() shouldn't be
called on one of the descendants while the removal of an ancestor is
in progress, such rule is unnecessarily restrictive and very difficult
to enforce. It's better to simply allow invoking kernfs_remove() as
the caller sees fit as long as the caller ensures that the node is
accessible.
The current behavior in such situations is broken. Whoever enters
removal path first takes the node off the hierarchy and then
deactivates. Following removers either return as soon as it notices
that it's not the first one or can't even find the target node as it
has already been removed from the hierarchy. In both cases, the
following removers may finish prematurely while the nodes which should
be removed and drained are still being processed by the first one.
This patch restructures so that multiple removers, whether through
recursion or direction invocation, always follow the following rules.
* When there are multiple concurrent removers, only one puts the base
ref.
* Regardless of which one puts the base ref, all removers are blocked
until the target node is fully deactivated and removed.
To achieve the above, removal path now first marks all descendants
including self REMOVED and then deactivates and unlinks leftmost
descendant one-by-one. kernfs_deactivate() is called directly from
__kernfs_removal() and drops and regrabs kernfs_mutex for each
descendant to drain active refs. As this means that multiple removers
can enter kernfs_deactivate() for the same node, the function is
updated so that it can handle multiple deactivators of the same node -
only one actually deactivates but all wait till drain completion.
The restructured removal path guarantees that a removed node gets
unlinked only after the node is deactivated and drained. Combined
with proper multiple deactivator handling, this guarantees that any
invocation of kernfs_remove() returns only after the node itself and
all its descendants are deactivated, drained and removed.
v2: Draining separated into a separate loop (used to be in the same
loop as unlink) and done from __kernfs_deactivate(). This is to
allow exposing deactivation as a separate interface later.
Root node removal was broken in v1 patch. Fixed.
v3: Revert most of v2 except for root node removal fix and
simplification of KERNFS_REMOVED setting loop.
v4: Refreshed on top of ("kernfs: make kernfs_deactivate() honor
KERNFS_LOCKDEP flag").
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2014-02-03 19:02:56 +00:00
|
|
|
|
2014-02-03 19:02:57 +00:00
|
|
|
kernfs_unmap_bin_file(kn);
|
|
|
|
|
kernfs: restructure removal path to fix possible premature return
The recursive nature of kernfs_remove() means that, even if
kernfs_remove() is not allowed to be called multiple times on the same
node, there may be race conditions between removal of parent and its
descendants. While we can claim that kernfs_remove() shouldn't be
called on one of the descendants while the removal of an ancestor is
in progress, such rule is unnecessarily restrictive and very difficult
to enforce. It's better to simply allow invoking kernfs_remove() as
the caller sees fit as long as the caller ensures that the node is
accessible.
The current behavior in such situations is broken. Whoever enters
removal path first takes the node off the hierarchy and then
deactivates. Following removers either return as soon as it notices
that it's not the first one or can't even find the target node as it
has already been removed from the hierarchy. In both cases, the
following removers may finish prematurely while the nodes which should
be removed and drained are still being processed by the first one.
This patch restructures so that multiple removers, whether through
recursion or direction invocation, always follow the following rules.
* When there are multiple concurrent removers, only one puts the base
ref.
* Regardless of which one puts the base ref, all removers are blocked
until the target node is fully deactivated and removed.
To achieve the above, removal path now first marks all descendants
including self REMOVED and then deactivates and unlinks leftmost
descendant one-by-one. kernfs_deactivate() is called directly from
__kernfs_removal() and drops and regrabs kernfs_mutex for each
descendant to drain active refs. As this means that multiple removers
can enter kernfs_deactivate() for the same node, the function is
updated so that it can handle multiple deactivators of the same node -
only one actually deactivates but all wait till drain completion.
The restructured removal path guarantees that a removed node gets
unlinked only after the node is deactivated and drained. Combined
with proper multiple deactivator handling, this guarantees that any
invocation of kernfs_remove() returns only after the node itself and
all its descendants are deactivated, drained and removed.
v2: Draining separated into a separate loop (used to be in the same
loop as unlink) and done from __kernfs_deactivate(). This is to
allow exposing deactivation as a separate interface later.
Root node removal was broken in v1 patch. Fixed.
v3: Revert most of v2 except for root node removal fix and
simplification of KERNFS_REMOVED setting loop.
v4: Refreshed on top of ("kernfs: make kernfs_deactivate() honor
KERNFS_LOCKDEP flag").
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2014-02-03 19:02:56 +00:00
|
|
|
mutex_lock(&kernfs_mutex);
|
2013-11-28 19:54:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2013-12-11 19:11:53 +00:00
|
|
|
* kernfs_get - get a reference count on a kernfs_node
|
|
|
|
* @kn: the target kernfs_node
|
2013-11-28 19:54:33 +00:00
|
|
|
*/
|
2013-12-11 19:11:53 +00:00
|
|
|
void kernfs_get(struct kernfs_node *kn)
|
2013-11-28 19:54:33 +00:00
|
|
|
{
|
2013-12-11 19:11:53 +00:00
|
|
|
if (kn) {
|
2013-12-11 19:11:54 +00:00
|
|
|
WARN_ON(!atomic_read(&kn->count));
|
|
|
|
atomic_inc(&kn->count);
|
2013-11-28 19:54:33 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(kernfs_get);
|
|
|
|
|
|
|
|
/**
 * kernfs_put - put a reference count on a kernfs_node
 * @kn: the target kernfs_node
 *
 * Put a reference count of @kn and destroy it if it reached zero.
 *
 * Dropping the last reference releases the node's name (unless it is
 * static), its iattrs and security context, returns its inode number to
 * the root's ida, and frees the node itself.  The parent reference taken
 * at creation time is then dropped iteratively via the "repeat" loop
 * rather than by recursion, so releasing a deep hierarchy cannot
 * overflow the stack.  When the root node itself is released, the
 * kernfs_root is freed too.
 */
void kernfs_put(struct kernfs_node *kn)
{
	struct kernfs_node *parent;
	struct kernfs_root *root;

	if (!kn || !atomic_dec_and_test(&kn->count))
		return;
	root = kernfs_root(kn);
 repeat:
	/* Moving/renaming is always done while holding reference.
	 * kn->parent won't change beneath us.
	 */
	parent = kn->parent;

	/* freeing a node that was never marked removed indicates a bug */
	WARN(!(kn->flags & KERNFS_REMOVED), "kernfs: free using entry: %s/%s\n",
	     parent ? parent->name : "", kn->name);

	/* a symlink holds a ref on its target; drop it */
	if (kernfs_type(kn) == KERNFS_LINK)
		kernfs_put(kn->symlink.target_kn);
	/* name was kstrdup'd unless the creator passed KERNFS_STATIC_NAME */
	if (!(kn->flags & KERNFS_STATIC_NAME))
		kfree(kn->name);
	if (kn->iattr) {
		if (kn->iattr->ia_secdata)
			security_release_secctx(kn->iattr->ia_secdata,
						kn->iattr->ia_secdata_len);
		simple_xattrs_free(&kn->iattr->xattrs);
	}
	kfree(kn->iattr);
	ida_simple_remove(&root->ino_ida, kn->ino);
	kmem_cache_free(kernfs_node_cache, kn);

	/* drop the creation-time ref on the parent, iteratively */
	kn = parent;
	if (kn) {
		if (atomic_dec_and_test(&kn->count))
			goto repeat;
	} else {
		/* just released the root kn, free @root too */
		ida_destroy(&root->ino_ida);
		kfree(root);
	}
}
EXPORT_SYMBOL_GPL(kernfs_put);
|
|
|
|
|
2013-12-11 19:11:58 +00:00
|
|
|
static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
|
2013-11-28 19:54:33 +00:00
|
|
|
{
|
2013-12-11 19:11:53 +00:00
|
|
|
struct kernfs_node *kn;
|
2013-11-28 19:54:33 +00:00
|
|
|
|
|
|
|
if (flags & LOOKUP_RCU)
|
|
|
|
return -ECHILD;
|
|
|
|
|
2013-12-11 21:02:59 +00:00
|
|
|
/* Always perform fresh lookup for negatives */
|
|
|
|
if (!dentry->d_inode)
|
|
|
|
goto out_bad_unlocked;
|
|
|
|
|
2013-12-11 19:11:53 +00:00
|
|
|
kn = dentry->d_fsdata;
|
2013-12-11 19:11:57 +00:00
|
|
|
mutex_lock(&kernfs_mutex);
|
2013-11-28 19:54:33 +00:00
|
|
|
|
2014-01-13 22:36:03 +00:00
|
|
|
/* The kernfs node has been deleted */
|
|
|
|
if (kn->flags & KERNFS_REMOVED)
|
2013-11-28 19:54:33 +00:00
|
|
|
goto out_bad;
|
|
|
|
|
2013-12-11 19:11:58 +00:00
|
|
|
/* The kernfs node has been moved? */
|
2013-12-11 19:11:54 +00:00
|
|
|
if (dentry->d_parent->d_fsdata != kn->parent)
|
2013-11-28 19:54:33 +00:00
|
|
|
goto out_bad;
|
|
|
|
|
2013-12-11 19:11:58 +00:00
|
|
|
/* The kernfs node has been renamed */
|
2013-12-11 19:11:54 +00:00
|
|
|
if (strcmp(dentry->d_name.name, kn->name) != 0)
|
2013-11-28 19:54:33 +00:00
|
|
|
goto out_bad;
|
|
|
|
|
2013-12-11 19:11:58 +00:00
|
|
|
/* The kernfs node has been moved to a different namespace */
|
2013-12-11 19:11:54 +00:00
|
|
|
if (kn->parent && kernfs_ns_enabled(kn->parent) &&
|
2013-12-11 19:11:55 +00:00
|
|
|
kernfs_info(dentry->d_sb)->ns != kn->ns)
|
2013-11-28 19:54:33 +00:00
|
|
|
goto out_bad;
|
|
|
|
|
2013-12-11 19:11:57 +00:00
|
|
|
mutex_unlock(&kernfs_mutex);
|
2013-11-28 19:54:33 +00:00
|
|
|
out_valid:
|
|
|
|
return 1;
|
|
|
|
out_bad:
|
2013-12-11 19:11:57 +00:00
|
|
|
mutex_unlock(&kernfs_mutex);
|
2013-12-11 21:02:59 +00:00
|
|
|
out_bad_unlocked:
|
|
|
|
/*
|
|
|
|
* @dentry doesn't match the underlying kernfs node, drop the
|
|
|
|
* dentry and force lookup. If we have submounts we must allow the
|
|
|
|
* vfs caches to lie about the state of the filesystem to prevent
|
|
|
|
* leaks and other nasty things, so use check_submounts_and_drop()
|
|
|
|
* instead of d_drop().
|
2013-11-28 19:54:33 +00:00
|
|
|
*/
|
|
|
|
if (check_submounts_and_drop(dentry) != 0)
|
|
|
|
goto out_valid;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-12-11 19:11:58 +00:00
|
|
|
static void kernfs_dop_release(struct dentry *dentry)
|
2013-11-28 19:54:33 +00:00
|
|
|
{
|
|
|
|
kernfs_put(dentry->d_fsdata);
|
|
|
|
}
|
|
|
|
|
2013-12-11 19:11:57 +00:00
|
|
|
/* dentry operations shared by all kernfs dentries */
const struct dentry_operations kernfs_dops = {
	.d_revalidate	= kernfs_dop_revalidate,
	.d_release	= kernfs_dop_release,
};
|
|
|
|
|
2014-01-17 14:58:25 +00:00
|
|
|
/*
 * Allocate and minimally initialize a new kernfs_node for @root.
 * @name is duplicated unless @flags contains KERNFS_STATIC_NAME; an
 * inode number is taken from the root's ida.  The node starts with one
 * ref, zero active refs and KERNFS_REMOVED set - it becomes visible
 * only once it is linked into the hierarchy (kn->parent is left for the
 * caller to set).  Returns NULL on allocation failure, with everything
 * acquired so far released via the goto-cleanup chain below.
 */
static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
					     const char *name, umode_t mode,
					     unsigned flags)
{
	char *dup_name = NULL;
	struct kernfs_node *kn;
	int ret;

	/* take our own copy of @name unless the caller guarantees it lives on */
	if (!(flags & KERNFS_STATIC_NAME)) {
		name = dup_name = kstrdup(name, GFP_KERNEL);
		if (!name)
			return NULL;
	}

	kn = kmem_cache_zalloc(kernfs_node_cache, GFP_KERNEL);
	if (!kn)
		goto err_out1;

	/* ino 0 is never handed out - allocation starts at 1 */
	ret = ida_simple_get(&root->ino_ida, 1, 0, GFP_KERNEL);
	if (ret < 0)
		goto err_out2;
	kn->ino = ret;

	atomic_set(&kn->count, 1);
	atomic_set(&kn->active, 0);
	RB_CLEAR_NODE(&kn->rb);

	kn->name = name;
	kn->mode = mode;
	/* new nodes are "removed" until kernfs_add_one() links them in */
	kn->flags = flags | KERNFS_REMOVED;

	return kn;

 err_out2:
	kmem_cache_free(kernfs_node_cache, kn);
 err_out1:
	kfree(dup_name);
	return NULL;
}
|
|
|
|
|
2014-01-17 14:58:25 +00:00
|
|
|
/*
 * Allocate a new kernfs_node under @parent.  On success the node holds a
 * reference on @parent and has kn->parent set; linking into the
 * children tree is left to kernfs_add_one().  Returns NULL on failure.
 */
struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
				    const char *name, umode_t mode,
				    unsigned flags)
{
	struct kernfs_node *kn;

	kn = __kernfs_new_node(kernfs_root(parent), name, mode, flags);
	if (!kn)
		return NULL;

	kernfs_get(parent);
	kn->parent = parent;
	return kn;
}
|
|
|
|
|
2013-11-28 19:54:33 +00:00
|
|
|
/**
|
2013-12-11 19:11:58 +00:00
|
|
|
* kernfs_add_one - add kernfs_node to parent without warning
|
2013-12-11 19:11:53 +00:00
|
|
|
* @kn: kernfs_node to be added
|
2013-11-28 19:54:33 +00:00
|
|
|
*
|
2014-01-17 14:58:25 +00:00
|
|
|
* The caller must already have initialized @kn->parent. This
|
|
|
|
* function increments nlink of the parent's inode if @kn is a
|
|
|
|
* directory and link into the children list of the parent.
|
2013-11-28 19:54:33 +00:00
|
|
|
*
|
|
|
|
* RETURNS:
|
|
|
|
* 0 on success, -EEXIST if entry with the given name already
|
|
|
|
* exists.
|
|
|
|
*/
|
2014-02-03 19:02:58 +00:00
|
|
|
int kernfs_add_one(struct kernfs_node *kn)
|
2013-11-28 19:54:33 +00:00
|
|
|
{
|
2014-01-17 14:58:25 +00:00
|
|
|
struct kernfs_node *parent = kn->parent;
|
2013-12-11 19:11:55 +00:00
|
|
|
struct kernfs_iattrs *ps_iattr;
|
2014-02-03 19:02:58 +00:00
|
|
|
bool has_ns;
|
2013-11-28 19:54:33 +00:00
|
|
|
int ret;
|
|
|
|
|
2014-02-03 19:02:58 +00:00
|
|
|
mutex_lock(&kernfs_mutex);
|
|
|
|
|
|
|
|
ret = -EINVAL;
|
|
|
|
has_ns = kernfs_ns_enabled(parent);
|
|
|
|
if (WARN(has_ns != (bool)kn->ns, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
|
|
|
|
has_ns ? "required" : "invalid", parent->name, kn->name))
|
|
|
|
goto out_unlock;
|
2013-11-28 19:54:33 +00:00
|
|
|
|
2013-12-11 19:11:56 +00:00
|
|
|
if (kernfs_type(parent) != KERNFS_DIR)
|
2014-02-03 19:02:58 +00:00
|
|
|
goto out_unlock;
|
2013-11-28 19:54:33 +00:00
|
|
|
|
2014-02-03 19:02:58 +00:00
|
|
|
ret = -ENOENT;
|
2014-01-13 22:36:03 +00:00
|
|
|
if (parent->flags & KERNFS_REMOVED)
|
2014-02-03 19:02:58 +00:00
|
|
|
goto out_unlock;
|
2014-01-13 22:36:03 +00:00
|
|
|
|
2013-12-11 19:11:58 +00:00
|
|
|
kn->hash = kernfs_name_hash(kn->name, kn->ns);
|
2013-11-28 19:54:33 +00:00
|
|
|
|
2013-12-11 19:11:58 +00:00
|
|
|
ret = kernfs_link_sibling(kn);
|
2013-11-28 19:54:33 +00:00
|
|
|
if (ret)
|
2014-02-03 19:02:58 +00:00
|
|
|
goto out_unlock;
|
2013-11-28 19:54:33 +00:00
|
|
|
|
|
|
|
/* Update timestamps on the parent */
|
2013-12-11 19:11:54 +00:00
|
|
|
ps_iattr = parent->iattr;
|
2013-11-28 19:54:33 +00:00
|
|
|
if (ps_iattr) {
|
|
|
|
struct iattr *ps_iattrs = &ps_iattr->ia_iattr;
|
|
|
|
ps_iattrs->ia_ctime = ps_iattrs->ia_mtime = CURRENT_TIME;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Mark the entry added into directory tree */
|
2014-01-13 22:36:03 +00:00
|
|
|
kn->flags &= ~KERNFS_REMOVED;
|
2014-02-03 19:02:58 +00:00
|
|
|
ret = 0;
|
|
|
|
out_unlock:
|
2013-12-11 19:11:57 +00:00
|
|
|
mutex_unlock(&kernfs_mutex);
|
2014-02-03 19:02:58 +00:00
|
|
|
return ret;
|
2013-11-28 19:54:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2013-12-11 19:11:53 +00:00
|
|
|
* kernfs_find_ns - find kernfs_node with the given name
|
|
|
|
* @parent: kernfs_node to search under
|
2013-11-28 19:54:33 +00:00
|
|
|
* @name: name to look for
|
|
|
|
* @ns: the namespace tag to use
|
|
|
|
*
|
2013-12-11 19:11:53 +00:00
|
|
|
* Look for kernfs_node with name @name under @parent. Returns pointer to
|
|
|
|
* the found kernfs_node on success, %NULL on failure.
|
2013-11-28 19:54:33 +00:00
|
|
|
*/
|
2013-12-11 19:11:53 +00:00
|
|
|
static struct kernfs_node *kernfs_find_ns(struct kernfs_node *parent,
|
|
|
|
const unsigned char *name,
|
|
|
|
const void *ns)
|
2013-11-28 19:54:33 +00:00
|
|
|
{
|
2013-12-11 19:11:54 +00:00
|
|
|
struct rb_node *node = parent->dir.children.rb_node;
|
2013-11-29 22:19:09 +00:00
|
|
|
bool has_ns = kernfs_ns_enabled(parent);
|
2013-11-28 19:54:33 +00:00
|
|
|
unsigned int hash;
|
|
|
|
|
2013-12-11 19:11:57 +00:00
|
|
|
lockdep_assert_held(&kernfs_mutex);
|
2013-11-28 19:54:33 +00:00
|
|
|
|
|
|
|
if (has_ns != (bool)ns) {
|
2013-12-11 19:11:58 +00:00
|
|
|
WARN(1, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
|
2013-12-11 19:11:54 +00:00
|
|
|
has_ns ? "required" : "invalid", parent->name, name);
|
2013-11-28 19:54:33 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2013-12-11 19:11:58 +00:00
|
|
|
hash = kernfs_name_hash(name, ns);
|
2013-11-28 19:54:33 +00:00
|
|
|
while (node) {
|
2013-12-11 19:11:53 +00:00
|
|
|
struct kernfs_node *kn;
|
2013-11-28 19:54:33 +00:00
|
|
|
int result;
|
|
|
|
|
2013-12-11 19:11:53 +00:00
|
|
|
kn = rb_to_kn(node);
|
2013-12-11 19:11:58 +00:00
|
|
|
result = kernfs_name_compare(hash, name, ns, kn);
|
2013-11-28 19:54:33 +00:00
|
|
|
if (result < 0)
|
|
|
|
node = node->rb_left;
|
|
|
|
else if (result > 0)
|
|
|
|
node = node->rb_right;
|
|
|
|
else
|
2013-12-11 19:11:53 +00:00
|
|
|
return kn;
|
2013-11-28 19:54:33 +00:00
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2013-12-11 19:11:53 +00:00
|
|
|
* kernfs_find_and_get_ns - find and get kernfs_node with the given name
|
|
|
|
* @parent: kernfs_node to search under
|
2013-11-28 19:54:33 +00:00
|
|
|
* @name: name to look for
|
|
|
|
* @ns: the namespace tag to use
|
|
|
|
*
|
2013-12-11 19:11:53 +00:00
|
|
|
* Look for kernfs_node with name @name under @parent and get a reference
|
2013-11-28 19:54:33 +00:00
|
|
|
* if found. This function may sleep and returns pointer to the found
|
2013-12-11 19:11:53 +00:00
|
|
|
* kernfs_node on success, %NULL on failure.
|
2013-11-28 19:54:33 +00:00
|
|
|
*/
|
2013-12-11 19:11:53 +00:00
|
|
|
struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent,
|
|
|
|
const char *name, const void *ns)
|
2013-11-28 19:54:33 +00:00
|
|
|
{
|
2013-12-11 19:11:53 +00:00
|
|
|
struct kernfs_node *kn;
|
2013-11-28 19:54:33 +00:00
|
|
|
|
2013-12-11 19:11:57 +00:00
|
|
|
mutex_lock(&kernfs_mutex);
|
2013-12-11 19:11:53 +00:00
|
|
|
kn = kernfs_find_ns(parent, name, ns);
|
|
|
|
kernfs_get(kn);
|
2013-12-11 19:11:57 +00:00
|
|
|
mutex_unlock(&kernfs_mutex);
|
2013-11-28 19:54:33 +00:00
|
|
|
|
2013-12-11 19:11:53 +00:00
|
|
|
return kn;
|
2013-11-28 19:54:33 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(kernfs_find_and_get_ns);
|
|
|
|
|
2013-11-28 19:54:40 +00:00
|
|
|
/**
|
|
|
|
* kernfs_create_root - create a new kernfs hierarchy
|
2013-12-11 21:03:00 +00:00
|
|
|
* @kdops: optional directory syscall operations for the hierarchy
|
2013-11-28 19:54:40 +00:00
|
|
|
* @priv: opaque data associated with the new directory
|
|
|
|
*
|
|
|
|
* Returns the root of the new hierarchy on success, ERR_PTR() value on
|
|
|
|
* failure.
|
|
|
|
*/
|
2013-12-11 21:03:00 +00:00
|
|
|
struct kernfs_root *kernfs_create_root(struct kernfs_dir_ops *kdops, void *priv)
|
2013-11-28 19:54:40 +00:00
|
|
|
{
|
|
|
|
struct kernfs_root *root;
|
2013-12-11 19:11:53 +00:00
|
|
|
struct kernfs_node *kn;
|
2013-11-28 19:54:40 +00:00
|
|
|
|
|
|
|
root = kzalloc(sizeof(*root), GFP_KERNEL);
|
|
|
|
if (!root)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
2013-11-28 19:54:41 +00:00
|
|
|
ida_init(&root->ino_ida);
|
|
|
|
|
2014-01-17 14:58:25 +00:00
|
|
|
kn = __kernfs_new_node(root, "", S_IFDIR | S_IRUGO | S_IXUGO,
|
|
|
|
KERNFS_DIR);
|
2013-12-11 19:11:53 +00:00
|
|
|
if (!kn) {
|
2013-11-28 19:54:41 +00:00
|
|
|
ida_destroy(&root->ino_ida);
|
2013-11-28 19:54:40 +00:00
|
|
|
kfree(root);
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
}
|
|
|
|
|
2014-01-13 22:36:03 +00:00
|
|
|
kn->flags &= ~KERNFS_REMOVED;
|
2013-12-11 19:11:53 +00:00
|
|
|
kn->priv = priv;
|
2013-12-11 19:11:54 +00:00
|
|
|
kn->dir.root = root;
|
2013-11-28 19:54:40 +00:00
|
|
|
|
2013-12-11 21:03:00 +00:00
|
|
|
root->dir_ops = kdops;
|
2013-12-11 19:11:53 +00:00
|
|
|
root->kn = kn;
|
2014-02-03 19:02:55 +00:00
|
|
|
init_waitqueue_head(&root->deactivate_waitq);
|
2013-11-28 19:54:40 +00:00
|
|
|
|
|
|
|
return root;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* kernfs_destroy_root - destroy a kernfs hierarchy
|
|
|
|
* @root: root of the hierarchy to destroy
|
|
|
|
*
|
|
|
|
* Destroy the hierarchy anchored at @root by removing all existing
|
|
|
|
* directories and destroying @root.
|
|
|
|
*/
|
|
|
|
void kernfs_destroy_root(struct kernfs_root *root)
|
|
|
|
{
|
2013-12-11 19:11:53 +00:00
|
|
|
kernfs_remove(root->kn); /* will also free @root */
|
2013-11-28 19:54:40 +00:00
|
|
|
}
|
|
|
|
|
2013-11-28 19:54:33 +00:00
|
|
|
/**
|
|
|
|
* kernfs_create_dir_ns - create a directory
|
|
|
|
* @parent: parent in which to create a new directory
|
|
|
|
* @name: name of the new directory
|
2013-12-11 21:02:55 +00:00
|
|
|
* @mode: mode of the new directory
|
2013-11-28 19:54:33 +00:00
|
|
|
* @priv: opaque data associated with the new directory
|
|
|
|
* @ns: optional namespace tag of the directory
|
|
|
|
*
|
|
|
|
* Returns the created node on success, ERR_PTR() value on failure.
|
|
|
|
*/
|
2013-12-11 19:11:53 +00:00
|
|
|
struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
|
2013-12-11 21:02:55 +00:00
|
|
|
const char *name, umode_t mode,
|
|
|
|
void *priv, const void *ns)
|
2013-11-28 19:54:33 +00:00
|
|
|
{
|
2013-12-11 19:11:53 +00:00
|
|
|
struct kernfs_node *kn;
|
2013-11-28 19:54:33 +00:00
|
|
|
int rc;
|
|
|
|
|
|
|
|
/* allocate */
|
2014-01-17 14:58:25 +00:00
|
|
|
kn = kernfs_new_node(parent, name, mode | S_IFDIR, KERNFS_DIR);
|
2013-12-11 19:11:53 +00:00
|
|
|
if (!kn)
|
2013-11-28 19:54:33 +00:00
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
2013-12-11 19:11:54 +00:00
|
|
|
kn->dir.root = parent->dir.root;
|
|
|
|
kn->ns = ns;
|
2013-12-11 19:11:53 +00:00
|
|
|
kn->priv = priv;
|
2013-11-28 19:54:33 +00:00
|
|
|
|
|
|
|
/* link in */
|
2014-02-03 19:02:58 +00:00
|
|
|
rc = kernfs_add_one(kn);
|
2013-11-28 19:54:33 +00:00
|
|
|
if (!rc)
|
2013-12-11 19:11:53 +00:00
|
|
|
return kn;
|
2013-11-28 19:54:33 +00:00
|
|
|
|
2013-12-11 19:11:53 +00:00
|
|
|
kernfs_put(kn);
|
2013-11-28 19:54:33 +00:00
|
|
|
return ERR_PTR(rc);
|
|
|
|
}
|
|
|
|
|
2013-12-11 19:11:58 +00:00
|
|
|
static struct dentry *kernfs_iop_lookup(struct inode *dir,
|
|
|
|
struct dentry *dentry,
|
|
|
|
unsigned int flags)
|
2013-11-28 19:54:33 +00:00
|
|
|
{
|
2013-12-11 21:02:59 +00:00
|
|
|
struct dentry *ret;
|
2013-12-11 19:11:53 +00:00
|
|
|
struct kernfs_node *parent = dentry->d_parent->d_fsdata;
|
|
|
|
struct kernfs_node *kn;
|
2013-11-28 19:54:33 +00:00
|
|
|
struct inode *inode;
|
|
|
|
const void *ns = NULL;
|
|
|
|
|
2013-12-11 19:11:57 +00:00
|
|
|
mutex_lock(&kernfs_mutex);
|
2013-11-28 19:54:33 +00:00
|
|
|
|
2013-12-11 19:11:53 +00:00
|
|
|
if (kernfs_ns_enabled(parent))
|
2013-12-11 19:11:55 +00:00
|
|
|
ns = kernfs_info(dir->i_sb)->ns;
|
2013-11-28 19:54:33 +00:00
|
|
|
|
2013-12-11 19:11:53 +00:00
|
|
|
kn = kernfs_find_ns(parent, dentry->d_name.name, ns);
|
2013-11-28 19:54:33 +00:00
|
|
|
|
|
|
|
/* no such entry */
|
2013-12-11 19:11:53 +00:00
|
|
|
if (!kn) {
|
2013-12-11 21:02:59 +00:00
|
|
|
ret = NULL;
|
2013-11-28 19:54:33 +00:00
|
|
|
goto out_unlock;
|
|
|
|
}
|
2013-12-11 19:11:53 +00:00
|
|
|
kernfs_get(kn);
|
|
|
|
dentry->d_fsdata = kn;
|
2013-11-28 19:54:33 +00:00
|
|
|
|
|
|
|
/* attach dentry and inode */
|
2013-12-11 19:11:58 +00:00
|
|
|
inode = kernfs_get_inode(dir->i_sb, kn);
|
2013-11-28 19:54:33 +00:00
|
|
|
if (!inode) {
|
|
|
|
ret = ERR_PTR(-ENOMEM);
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* instantiate and hash dentry */
|
|
|
|
ret = d_materialise_unique(dentry, inode);
|
|
|
|
out_unlock:
|
2013-12-11 19:11:57 +00:00
|
|
|
mutex_unlock(&kernfs_mutex);
|
2013-11-28 19:54:33 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2013-12-11 21:03:00 +00:00
|
|
|
static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry,
|
|
|
|
umode_t mode)
|
|
|
|
{
|
|
|
|
struct kernfs_node *parent = dir->i_private;
|
|
|
|
struct kernfs_dir_ops *kdops = kernfs_root(parent)->dir_ops;
|
|
|
|
|
|
|
|
if (!kdops || !kdops->mkdir)
|
|
|
|
return -EPERM;
|
|
|
|
|
|
|
|
return kdops->mkdir(parent, dentry->d_name.name, mode);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int kernfs_iop_rmdir(struct inode *dir, struct dentry *dentry)
|
|
|
|
{
|
|
|
|
struct kernfs_node *kn = dentry->d_fsdata;
|
|
|
|
struct kernfs_dir_ops *kdops = kernfs_root(kn)->dir_ops;
|
|
|
|
|
|
|
|
if (!kdops || !kdops->rmdir)
|
|
|
|
return -EPERM;
|
|
|
|
|
|
|
|
return kdops->rmdir(kn);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int kernfs_iop_rename(struct inode *old_dir, struct dentry *old_dentry,
|
|
|
|
struct inode *new_dir, struct dentry *new_dentry)
|
|
|
|
{
|
|
|
|
struct kernfs_node *kn = old_dentry->d_fsdata;
|
|
|
|
struct kernfs_node *new_parent = new_dir->i_private;
|
|
|
|
struct kernfs_dir_ops *kdops = kernfs_root(kn)->dir_ops;
|
|
|
|
|
|
|
|
if (!kdops || !kdops->rename)
|
|
|
|
return -EPERM;
|
|
|
|
|
|
|
|
return kdops->rename(kn, new_parent, new_dentry->d_name.name);
|
|
|
|
}
|
|
|
|
|
2013-12-11 19:11:57 +00:00
|
|
|
const struct inode_operations kernfs_dir_iops = {
|
2013-12-11 19:11:58 +00:00
|
|
|
.lookup = kernfs_iop_lookup,
|
|
|
|
.permission = kernfs_iop_permission,
|
|
|
|
.setattr = kernfs_iop_setattr,
|
|
|
|
.getattr = kernfs_iop_getattr,
|
|
|
|
.setxattr = kernfs_iop_setxattr,
|
|
|
|
.removexattr = kernfs_iop_removexattr,
|
|
|
|
.getxattr = kernfs_iop_getxattr,
|
|
|
|
.listxattr = kernfs_iop_listxattr,
|
2013-12-11 21:03:00 +00:00
|
|
|
|
|
|
|
.mkdir = kernfs_iop_mkdir,
|
|
|
|
.rmdir = kernfs_iop_rmdir,
|
|
|
|
.rename = kernfs_iop_rename,
|
2013-11-28 19:54:33 +00:00
|
|
|
};
|
|
|
|
|
2013-12-11 19:11:58 +00:00
|
|
|
static struct kernfs_node *kernfs_leftmost_descendant(struct kernfs_node *pos)
|
2013-11-28 19:54:33 +00:00
|
|
|
{
|
2013-12-11 19:11:53 +00:00
|
|
|
struct kernfs_node *last;
|
2013-11-28 19:54:33 +00:00
|
|
|
|
|
|
|
while (true) {
|
|
|
|
struct rb_node *rbn;
|
|
|
|
|
|
|
|
last = pos;
|
|
|
|
|
2013-12-11 19:11:56 +00:00
|
|
|
if (kernfs_type(pos) != KERNFS_DIR)
|
2013-11-28 19:54:33 +00:00
|
|
|
break;
|
|
|
|
|
2013-12-11 19:11:54 +00:00
|
|
|
rbn = rb_first(&pos->dir.children);
|
2013-11-28 19:54:33 +00:00
|
|
|
if (!rbn)
|
|
|
|
break;
|
|
|
|
|
2013-12-11 19:11:53 +00:00
|
|
|
pos = rb_to_kn(rbn);
|
2013-11-28 19:54:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return last;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2013-12-11 19:11:58 +00:00
|
|
|
* kernfs_next_descendant_post - find the next descendant for post-order walk
|
2013-11-28 19:54:33 +00:00
|
|
|
* @pos: the current position (%NULL to initiate traversal)
|
2013-12-11 19:11:53 +00:00
|
|
|
* @root: kernfs_node whose descendants to walk
|
2013-11-28 19:54:33 +00:00
|
|
|
*
|
|
|
|
* Find the next descendant to visit for post-order traversal of @root's
|
|
|
|
* descendants. @root is included in the iteration and the last node to be
|
|
|
|
* visited.
|
|
|
|
*/
|
2013-12-11 19:11:58 +00:00
|
|
|
static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
|
|
|
|
struct kernfs_node *root)
|
2013-11-28 19:54:33 +00:00
|
|
|
{
|
|
|
|
struct rb_node *rbn;
|
|
|
|
|
2013-12-11 19:11:57 +00:00
|
|
|
lockdep_assert_held(&kernfs_mutex);
|
2013-11-28 19:54:33 +00:00
|
|
|
|
|
|
|
/* if first iteration, visit leftmost descendant which may be root */
|
|
|
|
if (!pos)
|
2013-12-11 19:11:58 +00:00
|
|
|
return kernfs_leftmost_descendant(root);
|
2013-11-28 19:54:33 +00:00
|
|
|
|
|
|
|
/* if we visited @root, we're done */
|
|
|
|
if (pos == root)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
/* if there's an unvisited sibling, visit its leftmost descendant */
|
2013-12-11 19:11:54 +00:00
|
|
|
rbn = rb_next(&pos->rb);
|
2013-11-28 19:54:33 +00:00
|
|
|
if (rbn)
|
2013-12-11 19:11:58 +00:00
|
|
|
return kernfs_leftmost_descendant(rb_to_kn(rbn));
|
2013-11-28 19:54:33 +00:00
|
|
|
|
|
|
|
/* no sibling left, visit parent */
|
2013-12-11 19:11:54 +00:00
|
|
|
return pos->parent;
|
2013-11-28 19:54:33 +00:00
|
|
|
}
|
|
|
|
|
2014-02-03 19:02:58 +00:00
|
|
|
static void __kernfs_remove(struct kernfs_node *kn)
|
2013-11-28 19:54:33 +00:00
|
|
|
{
|
kernfs: restructure removal path to fix possible premature return
The recursive nature of kernfs_remove() means that, even if
kernfs_remove() is not allowed to be called multiple times on the same
node, there may be race conditions between removal of parent and its
descendants. While we can claim that kernfs_remove() shouldn't be
called on one of the descendants while the removal of an ancestor is
in progress, such rule is unnecessarily restrictive and very difficult
to enforce. It's better to simply allow invoking kernfs_remove() as
the caller sees fit as long as the caller ensures that the node is
accessible.
The current behavior in such situations is broken. Whoever enters
removal path first takes the node off the hierarchy and then
deactivates. Following removers either return as soon as it notices
that it's not the first one or can't even find the target node as it
has already been removed from the hierarchy. In both cases, the
following removers may finish prematurely while the nodes which should
be removed and drained are still being processed by the first one.
This patch restructures so that multiple removers, whether through
recursion or direction invocation, always follow the following rules.
* When there are multiple concurrent removers, only one puts the base
ref.
* Regardless of which one puts the base ref, all removers are blocked
until the target node is fully deactivated and removed.
To achieve the above, removal path now first marks all descendants
including self REMOVED and then deactivates and unlinks leftmost
descendant one-by-one. kernfs_deactivate() is called directly from
__kernfs_removal() and drops and regrabs kernfs_mutex for each
descendant to drain active refs. As this means that multiple removers
can enter kernfs_deactivate() for the same node, the function is
updated so that it can handle multiple deactivators of the same node -
only one actually deactivates but all wait till drain completion.
The restructured removal path guarantees that a removed node gets
unlinked only after the node is deactivated and drained. Combined
with proper multiple deactivator handling, this guarantees that any
invocation of kernfs_remove() returns only after the node itself and
all its descendants are deactivated, drained and removed.
v2: Draining separated into a separate loop (used to be in the same
loop as unlink) and done from __kernfs_deactivate(). This is to
allow exposing deactivation as a separate interface later.
Root node removal was broken in v1 patch. Fixed.
v3: Revert most of v2 except for root node removal fix and
simplification of KERNFS_REMOVED setting loop.
v4: Refreshed on top of ("kernfs: make kernfs_deactivate() honor
KERNFS_LOCKDEP flag").
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2014-02-03 19:02:56 +00:00
|
|
|
struct kernfs_node *pos;
|
|
|
|
|
|
|
|
lockdep_assert_held(&kernfs_mutex);
|
2013-11-28 19:54:33 +00:00
|
|
|
|
2014-01-13 21:50:31 +00:00
|
|
|
if (!kn)
|
|
|
|
return;
|
|
|
|
|
2013-12-11 19:11:58 +00:00
|
|
|
pr_debug("kernfs %s: removing\n", kn->name);
|
2013-11-28 19:54:33 +00:00
|
|
|
|
kernfs: restructure removal path to fix possible premature return
The recursive nature of kernfs_remove() means that, even if
kernfs_remove() is not allowed to be called multiple times on the same
node, there may be race conditions between removal of parent and its
descendants. While we can claim that kernfs_remove() shouldn't be
called on one of the descendants while the removal of an ancestor is
in progress, such rule is unnecessarily restrictive and very difficult
to enforce. It's better to simply allow invoking kernfs_remove() as
the caller sees fit as long as the caller ensures that the node is
accessible.
The current behavior in such situations is broken. Whoever enters
removal path first takes the node off the hierarchy and then
deactivates. Following removers either return as soon as it notices
that it's not the first one or can't even find the target node as it
has already been removed from the hierarchy. In both cases, the
following removers may finish prematurely while the nodes which should
be removed and drained are still being processed by the first one.
This patch restructures so that multiple removers, whether through
recursion or direction invocation, always follow the following rules.
* When there are multiple concurrent removers, only one puts the base
ref.
* Regardless of which one puts the base ref, all removers are blocked
until the target node is fully deactivated and removed.
To achieve the above, removal path now first marks all descendants
including self REMOVED and then deactivates and unlinks leftmost
descendant one-by-one. kernfs_deactivate() is called directly from
__kernfs_removal() and drops and regrabs kernfs_mutex for each
descendant to drain active refs. As this means that multiple removers
can enter kernfs_deactivate() for the same node, the function is
updated so that it can handle multiple deactivators of the same node -
only one actually deactivates but all wait till drain completion.
The restructured removal path guarantees that a removed node gets
unlinked only after the node is deactivated and drained. Combined
with proper multiple deactivator handling, this guarantees that any
invocation of kernfs_remove() returns only after the node itself and
all its descendants are deactivated, drained and removed.
v2: Draining separated into a separate loop (used to be in the same
loop as unlink) and done from __kernfs_deactivate(). This is to
allow exposing deactivation as a separate interface later.
Root node removal was broken in v1 patch. Fixed.
v3: Revert most of v2 except for root node removal fix and
simplification of KERNFS_REMOVED setting loop.
v4: Refreshed on top of ("kernfs: make kernfs_deactivate() honor
KERNFS_LOCKDEP flag").
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2014-02-03 19:02:56 +00:00
|
|
|
/* disable lookup and node creation under @kn */
|
|
|
|
pos = NULL;
|
|
|
|
while ((pos = kernfs_next_descendant_post(pos, kn)))
|
|
|
|
pos->flags |= KERNFS_REMOVED;
|
|
|
|
|
|
|
|
/* deactivate and unlink the subtree node-by-node */
|
2013-11-28 19:54:33 +00:00
|
|
|
do {
|
kernfs: restructure removal path to fix possible premature return
The recursive nature of kernfs_remove() means that, even if
kernfs_remove() is not allowed to be called multiple times on the same
node, there may be race conditions between removal of parent and its
descendants. While we can claim that kernfs_remove() shouldn't be
called on one of the descendants while the removal of an ancestor is
in progress, such rule is unnecessarily restrictive and very difficult
to enforce. It's better to simply allow invoking kernfs_remove() as
the caller sees fit as long as the caller ensures that the node is
accessible.
The current behavior in such situations is broken. Whoever enters
removal path first takes the node off the hierarchy and then
deactivates. Following removers either return as soon as it notices
that it's not the first one or can't even find the target node as it
has already been removed from the hierarchy. In both cases, the
following removers may finish prematurely while the nodes which should
be removed and drained are still being processed by the first one.
This patch restructures so that multiple removers, whether through
recursion or direction invocation, always follow the following rules.
* When there are multiple concurrent removers, only one puts the base
ref.
* Regardless of which one puts the base ref, all removers are blocked
until the target node is fully deactivated and removed.
To achieve the above, removal path now first marks all descendants
including self REMOVED and then deactivates and unlinks leftmost
descendant one-by-one. kernfs_deactivate() is called directly from
__kernfs_removal() and drops and regrabs kernfs_mutex for each
descendant to drain active refs. As this means that multiple removers
can enter kernfs_deactivate() for the same node, the function is
updated so that it can handle multiple deactivators of the same node -
only one actually deactivates but all wait till drain completion.
The restructured removal path guarantees that a removed node gets
unlinked only after the node is deactivated and drained. Combined
with proper multiple deactivator handling, this guarantees that any
invocation of kernfs_remove() returns only after the node itself and
all its descendants are deactivated, drained and removed.
v2: Draining separated into a separate loop (used to be in the same
loop as unlink) and done from __kernfs_deactivate(). This is to
allow exposing deactivation as a separate interface later.
Root node removal was broken in v1 patch. Fixed.
v3: Revert most of v2 except for root node removal fix and
simplification of KERNFS_REMOVED setting loop.
v4: Refreshed on top of ("kernfs: make kernfs_deactivate() honor
KERNFS_LOCKDEP flag").
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2014-02-03 19:02:56 +00:00
|
|
|
pos = kernfs_leftmost_descendant(kn);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* kernfs_deactivate() drops kernfs_mutex temporarily and
|
|
|
|
* @pos's base ref could have been put by someone else by
|
|
|
|
* the time the function returns. Make sure it doesn't go
|
|
|
|
* away underneath us.
|
|
|
|
*/
|
|
|
|
kernfs_get(pos);
|
|
|
|
|
|
|
|
kernfs_deactivate(pos);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* kernfs_unlink_sibling() succeeds once per node. Use it
|
|
|
|
* to decide who's responsible for cleanups.
|
|
|
|
*/
|
|
|
|
if (!pos->parent || kernfs_unlink_sibling(pos)) {
|
|
|
|
struct kernfs_iattrs *ps_iattr =
|
|
|
|
pos->parent ? pos->parent->iattr : NULL;
|
|
|
|
|
|
|
|
/* update timestamps on the parent */
|
|
|
|
if (ps_iattr) {
|
|
|
|
ps_iattr->ia_iattr.ia_ctime = CURRENT_TIME;
|
|
|
|
ps_iattr->ia_iattr.ia_mtime = CURRENT_TIME;
|
|
|
|
}
|
|
|
|
|
2014-02-03 19:02:58 +00:00
|
|
|
kernfs_put(pos);
|
kernfs: restructure removal path to fix possible premature return
The recursive nature of kernfs_remove() means that, even if
kernfs_remove() is not allowed to be called multiple times on the same
node, there may be race conditions between removal of parent and its
descendants. While we can claim that kernfs_remove() shouldn't be
called on one of the descendants while the removal of an ancestor is
in progress, such rule is unnecessarily restrictive and very difficult
to enforce. It's better to simply allow invoking kernfs_remove() as
the caller sees fit as long as the caller ensures that the node is
accessible.
The current behavior in such situations is broken. Whoever enters
removal path first takes the node off the hierarchy and then
deactivates. Following removers either return as soon as it notices
that it's not the first one or can't even find the target node as it
has already been removed from the hierarchy. In both cases, the
following removers may finish prematurely while the nodes which should
be removed and drained are still being processed by the first one.
This patch restructures so that multiple removers, whether through
recursion or direction invocation, always follow the following rules.
* When there are multiple concurrent removers, only one puts the base
ref.
* Regardless of which one puts the base ref, all removers are blocked
until the target node is fully deactivated and removed.
To achieve the above, removal path now first marks all descendants
including self REMOVED and then deactivates and unlinks leftmost
descendant one-by-one. kernfs_deactivate() is called directly from
__kernfs_removal() and drops and regrabs kernfs_mutex for each
descendant to drain active refs. As this means that multiple removers
can enter kernfs_deactivate() for the same node, the function is
updated so that it can handle multiple deactivators of the same node -
only one actually deactivates but all wait till drain completion.
The restructured removal path guarantees that a removed node gets
unlinked only after the node is deactivated and drained. Combined
with proper multiple deactivator handling, this guarantees that any
invocation of kernfs_remove() returns only after the node itself and
all its descendants are deactivated, drained and removed.
v2: Draining separated into a separate loop (used to be in the same
loop as unlink) and done from __kernfs_deactivate(). This is to
allow exposing deactivation as a separate interface later.
Root node removal was broken in v1 patch. Fixed.
v3: Revert most of v2 except for root node removal fix and
simplification of KERNFS_REMOVED setting loop.
v4: Refreshed on top of ("kernfs: make kernfs_deactivate() honor
KERNFS_LOCKDEP flag").
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2014-02-03 19:02:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
kernfs_put(pos);
|
|
|
|
} while (pos != kn);
|
2013-11-28 19:54:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2013-12-11 19:11:53 +00:00
|
|
|
* kernfs_remove - remove a kernfs_node recursively
|
|
|
|
* @kn: the kernfs_node to remove
|
2013-11-28 19:54:33 +00:00
|
|
|
*
|
2013-12-11 19:11:53 +00:00
|
|
|
* Remove @kn along with all its subdirectories and files.
|
2013-11-28 19:54:33 +00:00
|
|
|
*/
|
2013-12-11 19:11:53 +00:00
|
|
|
void kernfs_remove(struct kernfs_node *kn)
|
2013-11-28 19:54:33 +00:00
|
|
|
{
|
2014-02-03 19:02:58 +00:00
|
|
|
mutex_lock(&kernfs_mutex);
|
|
|
|
__kernfs_remove(kn);
|
|
|
|
mutex_unlock(&kernfs_mutex);
|
2013-11-28 19:54:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2013-12-11 19:11:53 +00:00
|
|
|
* kernfs_remove_by_name_ns - find a kernfs_node by name and remove it
|
|
|
|
* @parent: parent of the target
|
|
|
|
* @name: name of the kernfs_node to remove
|
|
|
|
* @ns: namespace tag of the kernfs_node to remove
|
2013-11-28 19:54:33 +00:00
|
|
|
*
|
2013-12-11 19:11:53 +00:00
|
|
|
* Look for the kernfs_node with @name and @ns under @parent and remove it.
|
|
|
|
* Returns 0 on success, -ENOENT if such entry doesn't exist.
|
2013-11-28 19:54:33 +00:00
|
|
|
*/
|
2013-12-11 19:11:53 +00:00
|
|
|
int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
|
2013-11-28 19:54:33 +00:00
|
|
|
const void *ns)
|
|
|
|
{
|
2013-12-11 19:11:53 +00:00
|
|
|
struct kernfs_node *kn;
|
2013-11-28 19:54:33 +00:00
|
|
|
|
2013-12-11 19:11:53 +00:00
|
|
|
if (!parent) {
|
2013-12-11 19:11:58 +00:00
|
|
|
WARN(1, KERN_WARNING "kernfs: can not remove '%s', no directory\n",
|
2013-11-28 19:54:33 +00:00
|
|
|
name);
|
|
|
|
return -ENOENT;
|
|
|
|
}
|
|
|
|
|
2014-02-03 19:02:58 +00:00
|
|
|
mutex_lock(&kernfs_mutex);
|
2013-11-28 19:54:33 +00:00
|
|
|
|
2013-12-11 19:11:53 +00:00
|
|
|
kn = kernfs_find_ns(parent, name, ns);
|
|
|
|
if (kn)
|
2014-02-03 19:02:58 +00:00
|
|
|
__kernfs_remove(kn);
|
2013-11-28 19:54:33 +00:00
|
|
|
|
2014-02-03 19:02:58 +00:00
|
|
|
mutex_unlock(&kernfs_mutex);
|
2013-11-28 19:54:33 +00:00
|
|
|
|
2013-12-11 19:11:53 +00:00
|
|
|
if (kn)
|
2013-11-28 19:54:33 +00:00
|
|
|
return 0;
|
|
|
|
else
|
|
|
|
return -ENOENT;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* kernfs_rename_ns - move and rename a kernfs_node
|
2013-12-11 19:11:53 +00:00
|
|
|
* @kn: target node
|
2013-11-28 19:54:33 +00:00
|
|
|
* @new_parent: new parent to put @sd under
|
|
|
|
* @new_name: new name
|
|
|
|
* @new_ns: new namespace tag
|
|
|
|
*/
|
2013-12-11 19:11:53 +00:00
|
|
|
int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
		     const char *new_name, const void *new_ns)
{
	int error;

	mutex_lock(&kernfs_mutex);

	/* neither the source nor the destination may be mid-removal */
	error = -ENOENT;
	if ((kn->flags | new_parent->flags) & KERNFS_REMOVED)
		goto out;

	error = 0;
	if ((kn->parent == new_parent) && (kn->ns == new_ns) &&
	    (strcmp(kn->name, new_name) == 0))
		goto out;	/* nothing to rename */

	error = -EEXIST;
	if (kernfs_find_ns(new_parent, new_name, new_ns))
		goto out;

	/* rename kernfs_node */
	if (strcmp(kn->name, new_name) != 0) {
		error = -ENOMEM;
		new_name = kstrdup(new_name, GFP_KERNEL);
		if (!new_name)
			goto out;

		/*
		 * A KERNFS_STATIC_NAME node borrows its name from the
		 * caller and must not free it; once replaced with our own
		 * kstrdup() copy the flag no longer applies.
		 */
		if (kn->flags & KERNFS_STATIC_NAME)
			kn->flags &= ~KERNFS_STATIC_NAME;
		else
			kfree(kn->name);

		kn->name = new_name;
	}

	/*
	 * Move to the appropriate place in the appropriate directories rbtree.
	 */
	kernfs_unlink_sibling(kn);
	kernfs_get(new_parent);
	kernfs_put(kn->parent);	/* drop ref on the old parent */
	kn->ns = new_ns;
	/* hash depends on both name and ns, so recompute before relinking */
	kn->hash = kernfs_name_hash(kn->name, kn->ns);
	kn->parent = new_parent;
	kernfs_link_sibling(kn);

	error = 0;
 out:
	mutex_unlock(&kernfs_mutex);
	return error;
}
|
|
|
|
|
|
|
|
/* Relationship between s_mode and the DT_xxx types */
|
2013-12-11 19:11:53 +00:00
|
|
|
static inline unsigned char dt_type(struct kernfs_node *kn)
|
2013-11-28 19:54:33 +00:00
|
|
|
{
|
2013-12-11 19:11:54 +00:00
|
|
|
return (kn->mode >> 12) & 15;
|
2013-11-28 19:54:33 +00:00
|
|
|
}
|
|
|
|
|
2013-12-11 19:11:58 +00:00
|
|
|
static int kernfs_dir_fop_release(struct inode *inode, struct file *filp)
|
2013-11-28 19:54:33 +00:00
|
|
|
{
|
|
|
|
kernfs_put(filp->private_data);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-12-11 19:11:58 +00:00
|
|
|
/*
 * Resolve the readdir position for @parent: revalidate the cached cursor
 * @pos (consuming the reference held on it) and, if it is stale or absent,
 * binary-search @parent's children rbtree for @hash.  Finally skip forward
 * past entries whose namespace tag differs from @ns.  Returns the node to
 * emit next, or NULL when iteration is finished.
 */
static struct kernfs_node *kernfs_dir_pos(const void *ns,
	struct kernfs_node *parent, loff_t hash, struct kernfs_node *pos)
{
	if (pos) {
		/* cursor is reusable only if still linked under @parent
		 * at the same hash position */
		int valid = !(pos->flags & KERNFS_REMOVED) &&
			pos->parent == parent && hash == pos->hash;
		kernfs_put(pos);
		if (!valid)
			pos = NULL;
	}
	/* hashes 0, 1 and INT_MAX are reserved magic offsets (dots / EOF) */
	if (!pos && (hash > 1) && (hash < INT_MAX)) {
		struct rb_node *node = parent->dir.children.rb_node;
		while (node) {
			pos = rb_to_kn(node);

			if (hash < pos->hash)
				node = node->rb_left;
			else if (hash > pos->hash)
				node = node->rb_right;
			else
				break;
		}
	}
	/* Skip over entries in the wrong namespace */
	while (pos && pos->ns != ns) {
		struct rb_node *node = rb_next(&pos->rb);
		if (!node)
			pos = NULL;
		else
			pos = rb_to_kn(node);
	}
	return pos;
}
|
|
|
|
|
2013-12-11 19:11:58 +00:00
|
|
|
static struct kernfs_node *kernfs_dir_next_pos(const void *ns,
|
2013-12-11 19:11:53 +00:00
|
|
|
struct kernfs_node *parent, ino_t ino, struct kernfs_node *pos)
|
2013-11-28 19:54:33 +00:00
|
|
|
{
|
2013-12-11 19:11:58 +00:00
|
|
|
pos = kernfs_dir_pos(ns, parent, ino, pos);
|
2013-11-28 19:54:33 +00:00
|
|
|
if (pos)
|
|
|
|
do {
|
2013-12-11 19:11:54 +00:00
|
|
|
struct rb_node *node = rb_next(&pos->rb);
|
2013-11-28 19:54:33 +00:00
|
|
|
if (!node)
|
|
|
|
pos = NULL;
|
|
|
|
else
|
2013-12-11 19:11:53 +00:00
|
|
|
pos = rb_to_kn(node);
|
2013-12-11 19:11:54 +00:00
|
|
|
} while (pos && pos->ns != ns);
|
2013-11-28 19:54:33 +00:00
|
|
|
return pos;
|
|
|
|
}
|
|
|
|
|
2013-12-11 19:11:58 +00:00
|
|
|
/*
 * readdir for kernfs directories.  Iterates children in hash order, using
 * ctx->pos as the resume hash and file->private_data as a pinned cursor
 * node so iteration can restart safely after kernfs_mutex is dropped
 * around dir_emit() (which may fault on the user buffer).
 */
static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
{
	struct dentry *dentry = file->f_path.dentry;
	struct kernfs_node *parent = dentry->d_fsdata;
	struct kernfs_node *pos = file->private_data;
	const void *ns = NULL;

	if (!dir_emit_dots(file, ctx))
		return 0;
	mutex_lock(&kernfs_mutex);

	if (kernfs_ns_enabled(parent))
		ns = kernfs_info(dentry->d_sb)->ns;

	for (pos = kernfs_dir_pos(ns, parent, ctx->pos, pos);
	     pos;
	     pos = kernfs_dir_next_pos(ns, parent, ctx->pos, pos)) {
		const char *name = pos->name;
		unsigned int type = dt_type(pos);
		int len = strlen(name);
		ino_t ino = pos->ino;

		/* record resume point and pin @pos before dropping the lock */
		ctx->pos = pos->hash;
		file->private_data = pos;
		kernfs_get(pos);

		mutex_unlock(&kernfs_mutex);
		if (!dir_emit(ctx, name, len, ino, type))
			return 0;	/* buffer full; resume via cursor */
		mutex_lock(&kernfs_mutex);
	}
	mutex_unlock(&kernfs_mutex);
	file->private_data = NULL;
	ctx->pos = INT_MAX;	/* reserved EOF offset */
	return 0;
}
|
|
|
|
|
2013-12-11 19:11:58 +00:00
|
|
|
static loff_t kernfs_dir_fop_llseek(struct file *file, loff_t offset,
				    int whence)
{
	loff_t ret;
	struct inode *inode = file_inode(file);

	/* serialize f_pos updates against concurrent seekers */
	mutex_lock(&inode->i_mutex);
	ret = generic_file_llseek(file, offset, whence);
	mutex_unlock(&inode->i_mutex);
	return ret;
}
|
|
|
|
|
2013-12-11 19:11:57 +00:00
|
|
|
/* file_operations for kernfs directories */
const struct file_operations kernfs_dir_fops = {
	.read = generic_read_dir,
	.iterate = kernfs_fop_readdir,
	.release = kernfs_dir_fop_release,
	.llseek = kernfs_dir_fop_llseek,
};
|