locking, sched, cgroups: Annotate release_list_lock as raw
The release_list_lock can be taken in atomic context and therefore
cannot be preempted on -rt - annotate it.

In mainline this change documents the low level nature of the lock -
otherwise there's no functional difference. Lockdep and Sparse checking
will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent f032a45081
commit cdcc136ffd
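To illustrate the reasoning in the commit message, here is a minimal, hypothetical sketch of the same pattern (the names my_item, my_pending_list, my_list_lock and my_worker are invented for this example and are not part of the cgroup code): a list that is manipulated from atomic context is protected by a raw spinlock, which remains a non-sleeping, non-preemptible lock on PREEMPT_RT, whereas a plain spinlock_t is converted into a sleeping lock there.

/*
 * Hypothetical example (not the cgroup code): a list touched from
 * atomic context, protected by a raw spinlock so the critical section
 * stays non-preemptible even on -rt.
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct my_item {
	struct list_head node;	/* assumed INIT_LIST_HEAD()ed at creation */
};

static LIST_HEAD(my_pending_list);
static DEFINE_RAW_SPINLOCK(my_list_lock);	/* raw: never a sleeping lock on -rt */

static void my_worker(struct work_struct *work);
static DECLARE_WORK(my_work, my_worker);

/* May run in atomic context: only the raw lock is taken, nothing sleeps. */
static void my_queue_item(struct my_item *item)
{
	bool queued = false;

	raw_spin_lock(&my_list_lock);
	if (list_empty(&item->node)) {		/* not already queued */
		list_add(&item->node, &my_pending_list);
		queued = true;
	}
	raw_spin_unlock(&my_list_lock);

	if (queued)
		schedule_work(&my_work);	/* heavy lifting deferred to process context */
}

/* Work item: drain the list, dropping the lock around anything that may sleep. */
static void my_worker(struct work_struct *work)
{
	raw_spin_lock(&my_list_lock);
	while (!list_empty(&my_pending_list)) {
		struct my_item *item = list_first_entry(&my_pending_list,
							struct my_item, node);

		list_del_init(&item->node);
		raw_spin_unlock(&my_list_lock);

		/* ... process item, possibly sleeping ... */

		raw_spin_lock(&my_list_lock);
	}
	raw_spin_unlock(&my_list_lock);
}

On a mainline (non -rt) kernel the raw variant behaves like an ordinary spinlock, so, as the commit message notes, the annotation mainly documents the low-level nature of the lock; lockdep and sparse continue to check it as usual. The actual change is the diff below.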
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -265,7 +265,7 @@ list_for_each_entry(_root, &roots, root_list)
 /* the list of cgroups eligible for automatic release. Protected by
  * release_list_lock */
 static LIST_HEAD(release_list);
-static DEFINE_SPINLOCK(release_list_lock);
+static DEFINE_RAW_SPINLOCK(release_list_lock);
 static void cgroup_release_agent(struct work_struct *work);
 static DECLARE_WORK(release_agent_work, cgroup_release_agent);
 static void check_for_release(struct cgroup *cgrp);
@@ -4014,11 +4014,11 @@ again:
 	finish_wait(&cgroup_rmdir_waitq, &wait);
 	clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
 
-	spin_lock(&release_list_lock);
+	raw_spin_lock(&release_list_lock);
 	set_bit(CGRP_REMOVED, &cgrp->flags);
 	if (!list_empty(&cgrp->release_list))
 		list_del_init(&cgrp->release_list);
-	spin_unlock(&release_list_lock);
+	raw_spin_unlock(&release_list_lock);
 
 	cgroup_lock_hierarchy(cgrp->root);
 	/* delete this cgroup from parent->children */
@@ -4671,13 +4671,13 @@ static void check_for_release(struct cgroup *cgrp)
 		 * already queued for a userspace notification, queue
 		 * it now */
 		int need_schedule_work = 0;
-		spin_lock(&release_list_lock);
+		raw_spin_lock(&release_list_lock);
 		if (!cgroup_is_removed(cgrp) &&
 		    list_empty(&cgrp->release_list)) {
 			list_add(&cgrp->release_list, &release_list);
 			need_schedule_work = 1;
 		}
-		spin_unlock(&release_list_lock);
+		raw_spin_unlock(&release_list_lock);
 		if (need_schedule_work)
 			schedule_work(&release_agent_work);
 	}
@@ -4729,7 +4729,7 @@ static void cgroup_release_agent(struct work_struct *work)
 {
 	BUG_ON(work != &release_agent_work);
 	mutex_lock(&cgroup_mutex);
-	spin_lock(&release_list_lock);
+	raw_spin_lock(&release_list_lock);
 	while (!list_empty(&release_list)) {
 		char *argv[3], *envp[3];
 		int i;
@@ -4738,7 +4738,7 @@ static void cgroup_release_agent(struct work_struct *work)
 						    struct cgroup,
 						    release_list);
 		list_del_init(&cgrp->release_list);
-		spin_unlock(&release_list_lock);
+		raw_spin_unlock(&release_list_lock);
 		pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 		if (!pathbuf)
 			goto continue_free;
@@ -4768,9 +4768,9 @@ static void cgroup_release_agent(struct work_struct *work)
 continue_free:
 		kfree(pathbuf);
 		kfree(agentbuf);
-		spin_lock(&release_list_lock);
+		raw_spin_lock(&release_list_lock);
 	}
-	spin_unlock(&release_list_lock);
+	raw_spin_unlock(&release_list_lock);
 	mutex_unlock(&cgroup_mutex);
 }
 