/*
 *  linux/fs/proc/base.c
 *
 *  Copyright (C) 1991, 1992 Linus Torvalds
 *
 *  proc base directory handling functions
 *
 *  1999, Al Viro. Rewritten. Now it covers the whole per-process part.
 *  Instead of using magical inumbers to determine the kind of object
 *  we allocate and fill in-core inodes upon lookup. They don't even
 *  go into icache. We cache the reference to task_struct upon lookup too.
 *  Eventually it should become a filesystem in its own right. We don't use
 *  the rest of procfs anymore.
 *
 *  Changelog:
 *  17-Jan-2005
 *	Allan Bezerra
 *	Bruna Moreira <bruna.moreira@indt.org.br>
 *	Edjard Mota <edjard.mota@indt.org.br>
 *	Ilias Biris <ilias.biris@indt.org.br>
 *	Mauricio Lin <mauricio.lin@indt.org.br>
 *
 *  Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
 *
 *  A new process-specific entry (smaps) was added to /proc. It shows the
 *  size of rss for each memory area. The maps entry lacks information
 *  about physical memory size (rss) for each mapped file, i.e.,
 *  rss information for executables and library files.
 *  This additional information is useful for any tools that need to know
 *  about physical memory consumption for a process-specific library.
 *
 *  Changelog:
 *  21-Feb-2005
 *  Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
 *	Pud inclusion in the page table walking.
 *
 *  ChangeLog:
 *  10-Mar-2005
 *  10LE Instituto Nokia de Tecnologia - INdT:
 *	A better way to walk through the page table, as suggested by
 *	Hugh Dickins.
 *
 *	Simo Piiroinen <simo.piiroinen@nokia.com>:
 *	Smaps information related to shared, private, clean and dirty pages.
 *
 *	Paul Mundt <paul.mundt@nokia.com>:
 *	Overall revision of smaps.
 */

#include <asm/uaccess.h>

#include <linux/errno.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/init.h>
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/string.h>
#include <linux/seq_file.h>
#include <linux/namei.h>
#include <linux/mnt_namespace.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
#include <linux/resource.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/audit.h>
#include <linux/poll.h>
#include <linux/nsproxy.h>
#include <linux/oom.h>
#include <linux/elf.h>
#include <linux/pid_namespace.h>
#include <linux/fs_struct.h>
#include "internal.h"

/* NOTE:
 *	Implementing inode permission operations in /proc is almost
 *	certainly an error.  Permission checks need to happen during
 *	each system call, not at open time.  The reason is that most of
 *	what we wish to check for permissions in /proc varies at runtime.
 *
 *	The classic example of a problem is opening file descriptors
 *	in /proc for a task before it execs a suid executable.
 */

struct pid_entry {
	char *name;
	int len;
	mode_t mode;
	const struct inode_operations *iop;
	const struct file_operations *fop;
	union proc_op op;
};

#define NOD(NAME, MODE, IOP, FOP, OP) {			\
	.name = (NAME),					\
	.len  = sizeof(NAME) - 1,			\
	.mode = MODE,					\
	.iop  = IOP,					\
	.fop  = FOP,					\
	.op   = OP,					\
}

#define DIR(NAME, MODE, iops, fops)	\
	NOD(NAME, (S_IFDIR|(MODE)), &iops, &fops, {} )
#define LNK(NAME, get_link)					\
	NOD(NAME, (S_IFLNK|S_IRWXUGO),				\
		&proc_pid_link_inode_operations, NULL,		\
		{ .proc_get_link = get_link } )
#define REG(NAME, MODE, fops)				\
	NOD(NAME, (S_IFREG|(MODE)), NULL, &fops, {})
#define INF(NAME, MODE, read)				\
	NOD(NAME, (S_IFREG|(MODE)),			\
		NULL, &proc_info_file_operations,	\
		{ .proc_read = read } )
#define ONE(NAME, MODE, show)				\
	NOD(NAME, (S_IFREG|(MODE)),			\
		NULL, &proc_single_file_operations,	\
		{ .proc_show = show } )
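
/*
 * Illustrative sketch of how the helpers above are used; the real
 * per-pid tables (e.g. tgid_base_stuff) appear further down in this
 * file, and the entries there are authoritative:
 *
 *	REG("environ",	S_IRUSR, proc_environ_operations),
 *	INF("auxv",	S_IRUSR, proc_pid_auxv),
 *	LNK("cwd",	proc_cwd_link),
 */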

/*
 * Count the number of hardlinks for the pid_entry table, excluding the .
 * and .. links.
 */
static unsigned int pid_entry_count_dirs(const struct pid_entry *entries,
	unsigned int n)
{
	unsigned int i;
	unsigned int count;

	count = 0;
	for (i = 0; i < n; ++i) {
		if (S_ISDIR(entries[i].mode))
			++count;
	}

	return count;
}
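
/*
 * Return @task's root directory (@root == true) or current working
 * directory (@root == false) in *path, with a reference held on it.
 * Fails with -ENOENT if the task no longer has an fs_struct, i.e. it
 * is past exit_fs().
 */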
static int get_fs_path(struct task_struct *task, struct path *path, bool root)
{
	struct fs_struct *fs;
	int result = -ENOENT;

	task_lock(task);
	fs = task->fs;
	if (fs) {
		read_lock(&fs->lock);
		*path = root ? fs->root : fs->pwd;
		path_get(path);
		read_unlock(&fs->lock);
		result = 0;
	}
	task_unlock(task);
	return result;
}
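
/*
 * Snapshot of the number of threads in @tsk's thread group, taken
 * under the sighand lock; returns 0 if the group is already exiting.
 */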
static int get_nr_threads(struct task_struct *tsk)
{
	unsigned long flags;
	int count = 0;

	if (lock_task_sighand(tsk, &flags)) {
		count = atomic_read(&tsk->signal->count);
		unlock_task_sighand(tsk, &flags);
	}
	return count;
}

static int proc_cwd_link(struct inode *inode, struct path *path)
{
	struct task_struct *task = get_proc_task(inode);
	int result = -ENOENT;

	if (task) {
		result = get_fs_path(task, path, 0);
		put_task_struct(task);
	}
	return result;
}

static int proc_root_link(struct inode *inode, struct path *path)
{
	struct task_struct *task = get_proc_task(inode);
	int result = -ENOENT;

	if (task) {
		result = get_fs_path(task, path, 1);
		put_task_struct(task);
	}
	return result;
}

/*
 * Return zero if current may access user memory in @task, -error if not.
 */
static int check_mem_permission(struct task_struct *task)
{
	/*
	 * A task can always look at itself, in case it chooses
	 * to use system calls instead of load instructions.
	 */
	if (task == current)
		return 0;

	/*
	 * If current is actively ptrace'ing, and would also be
	 * permitted to freshly attach with ptrace now, permit it.
	 */
	if (task_is_stopped_or_traced(task)) {
		int match;
		rcu_read_lock();
		match = (tracehook_tracer_task(task) == current);
		rcu_read_unlock();
		if (match && ptrace_may_access(task, PTRACE_MODE_ATTACH))
			return 0;
	}

	/*
	 * No one else is allowed.
	 */
	return -EPERM;
}
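
/*
 * Grab a reference to @task's mm for the /proc/<pid>/maps family of
 * files, but only if the caller would also be allowed ptrace read
 * access to the task.
 */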
struct mm_struct *mm_for_maps(struct task_struct *task)
{
	struct mm_struct *mm;

	if (mutex_lock_killable(&task->cred_guard_mutex))
		return NULL;

	mm = get_task_mm(task);
	if (mm && mm != current->mm &&
			!ptrace_may_access(task, PTRACE_MODE_READ)) {
		mmput(mm);
		mm = NULL;
	}
	mutex_unlock(&task->cred_guard_mutex);

	return mm;
}

static int proc_pid_cmdline(struct task_struct *task, char *buffer)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	len = mm->arg_end - mm->arg_start;

	if (len > PAGE_SIZE)
		len = PAGE_SIZE;

	res = access_process_vm(task, mm->arg_start, buffer, len, 0);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume the application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = mm->env_end - mm->env_start;
			if (len > PAGE_SIZE - res)
				len = PAGE_SIZE - res;
			res += access_process_vm(task, mm->env_start,
						 buffer+res, len, 0);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

static int proc_pid_auxv(struct task_struct *task, char *buffer)
{
	int res = 0;
	struct mm_struct *mm = get_task_mm(task);
	if (mm) {
		unsigned int nwords = 0;
		do {
			nwords += 2;
		} while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
		res = nwords * sizeof(mm->saved_auxv[0]);
		if (res > PAGE_SIZE)
			res = PAGE_SIZE;
		memcpy(buffer, mm->saved_auxv, res);
		mmput(mm);
	}
	return res;
}

#ifdef CONFIG_KALLSYMS
/*
 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
 * Returns the resolved symbol.  If that fails, simply return the address.
 */
static int proc_pid_wchan(struct task_struct *task, char *buffer)
{
	unsigned long wchan;
	char symname[KSYM_NAME_LEN];

	wchan = get_wchan(task);

	if (lookup_symbol_name(wchan, symname) < 0) {
		if (!ptrace_may_access(task, PTRACE_MODE_READ))
			return 0;
		else
			return sprintf(buffer, "%lu", wchan);
	} else
		return sprintf(buffer, "%s", symname);
}
#endif /* CONFIG_KALLSYMS */

#ifdef CONFIG_STACKTRACE

#define MAX_STACK_TRACE_DEPTH	64
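
/*
 * Example /proc/<pid>/stack output (illustrative only; addresses and
 * symbols vary by kernel and workload):
 *	[<ffffffff8106b34d>] do_wait+0x1cd/0x230
 *	[<ffffffff8106b4a9>] sys_wait4+0x9e/0xf0
 *	[<ffffffff81002f42>] system_call_fastpath+0x16/0x1b
 */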

static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
			  struct pid *pid, struct task_struct *task)
{
	struct stack_trace trace;
	unsigned long *entries;
	int i;

	entries = kmalloc(MAX_STACK_TRACE_DEPTH * sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	trace.nr_entries	= 0;
	trace.max_entries	= MAX_STACK_TRACE_DEPTH;
	trace.entries		= entries;
	trace.skip		= 0;
	save_stack_trace_tsk(task, &trace);

	for (i = 0; i < trace.nr_entries; i++) {
		seq_printf(m, "[<%p>] %pS\n",
			   (void *)entries[i], (void *)entries[i]);
	}
	kfree(entries);

	return 0;
}
#endif

#ifdef CONFIG_SCHEDSTATS
/*
 * Provides /proc/PID/schedstat
 */
static int proc_pid_schedstat(struct task_struct *task, char *buffer)
{
	return sprintf(buffer, "%llu %llu %lu\n",
			(unsigned long long)task->se.sum_exec_runtime,
			(unsigned long long)task->sched_info.run_delay,
			task->sched_info.pcount);
}
#endif

#ifdef CONFIG_LATENCYTOP
static int lstats_show_proc(struct seq_file *m, void *v)
{
	int i;
	struct inode *inode = m->private;
	struct task_struct *task = get_proc_task(inode);

	if (!task)
		return -ESRCH;
	seq_puts(m, "Latency Top version : v0.1\n");
	for (i = 0; i < 32; i++) {
		if (task->latency_record[i].backtrace[0]) {
			int q;
			seq_printf(m, "%i %li %li ",
				task->latency_record[i].count,
				task->latency_record[i].time,
				task->latency_record[i].max);
			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
				char sym[KSYM_SYMBOL_LEN];
				char *c;
				if (!task->latency_record[i].backtrace[q])
					break;
				if (task->latency_record[i].backtrace[q] == ULONG_MAX)
					break;
				sprint_symbol(sym, task->latency_record[i].backtrace[q]);
				c = strchr(sym, '+');
				if (c)
					*c = 0;
				seq_printf(m, "%s ", sym);
			}
			seq_printf(m, "\n");
		}
	}
	put_task_struct(task);
	return 0;
}

static int lstats_open(struct inode *inode, struct file *file)
{
	return single_open(file, lstats_show_proc, inode);
}

static ssize_t lstats_write(struct file *file, const char __user *buf,
			    size_t count, loff_t *offs)
{
	struct task_struct *task = get_proc_task(file->f_dentry->d_inode);

	if (!task)
		return -ESRCH;
	clear_all_latency_tracing(task);
	put_task_struct(task);

	return count;
}

static const struct file_operations proc_lstats_operations = {
	.open		= lstats_open,
	.read		= seq_read,
	.write		= lstats_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif

/* The badness from the OOM killer */
unsigned long badness(struct task_struct *p, unsigned long uptime);
static int proc_oom_score(struct task_struct *task, char *buffer)
{
	unsigned long points;
	struct timespec uptime;

	do_posix_clock_monotonic_gettime(&uptime);
	read_lock(&tasklist_lock);
	points = badness(task->group_leader, uptime.tv_sec);
	read_unlock(&tasklist_lock);
	return sprintf(buffer, "%lu\n", points);
}

struct limit_names {
	char *name;
	char *unit;
};

static const struct limit_names lnames[RLIM_NLIMITS] = {
	[RLIMIT_CPU] = {"Max cpu time", "ms"},
	[RLIMIT_FSIZE] = {"Max file size", "bytes"},
	[RLIMIT_DATA] = {"Max data size", "bytes"},
	[RLIMIT_STACK] = {"Max stack size", "bytes"},
	[RLIMIT_CORE] = {"Max core file size", "bytes"},
	[RLIMIT_RSS] = {"Max resident set", "bytes"},
	[RLIMIT_NPROC] = {"Max processes", "processes"},
	[RLIMIT_NOFILE] = {"Max open files", "files"},
	[RLIMIT_MEMLOCK] = {"Max locked memory", "bytes"},
	[RLIMIT_AS] = {"Max address space", "bytes"},
	[RLIMIT_LOCKS] = {"Max file locks", "locks"},
	[RLIMIT_SIGPENDING] = {"Max pending signals", "signals"},
	[RLIMIT_MSGQUEUE] = {"Max msgqueue size", "bytes"},
	[RLIMIT_NICE] = {"Max nice priority", NULL},
	[RLIMIT_RTPRIO] = {"Max realtime priority", NULL},
	[RLIMIT_RTTIME] = {"Max realtime timeout", "us"},
};
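
/*
 * The resulting /proc/<pid>/limits file looks like (illustrative
 * values):
 *
 *	Limit                     Soft Limit           Hard Limit           Units
 *	Max cpu time              unlimited            unlimited            ms
 *	Max file size             unlimited            unlimited            bytes
 *	Max open files            1024                 1024                 files
 */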
/* Display limits for a process */
static int proc_pid_limits(struct task_struct *task, char *buffer)
{
	unsigned int i;
	int count = 0;
	unsigned long flags;
	char *bufptr = buffer;

	struct rlimit rlim[RLIM_NLIMITS];

	if (!lock_task_sighand(task, &flags))
		return 0;
	memcpy(rlim, task->signal->rlim, sizeof(struct rlimit) * RLIM_NLIMITS);
	unlock_task_sighand(task, &flags);

	/*
	 * print the file header
	 */
	count += sprintf(&bufptr[count], "%-25s %-20s %-20s %-10s\n",
			"Limit", "Soft Limit", "Hard Limit", "Units");

	for (i = 0; i < RLIM_NLIMITS; i++) {
		if (rlim[i].rlim_cur == RLIM_INFINITY)
			count += sprintf(&bufptr[count], "%-25s %-20s ",
					 lnames[i].name, "unlimited");
		else
			count += sprintf(&bufptr[count], "%-25s %-20lu ",
					 lnames[i].name, rlim[i].rlim_cur);

		if (rlim[i].rlim_max == RLIM_INFINITY)
			count += sprintf(&bufptr[count], "%-20s ", "unlimited");
		else
			count += sprintf(&bufptr[count], "%-20lu ",
					 rlim[i].rlim_max);

		if (lnames[i].unit)
			count += sprintf(&bufptr[count], "%-10s\n",
					 lnames[i].unit);
		else
			count += sprintf(&bufptr[count], "\n");
	}

	return count;
}

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
static int proc_pid_syscall(struct task_struct *task, char *buffer)
{
	long nr;
	unsigned long args[6], sp, pc;

	if (task_current_syscall(task, &nr, args, 6, &sp, &pc))
		return sprintf(buffer, "running\n");

	if (nr < 0)
		return sprintf(buffer, "%ld 0x%lx 0x%lx\n", nr, sp, pc);

	return sprintf(buffer,
		       "%ld 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
		       nr,
		       args[0], args[1], args[2], args[3], args[4], args[5],
		       sp, pc);
}
#endif /* CONFIG_HAVE_ARCH_TRACEHOOK */

/************************************************************************/
/* Here the fs part begins                                              */
/************************************************************************/

/* permission checks */
static int proc_fd_access_allowed(struct inode *inode)
{
	struct task_struct *task;
	int allowed = 0;
	/* Allow access to a task's file descriptors if it is us or we
	 * may use ptrace attach to the process and find out that
	 * information.
	 */
	task = get_proc_task(inode);
	if (task) {
		allowed = ptrace_may_access(task, PTRACE_MODE_READ);
		put_task_struct(task);
	}
	return allowed;
}

static int proc_setattr(struct dentry *dentry, struct iattr *attr)
{
	int error;
	struct inode *inode = dentry->d_inode;

	if (attr->ia_valid & ATTR_MODE)
		return -EPERM;

	error = inode_change_ok(inode, attr);
	if (!error)
		error = inode_setattr(inode, attr);
	return error;
}

static const struct inode_operations proc_def_inode_operations = {
	.setattr	= proc_setattr,
};

static int mounts_open_common(struct inode *inode, struct file *file,
			      const struct seq_operations *op)
{
	struct task_struct *task = get_proc_task(inode);
	struct nsproxy *nsp;
	struct mnt_namespace *ns = NULL;
	struct path root;
	struct proc_mounts *p;
	int ret = -EINVAL;

	if (task) {
		rcu_read_lock();
		nsp = task_nsproxy(task);
		if (nsp) {
			ns = nsp->mnt_ns;
			if (ns)
				get_mnt_ns(ns);
		}
		rcu_read_unlock();
		if (ns && get_fs_path(task, &root, 1) == 0)
			ret = 0;
		put_task_struct(task);
	}

	if (!ns)
		goto err;
	if (ret)
		goto err_put_ns;

	ret = -ENOMEM;
	p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL);
	if (!p)
		goto err_put_path;

	file->private_data = &p->m;
	ret = seq_open(file, op);
	if (ret)
		goto err_free;

	p->m.private = p;
	p->ns = ns;
	p->root = root;
	p->event = ns->event;

	return 0;

 err_free:
	kfree(p);
 err_put_path:
	path_put(&root);
 err_put_ns:
	put_mnt_ns(ns);
 err:
	return ret;
}

static int mounts_release(struct inode *inode, struct file *file)
{
	struct proc_mounts *p = file->private_data;
	path_put(&p->root);
	put_mnt_ns(p->ns);
	return seq_release(inode, file);
}
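
/*
 * A poll event (POLLERR | POLLPRI, on top of the always-ready
 * POLLIN | POLLRDNORM) is reported when the mount namespace's event
 * counter has advanced, i.e. when mounts have changed since the file
 * was last read; userspace reacts by re-reading the file.
 */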
static unsigned mounts_poll(struct file *file, poll_table *wait)
{
	struct proc_mounts *p = file->private_data;
	struct mnt_namespace *ns = p->ns;
	unsigned res = POLLIN | POLLRDNORM;

	poll_wait(file, &ns->poll, wait);

	spin_lock(&vfsmount_lock);
	if (p->event != ns->event) {
		p->event = ns->event;
		res |= POLLERR | POLLPRI;
	}
	spin_unlock(&vfsmount_lock);

	return res;
}

static int mounts_open(struct inode *inode, struct file *file)
{
	return mounts_open_common(inode, file, &mounts_op);
}

static const struct file_operations proc_mounts_operations = {
	.open		= mounts_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= mounts_release,
	.poll		= mounts_poll,
};

static int mountinfo_open(struct inode *inode, struct file *file)
{
	return mounts_open_common(inode, file, &mountinfo_op);
}

static const struct file_operations proc_mountinfo_operations = {
	.open		= mountinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= mounts_release,
	.poll		= mounts_poll,
};

static int mountstats_open(struct inode *inode, struct file *file)
{
	return mounts_open_common(inode, file, &mountstats_op);
}

static const struct file_operations proc_mountstats_operations = {
	.open		= mountstats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= mounts_release,
};

#define PROC_BLOCK_SIZE	(3*1024)	/* 4K page size but our output routines use some slack for overruns */

static ssize_t proc_info_read(struct file *file, char __user *buf,
			      size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	unsigned long page;
	ssize_t length;
	struct task_struct *task = get_proc_task(inode);

	length = -ESRCH;
	if (!task)
		goto out_no_task;

	if (count > PROC_BLOCK_SIZE)
		count = PROC_BLOCK_SIZE;

	length = -ENOMEM;
	if (!(page = __get_free_page(GFP_TEMPORARY)))
		goto out;

	length = PROC_I(inode)->op.proc_read(task, (char *)page);

	if (length >= 0)
		length = simple_read_from_buffer(buf, count, ppos, (char *)page, length);
	free_page(page);
out:
	put_task_struct(task);
out_no_task:
	return length;
}

static const struct file_operations proc_info_file_operations = {
	.read		= proc_info_read,
};
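
/*
 * Common ->show() used by all ONE() entries: look the task up once and
 * hand the entry's proc_show() the pid namespace, struct pid and
 * task_struct it needs.
 */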
static int proc_single_show(struct seq_file *m, void *v)
{
	struct inode *inode = m->private;
	struct pid_namespace *ns;
	struct pid *pid;
	struct task_struct *task;
	int ret;

	ns = inode->i_sb->s_fs_info;
	pid = proc_pid(inode);
	task = get_pid_task(pid, PIDTYPE_PID);
	if (!task)
		return -ESRCH;

	ret = PROC_I(inode)->op.proc_show(m, ns, pid, task);

	put_task_struct(task);
	return ret;
}

static int proc_single_open(struct inode *inode, struct file *filp)
{
	int ret;
	ret = single_open(filp, proc_single_show, NULL);
	if (!ret) {
		struct seq_file *m = filp->private_data;

		m->private = inode;
	}
	return ret;
}

static const struct file_operations proc_single_file_operations = {
	.open		= proc_single_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int mem_open(struct inode *inode, struct file *file)
{
	file->private_data = (void *)((long)current->self_exec_id);
	return 0;
}

static ssize_t mem_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
	char *page;
	unsigned long src = *ppos;
	int ret = -ESRCH;
	struct mm_struct *mm;

	if (!task)
		goto out_no_task;

	if (check_mem_permission(task))
		goto out;

	ret = -ENOMEM;
	page = (char *)__get_free_page(GFP_TEMPORARY);
	if (!page)
		goto out;

	ret = 0;

	mm = get_task_mm(task);
	if (!mm)
		goto out_free;

	ret = -EIO;

	if (file->private_data != (void *)((long)current->self_exec_id))
		goto out_put;

	ret = 0;

	while (count > 0) {
		int this_len, retval;

		this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
		retval = access_process_vm(task, src, page, this_len, 0);
		if (!retval || check_mem_permission(task)) {
			if (!ret)
				ret = -EIO;
			break;
		}

		if (copy_to_user(buf, page, retval)) {
			ret = -EFAULT;
			break;
		}

		ret += retval;
		src += retval;
		buf += retval;
		count -= retval;
	}
	*ppos = src;

out_put:
	mmput(mm);
out_free:
	free_page((unsigned long) page);
out:
	put_task_struct(task);
out_no_task:
	return ret;
}

#define mem_write NULL

#ifndef mem_write
/* This is a security hazard */
static ssize_t mem_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	int copied;
	char *page;
	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
	unsigned long dst = *ppos;

	copied = -ESRCH;
	if (!task)
		goto out_no_task;

	if (check_mem_permission(task))
		goto out;

	copied = -ENOMEM;
	page = (char *)__get_free_page(GFP_TEMPORARY);
	if (!page)
		goto out;

	copied = 0;
	while (count > 0) {
		int this_len, retval;

		this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
		if (copy_from_user(page, buf, this_len)) {
			copied = -EFAULT;
			break;
		}
		retval = access_process_vm(task, dst, page, this_len, 1);
		if (!retval) {
			if (!copied)
				copied = -EIO;
			break;
		}
		copied += retval;
		buf += retval;
		dst += retval;
		count -= retval;
	}
	*ppos = dst;
	free_page((unsigned long) page);
out:
	put_task_struct(task);
out_no_task:
	return copied;
}
#endif
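
/*
 * The file offset of /proc/<pid>/mem is a user-space address, so large
 * ("negative") offsets are perfectly legal here.  mem_lseek() calls
 * force_successful_syscall_return() so that such offsets are not
 * misreported as errors on architectures that flag syscall failure
 * out of band.
 */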
loff_t mem_lseek(struct file *file, loff_t offset, int orig)
{
	switch (orig) {
	case 0:
		file->f_pos = offset;
		break;
	case 1:
		file->f_pos += offset;
		break;
	default:
		return -EINVAL;
	}
	force_successful_syscall_return();
	return file->f_pos;
}

static const struct file_operations proc_mem_operations = {
	.llseek		= mem_lseek,
	.read		= mem_read,
	.write		= mem_write,
	.open		= mem_open,
};

static ssize_t environ_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
	char *page;
	unsigned long src = *ppos;
	int ret = -ESRCH;
	struct mm_struct *mm;

	if (!task)
		goto out_no_task;

	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto out;

	ret = -ENOMEM;
	page = (char *)__get_free_page(GFP_TEMPORARY);
	if (!page)
		goto out;

	ret = 0;

	mm = get_task_mm(task);
	if (!mm)
		goto out_free;

	while (count > 0) {
		int this_len, retval, max_len;

		this_len = mm->env_end - (mm->env_start + src);

		if (this_len <= 0)
			break;

		max_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
		this_len = (this_len > max_len) ? max_len : this_len;

		retval = access_process_vm(task, (mm->env_start + src),
			page, this_len, 0);

		if (retval <= 0) {
			ret = retval;
			break;
		}

		if (copy_to_user(buf, page, retval)) {
			ret = -EFAULT;
			break;
		}

		ret += retval;
		src += retval;
		buf += retval;
		count -= retval;
	}
	*ppos = src;

	mmput(mm);
out_free:
	free_page((unsigned long) page);
out:
	put_task_struct(task);
out_no_task:
	return ret;
}

static const struct file_operations proc_environ_operations = {
	.read		= environ_read,
};
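
/*
 * /proc/<pid>/oom_adj tunes the badness() heuristic used by the OOM
 * killer.  Illustrative usage, assuming the historical -16..15 range
 * plus OOM_DISABLE (-17):
 *
 *	# echo -17 > /proc/<pid>/oom_adj	(exempt the task)
 *
 * Lowering the value below its current setting requires
 * CAP_SYS_RESOURCE; see oom_adjust_write() below.
 */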
static ssize_t oom_adjust_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
	char buffer[PROC_NUMBUF];
	size_t len;
	int oom_adjust = OOM_DISABLE;
	unsigned long flags;

	if (!task)
		return -ESRCH;

	if (lock_task_sighand(task, &flags)) {
		oom_adjust = task->signal->oom_adj;
		unlock_task_sighand(task, &flags);
	}

	put_task_struct(task);

	len = snprintf(buffer, sizeof(buffer), "%i\n", oom_adjust);

	return simple_read_from_buffer(buf, count, ppos, buffer, len);
}

static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	long oom_adjust;
	unsigned long flags;
	int err;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;

	err = strict_strtol(strstrip(buffer), 0, &oom_adjust);
	if (err)
		return -EINVAL;
	if ((oom_adjust < OOM_ADJUST_MIN || oom_adjust > OOM_ADJUST_MAX) &&
	     oom_adjust != OOM_DISABLE)
		return -EINVAL;

	task = get_proc_task(file->f_path.dentry->d_inode);
	if (!task)
		return -ESRCH;
	if (!lock_task_sighand(task, &flags)) {
		put_task_struct(task);
		return -ESRCH;
	}

	if (oom_adjust < task->signal->oom_adj && !capable(CAP_SYS_RESOURCE)) {
		unlock_task_sighand(task, &flags);
		put_task_struct(task);
		return -EACCES;
	}

	task->signal->oom_adj = oom_adjust;

	unlock_task_sighand(task, &flags);
	put_task_struct(task);

	return count;
}

static const struct file_operations proc_oom_adjust_operations = {
	.read		= oom_adjust_read,
	.write		= oom_adjust_write,
};

#ifdef CONFIG_AUDITSYSCALL
#define TMPBUFLEN 21
static ssize_t proc_loginuid_read(struct file *file, char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct task_struct *task = get_proc_task(inode);
	ssize_t length;
	char tmpbuf[TMPBUFLEN];

	if (!task)
		return -ESRCH;
	length = scnprintf(tmpbuf, TMPBUFLEN, "%u",
			   audit_get_loginuid(task));
	put_task_struct(task);
	return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
}

static ssize_t proc_loginuid_write(struct file *file, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	char *page, *tmp;
	ssize_t length;
	uid_t loginuid;

	if (!capable(CAP_AUDIT_CONTROL))
		return -EPERM;

	if (current != pid_task(proc_pid(inode), PIDTYPE_PID))
		return -EPERM;

	if (count >= PAGE_SIZE)
		count = PAGE_SIZE - 1;

	if (*ppos != 0) {
		/* No partial writes. */
		return -EINVAL;
	}
	page = (char *)__get_free_page(GFP_TEMPORARY);
	if (!page)
		return -ENOMEM;
	length = -EFAULT;
	if (copy_from_user(page, buf, count))
		goto out_free_page;

	page[count] = '\0';
	loginuid = simple_strtoul(page, &tmp, 10);
	if (tmp == page) {
		length = -EINVAL;
		goto out_free_page;
	}
	length = audit_set_loginuid(current, loginuid);
	if (likely(length == 0))
		length = count;

out_free_page:
	free_page((unsigned long) page);
	return length;
}

static const struct file_operations proc_loginuid_operations = {
	.read		= proc_loginuid_read,
	.write		= proc_loginuid_write,
};

static ssize_t proc_sessionid_read(struct file *file, char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct task_struct *task = get_proc_task(inode);
	ssize_t length;
	char tmpbuf[TMPBUFLEN];

	if (!task)
		return -ESRCH;
	length = scnprintf(tmpbuf, TMPBUFLEN, "%u",
			   audit_get_sessionid(task));
	put_task_struct(task);
	return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
}

static const struct file_operations proc_sessionid_operations = {
	.read		= proc_sessionid_read,
};
#endif

#ifdef CONFIG_FAULT_INJECTION
static ssize_t proc_fault_inject_read(struct file *file, char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
	char buffer[PROC_NUMBUF];
	size_t len;
	int make_it_fail;

	if (!task)
		return -ESRCH;
	make_it_fail = task->make_it_fail;
	put_task_struct(task);

	len = snprintf(buffer, sizeof(buffer), "%i\n", make_it_fail);

	return simple_read_from_buffer(buf, count, ppos, buffer, len);
}

static ssize_t proc_fault_inject_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF], *end;
	int make_it_fail;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;
	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	make_it_fail = simple_strtol(buffer, &end, 0);
	if (*end == '\n')
		end++;
	task = get_proc_task(file->f_dentry->d_inode);
	if (!task)
		return -ESRCH;
	task->make_it_fail = make_it_fail;
	put_task_struct(task);
	if (end - buffer == 0)
		return -EIO;
	return end - buffer;
}

static const struct file_operations proc_fault_inject_operations = {
	.read		= proc_fault_inject_read,
	.write		= proc_fault_inject_write,
};
#endif
2008-01-25 20:08:34 +00:00
|
|
|
|
2007-07-09 16:52:00 +00:00
|
|
|
#ifdef CONFIG_SCHED_DEBUG
|
|
|
|
/*
|
|
|
|
* Print out various scheduling related per-task fields:
|
|
|
|
*/
|
|
|
|
static int sched_show(struct seq_file *m, void *v)
|
|
|
|
{
|
|
|
|
struct inode *inode = m->private;
|
|
|
|
struct task_struct *p;
|
|
|
|
|
|
|
|
p = get_proc_task(inode);
|
|
|
|
if (!p)
|
|
|
|
return -ESRCH;
|
|
|
|
proc_sched_show_task(p, m);
|
|
|
|
|
|
|
|
put_task_struct(p);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
sched_write(struct file *file, const char __user *buf,
|
|
|
|
size_t count, loff_t *offset)
|
|
|
|
{
|
|
|
|
struct inode *inode = file->f_path.dentry->d_inode;
|
|
|
|
struct task_struct *p;
|
|
|
|
|
|
|
|
p = get_proc_task(inode);
|
|
|
|
if (!p)
|
|
|
|
return -ESRCH;
|
|
|
|
proc_sched_set_task(p);
|
|
|
|
|
|
|
|
put_task_struct(p);
|
|
|
|
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int sched_open(struct inode *inode, struct file *filp)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = single_open(filp, sched_show, NULL);
|
|
|
|
if (!ret) {
|
|
|
|
struct seq_file *m = filp->private_data;
|
|
|
|
|
|
|
|
m->private = inode;
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct file_operations proc_pid_sched_operations = {
|
|
|
|
.open = sched_open,
|
|
|
|
.read = seq_read,
|
|
|
|
.write = sched_write,
|
|
|
|
.llseek = seq_lseek,
|
2007-07-31 07:38:50 +00:00
|
|
|
.release = single_release,
|
2007-07-09 16:52:00 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
#endif
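/*
 * Illustrative usage sketch (not part of this file): reading
 * /proc/<pid>/sched dumps the per-task scheduler fields through
 * proc_sched_show_task(); a write of any content resets the
 * accumulated statistics through proc_sched_set_task().
 */
#if 0	/* example only */
#include <stdio.h>

static void reset_sched_stats(void)
{
	FILE *f = fopen("/proc/self/sched", "w");

	if (f) {
		fputc('0', f);	/* the value is ignored; writing resets */
		fclose(f);
	}
}
#endif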
|
|
|
|
|
2008-04-29 08:01:36 +00:00
|
|
|
/*
|
|
|
|
* We added or removed a vma mapping the executable. The vmas are only mapped
|
|
|
|
* during exec and are not mapped with the mmap system call.
|
|
|
|
 * Callers must hold down_write() on the mm's mmap_sem for these helpers.
|
|
|
|
*/
|
|
|
|
void added_exe_file_vma(struct mm_struct *mm)
|
|
|
|
{
|
|
|
|
mm->num_exe_file_vmas++;
|
|
|
|
}
|
|
|
|
|
|
|
|
void removed_exe_file_vma(struct mm_struct *mm)
|
|
|
|
{
|
|
|
|
mm->num_exe_file_vmas--;
|
|
|
|
if ((mm->num_exe_file_vmas == 0) && mm->exe_file) {
|
|
|
|
fput(mm->exe_file);
|
|
|
|
mm->exe_file = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
|
|
|
|
{
|
|
|
|
if (new_exe_file)
|
|
|
|
get_file(new_exe_file);
|
|
|
|
if (mm->exe_file)
|
|
|
|
fput(mm->exe_file);
|
|
|
|
mm->exe_file = new_exe_file;
|
|
|
|
mm->num_exe_file_vmas = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct file *get_mm_exe_file(struct mm_struct *mm)
|
|
|
|
{
|
|
|
|
struct file *exe_file;
|
|
|
|
|
|
|
|
/* We need mmap_sem to protect against races with removal of
|
|
|
|
* VM_EXECUTABLE vmas */
|
|
|
|
down_read(&mm->mmap_sem);
|
|
|
|
exe_file = mm->exe_file;
|
|
|
|
if (exe_file)
|
|
|
|
get_file(exe_file);
|
|
|
|
up_read(&mm->mmap_sem);
|
|
|
|
return exe_file;
|
|
|
|
}
|
|
|
|
|
|
|
|
void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm)
|
|
|
|
{
|
|
|
|
/* It's safe to write the exe_file pointer without exe_file_lock because
|
|
|
|
* this is called during fork when the task is not yet in /proc */
|
|
|
|
newmm->exe_file = get_mm_exe_file(oldmm);
|
|
|
|
}
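/*
 * Lifetime sketch: set_mm_exe_file() stores one reference to the file;
 * added_exe_file_vma()/removed_exe_file_vma() only count VM_EXECUTABLE
 * vmas, and the stored reference is dropped once that count falls back
 * to zero. get_mm_exe_file() hands out an extra reference under
 * mmap_sem for callers such as proc_exe_link() below.
 */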
|
|
|
|
|
|
|
|
static int proc_exe_link(struct inode *inode, struct path *exe_path)
|
|
|
|
{
|
|
|
|
struct task_struct *task;
|
|
|
|
struct mm_struct *mm;
|
|
|
|
struct file *exe_file;
|
|
|
|
|
|
|
|
task = get_proc_task(inode);
|
|
|
|
if (!task)
|
|
|
|
return -ENOENT;
|
|
|
|
mm = get_task_mm(task);
|
|
|
|
put_task_struct(task);
|
|
|
|
if (!mm)
|
|
|
|
return -ENOENT;
|
|
|
|
exe_file = get_mm_exe_file(mm);
|
|
|
|
mmput(mm);
|
|
|
|
if (exe_file) {
|
|
|
|
*exe_path = exe_file->f_path;
|
|
|
|
path_get(&exe_file->f_path);
|
|
|
|
fput(exe_file);
|
|
|
|
return 0;
|
|
|
|
} else
|
|
|
|
return -ENOENT;
|
|
|
|
}
|
|
|
|
|
2005-08-19 23:17:39 +00:00
|
|
|
static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
struct inode *inode = dentry->d_inode;
|
|
|
|
int error = -EACCES;
|
|
|
|
|
|
|
|
/* We don't need a base pointer in the /proc filesystem */
|
2008-02-15 03:34:35 +00:00
|
|
|
path_put(&nd->path);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-06-26 07:25:58 +00:00
|
|
|
/* Are we allowed to snoop on the tasks file descriptors? */
|
|
|
|
if (!proc_fd_access_allowed(inode))
|
2005-04-16 22:20:36 +00:00
|
|
|
goto out;
|
|
|
|
|
2008-02-15 03:38:35 +00:00
|
|
|
error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
|
2005-04-16 22:20:36 +00:00
|
|
|
nd->last_type = LAST_BIND;
|
|
|
|
out:
|
2005-08-19 23:17:39 +00:00
|
|
|
return ERR_PTR(error);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2008-02-15 03:38:35 +00:00
|
|
|
static int do_proc_readlink(struct path *path, char __user *buffer, int buflen)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2007-10-16 08:25:52 +00:00
|
|
|
char *tmp = (char*)__get_free_page(GFP_TEMPORARY);
|
2008-02-15 03:38:35 +00:00
|
|
|
char *pathname;
|
2005-04-16 22:20:36 +00:00
|
|
|
int len;
|
|
|
|
|
|
|
|
if (!tmp)
|
|
|
|
return -ENOMEM;
|
2007-05-08 07:31:41 +00:00
|
|
|
|
2008-02-15 03:38:44 +00:00
|
|
|
pathname = d_path(path, tmp, PAGE_SIZE);
|
2008-02-15 03:38:35 +00:00
|
|
|
len = PTR_ERR(pathname);
|
|
|
|
if (IS_ERR(pathname))
|
2005-04-16 22:20:36 +00:00
|
|
|
goto out;
|
2008-02-15 03:38:35 +00:00
|
|
|
len = tmp + PAGE_SIZE - 1 - pathname;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
if (len > buflen)
|
|
|
|
len = buflen;
|
2008-02-15 03:38:35 +00:00
|
|
|
if (copy_to_user(buffer, pathname, len))
|
2005-04-16 22:20:36 +00:00
|
|
|
len = -EFAULT;
|
|
|
|
out:
|
|
|
|
free_page((unsigned long)tmp);
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int buflen)
|
|
|
|
{
|
|
|
|
int error = -EACCES;
|
|
|
|
struct inode *inode = dentry->d_inode;
|
2008-02-15 03:38:35 +00:00
|
|
|
struct path path;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-06-26 07:25:58 +00:00
|
|
|
/* Are we allowed to snoop on the tasks file descriptors? */
|
|
|
|
if (!proc_fd_access_allowed(inode))
|
2005-04-16 22:20:36 +00:00
|
|
|
goto out;
|
|
|
|
|
2008-02-15 03:38:35 +00:00
|
|
|
error = PROC_I(inode)->op.proc_get_link(inode, &path);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (error)
|
|
|
|
goto out;
|
|
|
|
|
2008-02-15 03:38:35 +00:00
|
|
|
error = do_proc_readlink(&path, buffer, buflen);
|
|
|
|
path_put(&path);
|
2005-04-16 22:20:36 +00:00
|
|
|
out:
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
2007-02-12 08:55:40 +00:00
|
|
|
static const struct inode_operations proc_pid_link_inode_operations = {
|
2005-04-16 22:20:36 +00:00
|
|
|
.readlink = proc_pid_readlink,
|
2006-07-15 19:26:45 +00:00
|
|
|
.follow_link = proc_pid_follow_link,
|
|
|
|
.setattr = proc_setattr,
|
2005-04-16 22:20:36 +00:00
|
|
|
};
|
|
|
|
|
2006-10-02 09:17:05 +00:00
|
|
|
|
|
|
|
/* building an inode */
|
|
|
|
|
|
|
|
static int task_dumpable(struct task_struct *task)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2006-10-02 09:17:05 +00:00
|
|
|
int dumpable = 0;
|
|
|
|
struct mm_struct *mm;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-10-02 09:17:05 +00:00
|
|
|
task_lock(task);
|
|
|
|
mm = task->mm;
|
|
|
|
if (mm)
|
2007-07-19 08:48:27 +00:00
|
|
|
dumpable = get_dumpable(mm);
|
2006-10-02 09:17:05 +00:00
|
|
|
task_unlock(task);
|
|
|
|
if (dumpable == 1)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
|
2006-10-02 09:18:49 +00:00
|
|
|
static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *task)
|
2006-10-02 09:17:05 +00:00
|
|
|
{
|
|
|
|
struct inode * inode;
|
|
|
|
struct proc_inode *ei;
|
2008-11-13 23:39:19 +00:00
|
|
|
const struct cred *cred;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-10-02 09:17:05 +00:00
|
|
|
/* We need a new inode */
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-10-02 09:17:05 +00:00
|
|
|
inode = new_inode(sb);
|
|
|
|
if (!inode)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
/* Common stuff */
|
|
|
|
ei = PROC_I(inode);
|
|
|
|
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
|
|
|
|
inode->i_op = &proc_def_inode_operations;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* grab the reference to task.
|
|
|
|
*/
|
2006-10-02 09:18:59 +00:00
|
|
|
ei->pid = get_task_pid(task, PIDTYPE_PID);
|
2006-10-02 09:17:05 +00:00
|
|
|
if (!ei->pid)
|
|
|
|
goto out_unlock;
|
|
|
|
|
|
|
|
if (task_dumpable(task)) {
|
2008-11-13 23:39:19 +00:00
|
|
|
rcu_read_lock();
|
|
|
|
cred = __task_cred(task);
|
|
|
|
inode->i_uid = cred->euid;
|
|
|
|
inode->i_gid = cred->egid;
|
|
|
|
rcu_read_unlock();
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2006-10-02 09:17:05 +00:00
|
|
|
security_task_to_inode(task, inode);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
out:
|
2006-10-02 09:17:05 +00:00
|
|
|
return inode;
|
|
|
|
|
|
|
|
out_unlock:
|
|
|
|
iput(inode);
|
|
|
|
return NULL;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2006-10-02 09:17:05 +00:00
|
|
|
static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
struct inode *inode = dentry->d_inode;
|
2006-10-02 09:17:05 +00:00
|
|
|
struct task_struct *task;
|
2008-11-13 23:39:19 +00:00
|
|
|
const struct cred *cred;
|
|
|
|
|
2006-10-02 09:17:05 +00:00
|
|
|
generic_fillattr(inode, stat);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-10-02 09:17:05 +00:00
|
|
|
rcu_read_lock();
|
|
|
|
stat->uid = 0;
|
|
|
|
stat->gid = 0;
|
|
|
|
task = pid_task(proc_pid(inode), PIDTYPE_PID);
|
|
|
|
if (task) {
|
|
|
|
if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
|
|
|
|
task_dumpable(task)) {
|
2008-11-13 23:39:19 +00:00
|
|
|
cred = __task_cred(task);
|
|
|
|
stat->uid = cred->euid;
|
|
|
|
stat->gid = cred->egid;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
}
|
2006-10-02 09:17:05 +00:00
|
|
|
rcu_read_unlock();
|
2005-06-23 07:09:43 +00:00
|
|
|
return 0;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* dentry stuff */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Exceptional case: normally we are not allowed to unhash a busy
|
|
|
|
* directory. In this case, however, we can do it - no aliasing problems
|
|
|
|
* due to the way we treat inodes.
|
|
|
|
*
|
|
|
|
* Rewrite the inode's ownerships here because the owning task may have
|
|
|
|
* performed a setuid(), etc.
|
2006-06-26 07:25:55 +00:00
|
|
|
*
|
|
|
|
* Before the /proc/pid/status file was created the only way to read
|
|
|
|
 * the effective uid of a process was to stat /proc/pid. Reading
|
|
|
|
* /proc/pid/status is slow enough that procps and other packages
|
|
|
|
* kept stating /proc/pid. To keep the rules in /proc simple I have
|
|
|
|
* made this apply to all per process world readable and executable
|
|
|
|
* directories.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
|
|
|
static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
|
|
|
|
{
|
|
|
|
struct inode *inode = dentry->d_inode;
|
2006-06-26 07:25:55 +00:00
|
|
|
struct task_struct *task = get_proc_task(inode);
|
2008-11-13 23:39:19 +00:00
|
|
|
const struct cred *cred;
|
|
|
|
|
2006-06-26 07:25:55 +00:00
|
|
|
if (task) {
|
|
|
|
if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
|
|
|
|
task_dumpable(task)) {
|
2008-11-13 23:39:19 +00:00
|
|
|
rcu_read_lock();
|
|
|
|
cred = __task_cred(task);
|
|
|
|
inode->i_uid = cred->euid;
|
|
|
|
inode->i_gid = cred->egid;
|
|
|
|
rcu_read_unlock();
|
2005-04-16 22:20:36 +00:00
|
|
|
} else {
|
|
|
|
inode->i_uid = 0;
|
|
|
|
inode->i_gid = 0;
|
|
|
|
}
|
2006-07-15 04:48:03 +00:00
|
|
|
inode->i_mode &= ~(S_ISUID | S_ISGID);
|
2005-04-16 22:20:36 +00:00
|
|
|
security_task_to_inode(task, inode);
|
2006-06-26 07:25:55 +00:00
|
|
|
put_task_struct(task);
|
2005-04-16 22:20:36 +00:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
d_drop(dentry);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2006-10-02 09:17:05 +00:00
|
|
|
static int pid_delete_dentry(struct dentry * dentry)
|
2006-06-26 07:25:55 +00:00
|
|
|
{
|
2006-10-02 09:17:05 +00:00
|
|
|
/* Is the task we represent dead?
|
|
|
|
* If so, then don't put the dentry on the lru list,
|
|
|
|
* kill it immediately.
|
|
|
|
*/
|
|
|
|
return !proc_pid(dentry->d_inode)->tasks[PIDTYPE_PID].first;
|
|
|
|
}
|
|
|
|
|
2009-02-20 05:58:47 +00:00
|
|
|
static const struct dentry_operations pid_dentry_operations =
|
2006-10-02 09:17:05 +00:00
|
|
|
{
|
|
|
|
.d_revalidate = pid_revalidate,
|
|
|
|
.d_delete = pid_delete_dentry,
|
|
|
|
};
|
|
|
|
|
|
|
|
/* Lookups */
|
|
|
|
|
2007-05-08 07:26:15 +00:00
|
|
|
typedef struct dentry *instantiate_t(struct inode *, struct dentry *,
|
|
|
|
struct task_struct *, const void *);
|
2006-10-02 09:18:49 +00:00
|
|
|
|
2006-10-02 09:18:57 +00:00
|
|
|
/*
|
|
|
|
* Fill a directory entry.
|
|
|
|
*
|
|
|
|
* If possible create the dcache entry and derive our inode number and
|
|
|
|
 * file type from the dcache entry.
|
|
|
|
*
|
|
|
|
* Since all of the proc inode numbers are dynamically generated, the inode
|
|
|
|
 * numbers do not exist until the inode is cached. This means creating
|
|
|
|
* the dcache entry in readdir is necessary to keep the inode numbers
|
|
|
|
* reported by readdir in sync with the inode numbers reported
|
|
|
|
* by stat.
|
|
|
|
*/
|
2006-10-02 09:18:49 +00:00
|
|
|
static int proc_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
|
|
|
|
char *name, int len,
|
2007-05-08 07:26:15 +00:00
|
|
|
instantiate_t instantiate, struct task_struct *task, const void *ptr)
|
2006-10-02 09:18:49 +00:00
|
|
|
{
|
2006-12-08 10:36:36 +00:00
|
|
|
struct dentry *child, *dir = filp->f_path.dentry;
|
2006-10-02 09:18:49 +00:00
|
|
|
struct inode *inode;
|
|
|
|
struct qstr qname;
|
|
|
|
ino_t ino = 0;
|
|
|
|
unsigned type = DT_UNKNOWN;
|
|
|
|
|
|
|
|
qname.name = name;
|
|
|
|
qname.len = len;
|
|
|
|
qname.hash = full_name_hash(name, len);
|
|
|
|
|
|
|
|
child = d_lookup(dir, &qname);
|
|
|
|
if (!child) {
|
|
|
|
struct dentry *new;
|
|
|
|
new = d_alloc(dir, &qname);
|
|
|
|
if (new) {
|
|
|
|
child = instantiate(dir->d_inode, new, task, ptr);
|
|
|
|
if (child)
|
|
|
|
dput(new);
|
|
|
|
else
|
|
|
|
child = new;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!child || IS_ERR(child) || !child->d_inode)
|
|
|
|
goto end_instantiate;
|
|
|
|
inode = child->d_inode;
|
|
|
|
if (inode) {
|
|
|
|
ino = inode->i_ino;
|
|
|
|
type = inode->i_mode >> 12;
|
|
|
|
}
|
|
|
|
dput(child);
|
|
|
|
end_instantiate:
|
|
|
|
if (!ino)
|
|
|
|
ino = find_inode_number(dir, &qname);
|
|
|
|
if (!ino)
|
|
|
|
ino = 1;
|
|
|
|
return filldir(dirent, name, len, filp->f_pos, ino, type);
|
|
|
|
}
|
|
|
|
|
2006-10-02 09:17:05 +00:00
|
|
|
static unsigned name_to_int(struct dentry *dentry)
|
|
|
|
{
|
|
|
|
const char *name = dentry->d_name.name;
|
|
|
|
int len = dentry->d_name.len;
|
|
|
|
unsigned n = 0;
|
|
|
|
|
|
|
|
if (len > 1 && *name == '0')
|
|
|
|
goto out;
|
|
|
|
while (len-- > 0) {
|
|
|
|
unsigned c = *name++ - '0';
|
|
|
|
if (c > 9)
|
|
|
|
goto out;
|
|
|
|
if (n >= (~0U-9)/10)
|
|
|
|
goto out;
|
|
|
|
n *= 10;
|
|
|
|
n += c;
|
|
|
|
}
|
|
|
|
return n;
|
|
|
|
out:
|
|
|
|
return ~0U;
|
|
|
|
}
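/*
 * Examples (illustrative): name_to_int() maps "42" to 42, while "042"
 * and "4x2" map to ~0U (leading zeros and non-digits are rejected), as
 * does anything that would overflow an unsigned int.
 */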
|
|
|
|
|
2007-05-08 07:26:17 +00:00
|
|
|
#define PROC_FDINFO_MAX 64
|
|
|
|
|
2008-02-15 03:38:35 +00:00
|
|
|
static int proc_fd_info(struct inode *inode, struct path *path, char *info)
|
2006-10-02 09:17:05 +00:00
|
|
|
{
|
|
|
|
struct task_struct *task = get_proc_task(inode);
|
|
|
|
struct files_struct *files = NULL;
|
|
|
|
struct file *file;
|
|
|
|
int fd = proc_fd(inode);
|
2006-06-26 07:25:55 +00:00
|
|
|
|
|
|
|
if (task) {
|
2006-10-02 09:17:05 +00:00
|
|
|
files = get_files_struct(task);
|
|
|
|
put_task_struct(task);
|
|
|
|
}
|
|
|
|
if (files) {
|
|
|
|
/*
|
|
|
|
* We are not taking a ref to the file structure, so we must
|
|
|
|
* hold ->file_lock.
|
|
|
|
*/
|
|
|
|
spin_lock(&files->file_lock);
|
|
|
|
file = fcheck_files(files, fd);
|
|
|
|
if (file) {
|
2008-02-15 03:38:35 +00:00
|
|
|
if (path) {
|
|
|
|
*path = file->f_path;
|
|
|
|
path_get(&file->f_path);
|
|
|
|
}
|
2007-05-08 07:26:17 +00:00
|
|
|
if (info)
|
|
|
|
snprintf(info, PROC_FDINFO_MAX,
|
|
|
|
"pos:\t%lli\n"
|
|
|
|
"flags:\t0%o\n",
|
|
|
|
(long long) file->f_pos,
|
|
|
|
file->f_flags);
|
2006-10-02 09:17:05 +00:00
|
|
|
spin_unlock(&files->file_lock);
|
|
|
|
put_files_struct(files);
|
|
|
|
return 0;
|
2006-06-26 07:25:55 +00:00
|
|
|
}
|
2006-10-02 09:17:05 +00:00
|
|
|
spin_unlock(&files->file_lock);
|
|
|
|
put_files_struct(files);
|
2006-06-26 07:25:55 +00:00
|
|
|
}
|
2006-10-02 09:17:05 +00:00
|
|
|
return -ENOENT;
|
2006-06-26 07:25:55 +00:00
|
|
|
}
|
|
|
|
|
2008-02-15 03:38:35 +00:00
|
|
|
static int proc_fd_link(struct inode *inode, struct path *path)
|
2007-05-08 07:26:17 +00:00
|
|
|
{
|
2008-02-15 03:38:35 +00:00
|
|
|
return proc_fd_info(inode, path, NULL);
|
2007-05-08 07:26:17 +00:00
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
|
|
|
|
{
|
|
|
|
struct inode *inode = dentry->d_inode;
|
2006-06-26 07:25:55 +00:00
|
|
|
struct task_struct *task = get_proc_task(inode);
|
2006-06-26 07:25:44 +00:00
|
|
|
int fd = proc_fd(inode);
|
2005-04-16 22:20:36 +00:00
|
|
|
struct files_struct *files;
|
2008-11-13 23:39:19 +00:00
|
|
|
const struct cred *cred;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-06-26 07:25:55 +00:00
|
|
|
if (task) {
|
|
|
|
files = get_files_struct(task);
|
|
|
|
if (files) {
|
|
|
|
rcu_read_lock();
|
|
|
|
if (fcheck_files(files, fd)) {
|
|
|
|
rcu_read_unlock();
|
|
|
|
put_files_struct(files);
|
|
|
|
if (task_dumpable(task)) {
|
2008-11-13 23:39:19 +00:00
|
|
|
rcu_read_lock();
|
|
|
|
cred = __task_cred(task);
|
|
|
|
inode->i_uid = cred->euid;
|
|
|
|
inode->i_gid = cred->egid;
|
|
|
|
rcu_read_unlock();
|
2006-06-26 07:25:55 +00:00
|
|
|
} else {
|
|
|
|
inode->i_uid = 0;
|
|
|
|
inode->i_gid = 0;
|
|
|
|
}
|
2006-07-15 04:48:03 +00:00
|
|
|
inode->i_mode &= ~(S_ISUID | S_ISGID);
|
2006-06-26 07:25:55 +00:00
|
|
|
security_task_to_inode(task, inode);
|
|
|
|
put_task_struct(task);
|
|
|
|
return 1;
|
|
|
|
}
|
2005-09-09 20:04:14 +00:00
|
|
|
rcu_read_unlock();
|
2005-04-16 22:20:36 +00:00
|
|
|
put_files_struct(files);
|
|
|
|
}
|
2006-06-26 07:25:55 +00:00
|
|
|
put_task_struct(task);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
d_drop(dentry);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-02-20 05:58:47 +00:00
|
|
|
static const struct dentry_operations tid_fd_dentry_operations =
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
.d_revalidate = tid_fd_revalidate,
|
|
|
|
.d_delete = pid_delete_dentry,
|
|
|
|
};
|
|
|
|
|
2006-10-02 09:18:49 +00:00
|
|
|
static struct dentry *proc_fd_instantiate(struct inode *dir,
|
2007-05-08 07:26:15 +00:00
|
|
|
struct dentry *dentry, struct task_struct *task, const void *ptr)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2007-05-08 07:26:15 +00:00
|
|
|
unsigned fd = *(const unsigned *)ptr;
|
2006-10-02 09:18:49 +00:00
|
|
|
struct file *file;
|
|
|
|
struct files_struct *files;
|
|
|
|
struct inode *inode;
|
|
|
|
struct proc_inode *ei;
|
|
|
|
struct dentry *error = ERR_PTR(-ENOENT);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-10-02 09:18:49 +00:00
|
|
|
inode = proc_pid_make_inode(dir->i_sb, task);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (!inode)
|
|
|
|
goto out;
|
|
|
|
ei = PROC_I(inode);
|
2006-06-26 07:25:44 +00:00
|
|
|
ei->fd = fd;
|
2005-04-16 22:20:36 +00:00
|
|
|
files = get_files_struct(task);
|
|
|
|
if (!files)
|
2006-10-02 09:18:49 +00:00
|
|
|
goto out_iput;
|
2005-04-16 22:20:36 +00:00
|
|
|
inode->i_mode = S_IFLNK;
|
2006-04-19 05:21:46 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We are not taking a ref to the file structure, so we must
|
|
|
|
* hold ->file_lock.
|
|
|
|
*/
|
|
|
|
spin_lock(&files->file_lock);
|
2005-04-16 22:20:36 +00:00
|
|
|
file = fcheck_files(files, fd);
|
|
|
|
if (!file)
|
2006-10-02 09:18:49 +00:00
|
|
|
goto out_unlock;
|
2008-09-02 19:28:45 +00:00
|
|
|
if (file->f_mode & FMODE_READ)
|
2005-04-16 22:20:36 +00:00
|
|
|
inode->i_mode |= S_IRUSR | S_IXUSR;
|
2008-09-02 19:28:45 +00:00
|
|
|
if (file->f_mode & FMODE_WRITE)
|
2005-04-16 22:20:36 +00:00
|
|
|
inode->i_mode |= S_IWUSR | S_IXUSR;
|
2006-04-19 05:21:46 +00:00
|
|
|
spin_unlock(&files->file_lock);
|
2005-04-16 22:20:36 +00:00
|
|
|
put_files_struct(files);
|
2006-10-02 09:18:49 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
inode->i_op = &proc_pid_link_inode_operations;
|
|
|
|
inode->i_size = 64;
|
|
|
|
ei->op.proc_get_link = proc_fd_link;
|
|
|
|
dentry->d_op = &tid_fd_dentry_operations;
|
|
|
|
d_add(dentry, inode);
|
2006-06-26 07:25:49 +00:00
|
|
|
/* Close the race of the process dying before we return the dentry */
|
|
|
|
if (tid_fd_revalidate(dentry, NULL))
|
2006-10-02 09:18:49 +00:00
|
|
|
error = NULL;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-10-02 09:18:49 +00:00
|
|
|
out:
|
|
|
|
return error;
|
|
|
|
out_unlock:
|
2006-04-19 05:21:46 +00:00
|
|
|
spin_unlock(&files->file_lock);
|
2005-04-16 22:20:36 +00:00
|
|
|
put_files_struct(files);
|
2006-10-02 09:18:49 +00:00
|
|
|
out_iput:
|
2005-04-16 22:20:36 +00:00
|
|
|
iput(inode);
|
2006-06-26 07:25:49 +00:00
|
|
|
goto out;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2007-05-08 07:26:17 +00:00
|
|
|
static struct dentry *proc_lookupfd_common(struct inode *dir,
|
|
|
|
struct dentry *dentry,
|
|
|
|
instantiate_t instantiate)
|
2006-10-02 09:18:49 +00:00
|
|
|
{
|
|
|
|
struct task_struct *task = get_proc_task(dir);
|
|
|
|
unsigned fd = name_to_int(dentry);
|
|
|
|
struct dentry *result = ERR_PTR(-ENOENT);
|
|
|
|
|
|
|
|
if (!task)
|
|
|
|
goto out_no_task;
|
|
|
|
if (fd == ~0U)
|
|
|
|
goto out;
|
|
|
|
|
2007-05-08 07:26:17 +00:00
|
|
|
result = instantiate(dir, dentry, task, &fd);
|
2006-10-02 09:18:49 +00:00
|
|
|
out:
|
|
|
|
put_task_struct(task);
|
|
|
|
out_no_task:
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2007-05-08 07:26:17 +00:00
|
|
|
static int proc_readfd_common(struct file * filp, void * dirent,
|
|
|
|
filldir_t filldir, instantiate_t instantiate)
|
2006-10-02 09:17:05 +00:00
|
|
|
{
|
2006-12-08 10:36:36 +00:00
|
|
|
struct dentry *dentry = filp->f_path.dentry;
|
2006-10-02 09:17:05 +00:00
|
|
|
struct inode *inode = dentry->d_inode;
|
|
|
|
struct task_struct *p = get_proc_task(inode);
|
2007-10-19 06:40:43 +00:00
|
|
|
unsigned int fd, ino;
|
2006-10-02 09:17:05 +00:00
|
|
|
int retval;
|
|
|
|
struct files_struct * files;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-10-02 09:17:05 +00:00
|
|
|
retval = -ENOENT;
|
|
|
|
if (!p)
|
|
|
|
goto out_no_task;
|
|
|
|
retval = 0;
|
|
|
|
|
|
|
|
fd = filp->f_pos;
|
|
|
|
switch (fd) {
|
|
|
|
case 0:
|
|
|
|
if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
|
|
|
|
goto out;
|
|
|
|
filp->f_pos++;
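/* fall through */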
|
|
|
|
case 1:
|
|
|
|
ino = parent_ino(dentry);
|
|
|
|
if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0)
|
|
|
|
goto out;
|
|
|
|
filp->f_pos++;
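/* fall through */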
|
|
|
|
default:
|
|
|
|
files = get_files_struct(p);
|
|
|
|
if (!files)
|
|
|
|
goto out;
|
|
|
|
rcu_read_lock();
|
|
|
|
for (fd = filp->f_pos-2;
|
2008-04-22 05:32:44 +00:00
|
|
|
fd < files_fdtable(files)->max_fds;
|
2006-10-02 09:17:05 +00:00
|
|
|
fd++, filp->f_pos++) {
|
2007-05-08 07:26:17 +00:00
|
|
|
char name[PROC_NUMBUF];
|
|
|
|
int len;
|
2006-10-02 09:17:05 +00:00
|
|
|
|
|
|
|
if (!fcheck_files(files, fd))
|
|
|
|
continue;
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
2007-05-08 07:26:17 +00:00
|
|
|
len = snprintf(name, sizeof(name), "%d", fd);
|
|
|
|
if (proc_fill_cache(filp, dirent, filldir,
|
|
|
|
name, len, instantiate,
|
|
|
|
p, &fd) < 0) {
|
2006-10-02 09:17:05 +00:00
|
|
|
rcu_read_lock();
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
rcu_read_lock();
|
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
|
|
|
put_files_struct(files);
|
|
|
|
}
|
|
|
|
out:
|
|
|
|
put_task_struct(p);
|
|
|
|
out_no_task:
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
2007-05-08 07:26:17 +00:00
|
|
|
static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
|
|
|
|
struct nameidata *nd)
|
|
|
|
{
|
|
|
|
return proc_lookupfd_common(dir, dentry, proc_fd_instantiate);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int proc_readfd(struct file *filp, void *dirent, filldir_t filldir)
|
|
|
|
{
|
|
|
|
return proc_readfd_common(filp, dirent, filldir, proc_fd_instantiate);
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t proc_fdinfo_read(struct file *file, char __user *buf,
|
|
|
|
size_t len, loff_t *ppos)
|
|
|
|
{
|
|
|
|
char tmp[PROC_FDINFO_MAX];
|
2008-02-15 03:38:35 +00:00
|
|
|
int err = proc_fd_info(file->f_path.dentry->d_inode, NULL, tmp);
|
2007-05-08 07:26:17 +00:00
|
|
|
if (!err)
|
|
|
|
err = simple_read_from_buffer(buf, len, ppos, tmp, strlen(tmp));
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct file_operations proc_fdinfo_file_operations = {
|
|
|
|
.open = nonseekable_open,
|
|
|
|
.read = proc_fdinfo_read,
|
|
|
|
};
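/*
 * Illustrative output (see proc_fd_info() above): reading
 * /proc/<pid>/fdinfo/<fd> yields, e.g.
 *	pos:	0
 *	flags:	0100002
 * i.e. the current file position and the open flags in octal.
 */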
|
|
|
|
|
2007-02-12 08:55:34 +00:00
|
|
|
static const struct file_operations proc_fd_operations = {
|
2006-10-02 09:17:05 +00:00
|
|
|
.read = generic_read_dir,
|
|
|
|
.readdir = proc_readfd,
|
2005-04-16 22:20:36 +00:00
|
|
|
};
|
|
|
|
|
2007-05-08 07:23:35 +00:00
|
|
|
/*
|
|
|
|
* /proc/pid/fd needs a special permission handler so that a process can still
|
|
|
|
* access /proc/self/fd after it has executed a setuid().
|
|
|
|
*/
|
2008-07-16 01:03:57 +00:00
|
|
|
static int proc_fd_permission(struct inode *inode, int mask)
|
2007-05-08 07:23:35 +00:00
|
|
|
{
|
|
|
|
int rv;
|
|
|
|
|
|
|
|
rv = generic_permission(inode, mask, NULL);
|
|
|
|
if (rv == 0)
|
|
|
|
return 0;
|
|
|
|
if (task_pid(current) == proc_pid(inode))
|
|
|
|
rv = 0;
|
|
|
|
return rv;
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* proc directories can do almost nothing..
|
|
|
|
*/
|
2007-02-12 08:55:40 +00:00
|
|
|
static const struct inode_operations proc_fd_inode_operations = {
|
2005-04-16 22:20:36 +00:00
|
|
|
.lookup = proc_lookupfd,
|
2007-05-08 07:23:35 +00:00
|
|
|
.permission = proc_fd_permission,
|
2006-07-15 19:26:45 +00:00
|
|
|
.setattr = proc_setattr,
|
2005-04-16 22:20:36 +00:00
|
|
|
};
|
|
|
|
|
2007-05-08 07:26:17 +00:00
|
|
|
static struct dentry *proc_fdinfo_instantiate(struct inode *dir,
|
|
|
|
struct dentry *dentry, struct task_struct *task, const void *ptr)
|
|
|
|
{
|
|
|
|
unsigned fd = *(unsigned *)ptr;
|
|
|
|
struct inode *inode;
|
|
|
|
struct proc_inode *ei;
|
|
|
|
struct dentry *error = ERR_PTR(-ENOENT);
|
|
|
|
|
|
|
|
inode = proc_pid_make_inode(dir->i_sb, task);
|
|
|
|
if (!inode)
|
|
|
|
goto out;
|
|
|
|
ei = PROC_I(inode);
|
|
|
|
ei->fd = fd;
|
|
|
|
inode->i_mode = S_IFREG | S_IRUSR;
|
|
|
|
inode->i_fop = &proc_fdinfo_file_operations;
|
|
|
|
dentry->d_op = &tid_fd_dentry_operations;
|
|
|
|
d_add(dentry, inode);
|
|
|
|
/* Close the race of the process dying before we return the dentry */
|
|
|
|
if (tid_fd_revalidate(dentry, NULL))
|
|
|
|
error = NULL;
|
|
|
|
|
|
|
|
out:
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct dentry *proc_lookupfdinfo(struct inode *dir,
|
|
|
|
struct dentry *dentry,
|
|
|
|
struct nameidata *nd)
|
|
|
|
{
|
|
|
|
return proc_lookupfd_common(dir, dentry, proc_fdinfo_instantiate);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int proc_readfdinfo(struct file *filp, void *dirent, filldir_t filldir)
|
|
|
|
{
|
|
|
|
return proc_readfd_common(filp, dirent, filldir,
|
|
|
|
proc_fdinfo_instantiate);
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct file_operations proc_fdinfo_operations = {
|
|
|
|
.read = generic_read_dir,
|
|
|
|
.readdir = proc_readfdinfo,
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* proc directories can do almost nothing..
|
|
|
|
*/
|
|
|
|
static const struct inode_operations proc_fdinfo_inode_operations = {
|
|
|
|
.lookup = proc_lookupfdinfo,
|
|
|
|
.setattr = proc_setattr,
|
|
|
|
};
|
|
|
|
|
|
|
|
|
2006-10-02 09:18:49 +00:00
|
|
|
static struct dentry *proc_pident_instantiate(struct inode *dir,
|
2007-05-08 07:26:15 +00:00
|
|
|
struct dentry *dentry, struct task_struct *task, const void *ptr)
|
2006-10-02 09:18:49 +00:00
|
|
|
{
|
2007-05-08 07:26:15 +00:00
|
|
|
const struct pid_entry *p = ptr;
|
2006-10-02 09:18:49 +00:00
|
|
|
struct inode *inode;
|
|
|
|
struct proc_inode *ei;
|
2009-05-28 21:34:21 +00:00
|
|
|
struct dentry *error = ERR_PTR(-ENOENT);
|
2006-10-02 09:18:49 +00:00
|
|
|
|
2006-10-02 09:18:49 +00:00
|
|
|
inode = proc_pid_make_inode(dir->i_sb, task);
|
2006-10-02 09:18:49 +00:00
|
|
|
if (!inode)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
ei = PROC_I(inode);
|
|
|
|
inode->i_mode = p->mode;
|
|
|
|
if (S_ISDIR(inode->i_mode))
|
|
|
|
inode->i_nlink = 2; /* Use getattr to fix if necessary */
|
|
|
|
if (p->iop)
|
|
|
|
inode->i_op = p->iop;
|
|
|
|
if (p->fop)
|
|
|
|
inode->i_fop = p->fop;
|
|
|
|
ei->op = p->op;
|
|
|
|
dentry->d_op = &pid_dentry_operations;
|
|
|
|
d_add(dentry, inode);
|
|
|
|
/* Close the race of the process dying before we return the dentry */
|
|
|
|
if (pid_revalidate(dentry, NULL))
|
|
|
|
error = NULL;
|
|
|
|
out:
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
static struct dentry *proc_pident_lookup(struct inode *dir,
|
|
|
|
struct dentry *dentry,
|
2007-05-08 07:26:15 +00:00
|
|
|
const struct pid_entry *ents,
|
2006-10-02 09:18:56 +00:00
|
|
|
unsigned int nents)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2006-06-26 07:25:49 +00:00
|
|
|
struct dentry *error;
|
2006-06-26 07:25:55 +00:00
|
|
|
struct task_struct *task = get_proc_task(dir);
|
2007-05-08 07:26:15 +00:00
|
|
|
const struct pid_entry *p, *last;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-06-26 07:25:49 +00:00
|
|
|
error = ERR_PTR(-ENOENT);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-06-26 07:25:55 +00:00
|
|
|
if (!task)
|
|
|
|
goto out_no_task;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-10-02 09:17:07 +00:00
|
|
|
/*
|
|
|
|
* Yes, it does not scale. And it should not. Don't add
|
|
|
|
* new entries into /proc/<tgid>/ without very good reasons.
|
|
|
|
*/
|
2006-10-02 09:18:56 +00:00
|
|
|
last = &ents[nents - 1];
|
|
|
|
for (p = ents; p <= last; p++) {
|
2005-04-16 22:20:36 +00:00
|
|
|
if (p->len != dentry->d_name.len)
|
|
|
|
continue;
|
|
|
|
if (!memcmp(dentry->d_name.name, p->name, p->len))
|
|
|
|
break;
|
|
|
|
}
|
2006-10-02 09:18:56 +00:00
|
|
|
if (p > last)
|
2005-04-16 22:20:36 +00:00
|
|
|
goto out;
|
|
|
|
|
2006-10-02 09:18:49 +00:00
|
|
|
error = proc_pident_instantiate(dir, dentry, task, p);
|
2005-04-16 22:20:36 +00:00
|
|
|
out:
|
2006-06-26 07:25:55 +00:00
|
|
|
put_task_struct(task);
|
|
|
|
out_no_task:
|
2006-06-26 07:25:49 +00:00
|
|
|
return error;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2007-05-08 07:26:15 +00:00
|
|
|
static int proc_pident_fill_cache(struct file *filp, void *dirent,
|
|
|
|
filldir_t filldir, struct task_struct *task, const struct pid_entry *p)
|
2006-10-02 09:18:49 +00:00
|
|
|
{
|
|
|
|
return proc_fill_cache(filp, dirent, filldir, p->name, p->len,
|
|
|
|
proc_pident_instantiate, task, p);
|
|
|
|
}
|
|
|
|
|
2006-10-02 09:17:05 +00:00
|
|
|
static int proc_pident_readdir(struct file *filp,
|
|
|
|
void *dirent, filldir_t filldir,
|
2007-05-08 07:26:15 +00:00
|
|
|
const struct pid_entry *ents, unsigned int nents)
|
2006-10-02 09:17:05 +00:00
|
|
|
{
|
|
|
|
int i;
|
2006-12-08 10:36:36 +00:00
|
|
|
struct dentry *dentry = filp->f_path.dentry;
|
2006-10-02 09:17:05 +00:00
|
|
|
struct inode *inode = dentry->d_inode;
|
|
|
|
struct task_struct *task = get_proc_task(inode);
|
2007-05-08 07:26:15 +00:00
|
|
|
const struct pid_entry *p, *last;
|
2006-10-02 09:17:05 +00:00
|
|
|
ino_t ino;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = -ENOENT;
|
|
|
|
if (!task)
|
2006-10-02 09:18:49 +00:00
|
|
|
goto out_no_task;
|
2006-10-02 09:17:05 +00:00
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
i = filp->f_pos;
|
|
|
|
switch (i) {
|
|
|
|
case 0:
|
|
|
|
ino = inode->i_ino;
|
|
|
|
if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
|
|
|
|
goto out;
|
|
|
|
i++;
|
|
|
|
filp->f_pos++;
|
|
|
|
/* fall through */
|
|
|
|
case 1:
|
|
|
|
ino = parent_ino(dentry);
|
|
|
|
if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0)
|
|
|
|
goto out;
|
|
|
|
i++;
|
|
|
|
filp->f_pos++;
|
|
|
|
/* fall through */
|
|
|
|
default:
|
|
|
|
i -= 2;
|
|
|
|
if (i >= nents) {
|
|
|
|
ret = 1;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
p = ents + i;
|
2006-10-02 09:18:56 +00:00
|
|
|
last = &ents[nents - 1];
|
|
|
|
while (p <= last) {
|
2006-10-02 09:18:49 +00:00
|
|
|
if (proc_pident_fill_cache(filp, dirent, filldir, task, p) < 0)
|
2006-10-02 09:17:05 +00:00
|
|
|
goto out;
|
|
|
|
filp->f_pos++;
|
|
|
|
p++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = 1;
|
|
|
|
out:
|
2006-10-02 09:18:49 +00:00
|
|
|
put_task_struct(task);
|
|
|
|
out_no_task:
|
2006-10-02 09:17:05 +00:00
|
|
|
return ret;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2006-10-02 09:17:05 +00:00
|
|
|
#ifdef CONFIG_SECURITY
|
|
|
|
static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
|
|
|
|
size_t count, loff_t *ppos)
|
|
|
|
{
|
2006-12-08 10:36:36 +00:00
|
|
|
struct inode * inode = file->f_path.dentry->d_inode;
|
2007-03-12 16:17:58 +00:00
|
|
|
char *p = NULL;
|
2006-10-02 09:17:05 +00:00
|
|
|
ssize_t length;
|
|
|
|
struct task_struct *task = get_proc_task(inode);
|
|
|
|
|
|
|
|
if (!task)
|
2007-03-12 16:17:58 +00:00
|
|
|
return -ESRCH;
|
2006-10-02 09:17:05 +00:00
|
|
|
|
|
|
|
length = security_getprocattr(task,
|
2006-12-08 10:36:36 +00:00
|
|
|
(char*)file->f_path.dentry->d_name.name,
|
2007-03-12 16:17:58 +00:00
|
|
|
&p);
|
2006-10-02 09:17:05 +00:00
|
|
|
put_task_struct(task);
|
2007-03-12 16:17:58 +00:00
|
|
|
if (length > 0)
|
|
|
|
length = simple_read_from_buffer(buf, count, ppos, p, length);
|
|
|
|
kfree(p);
|
2006-10-02 09:17:05 +00:00
|
|
|
return length;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2006-10-02 09:17:05 +00:00
|
|
|
static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
|
|
|
|
size_t count, loff_t *ppos)
|
|
|
|
{
|
2006-12-08 10:36:36 +00:00
|
|
|
struct inode * inode = file->f_path.dentry->d_inode;
|
2006-10-02 09:17:05 +00:00
|
|
|
char *page;
|
|
|
|
ssize_t length;
|
|
|
|
struct task_struct *task = get_proc_task(inode);
|
|
|
|
|
|
|
|
length = -ESRCH;
|
|
|
|
if (!task)
|
|
|
|
goto out_no_task;
|
|
|
|
if (count > PAGE_SIZE)
|
|
|
|
count = PAGE_SIZE;
|
|
|
|
|
|
|
|
/* No partial writes. */
|
|
|
|
length = -EINVAL;
|
|
|
|
if (*ppos != 0)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
length = -ENOMEM;
|
2007-10-16 08:25:52 +00:00
|
|
|
page = (char*)__get_free_page(GFP_TEMPORARY);
|
2006-10-02 09:17:05 +00:00
|
|
|
if (!page)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
length = -EFAULT;
|
|
|
|
if (copy_from_user(page, buf, count))
|
|
|
|
goto out_free;
|
|
|
|
|
2009-05-08 12:55:27 +00:00
|
|
|
/* Guard against adverse ptrace interaction */
|
|
|
|
length = mutex_lock_interruptible(&task->cred_guard_mutex);
|
|
|
|
if (length < 0)
|
|
|
|
goto out_free;
|
|
|
|
|
2006-10-02 09:17:05 +00:00
|
|
|
length = security_setprocattr(task,
|
2006-12-08 10:36:36 +00:00
|
|
|
(char*)file->f_path.dentry->d_name.name,
|
2006-10-02 09:17:05 +00:00
|
|
|
(void*)page, count);
|
2009-05-08 12:55:27 +00:00
|
|
|
mutex_unlock(&task->cred_guard_mutex);
|
2006-10-02 09:17:05 +00:00
|
|
|
out_free:
|
|
|
|
free_page((unsigned long) page);
|
|
|
|
out:
|
|
|
|
put_task_struct(task);
|
|
|
|
out_no_task:
|
|
|
|
return length;
|
|
|
|
}
|
|
|
|
|
2007-02-12 08:55:34 +00:00
|
|
|
static const struct file_operations proc_pid_attr_operations = {
|
2006-10-02 09:17:05 +00:00
|
|
|
.read = proc_pid_attr_read,
|
|
|
|
.write = proc_pid_attr_write,
|
|
|
|
};
|
|
|
|
|
2007-05-08 07:26:15 +00:00
|
|
|
static const struct pid_entry attr_dir_stuff[] = {
|
2008-11-09 22:32:52 +00:00
|
|
|
REG("current", S_IRUGO|S_IWUGO, proc_pid_attr_operations),
|
|
|
|
REG("prev", S_IRUGO, proc_pid_attr_operations),
|
|
|
|
REG("exec", S_IRUGO|S_IWUGO, proc_pid_attr_operations),
|
|
|
|
REG("fscreate", S_IRUGO|S_IWUGO, proc_pid_attr_operations),
|
|
|
|
REG("keycreate", S_IRUGO|S_IWUGO, proc_pid_attr_operations),
|
|
|
|
REG("sockcreate", S_IRUGO|S_IWUGO, proc_pid_attr_operations),
|
2006-10-02 09:17:05 +00:00
|
|
|
};
|
|
|
|
|
2006-10-02 09:18:50 +00:00
|
|
|
static int proc_attr_dir_readdir(struct file * filp,
|
2006-10-02 09:17:05 +00:00
|
|
|
void * dirent, filldir_t filldir)
|
|
|
|
{
|
|
|
|
return proc_pident_readdir(filp,dirent,filldir,
|
2006-10-02 09:18:50 +00:00
|
|
|
attr_dir_stuff,ARRAY_SIZE(attr_dir_stuff));
|
2006-10-02 09:17:05 +00:00
|
|
|
}
|
|
|
|
|
2007-02-12 08:55:34 +00:00
|
|
|
static const struct file_operations proc_attr_dir_operations = {
|
2005-04-16 22:20:36 +00:00
|
|
|
.read = generic_read_dir,
|
2006-10-02 09:18:50 +00:00
|
|
|
.readdir = proc_attr_dir_readdir,
|
2005-04-16 22:20:36 +00:00
|
|
|
};
|
|
|
|
|
2006-10-02 09:18:50 +00:00
|
|
|
static struct dentry *proc_attr_dir_lookup(struct inode *dir,
|
2006-10-02 09:17:05 +00:00
|
|
|
struct dentry *dentry, struct nameidata *nd)
|
|
|
|
{
|
2006-10-02 09:18:56 +00:00
|
|
|
return proc_pident_lookup(dir, dentry,
|
|
|
|
attr_dir_stuff, ARRAY_SIZE(attr_dir_stuff));
|
2006-10-02 09:17:05 +00:00
|
|
|
}
|
|
|
|
|
2007-02-12 08:55:40 +00:00
|
|
|
static const struct inode_operations proc_attr_dir_inode_operations = {
|
2006-10-02 09:18:50 +00:00
|
|
|
.lookup = proc_attr_dir_lookup,
|
2006-06-26 07:25:55 +00:00
|
|
|
.getattr = pid_getattr,
|
2006-07-15 19:26:45 +00:00
|
|
|
.setattr = proc_setattr,
|
2005-04-16 22:20:36 +00:00
|
|
|
};
|
|
|
|
|
2006-10-02 09:17:05 +00:00
|
|
|
#endif
|
|
|
|
|
2007-07-19 08:48:28 +00:00
|
|
|
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
|
|
|
|
static ssize_t proc_coredump_filter_read(struct file *file, char __user *buf,
|
|
|
|
size_t count, loff_t *ppos)
|
|
|
|
{
|
|
|
|
struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
|
|
|
|
struct mm_struct *mm;
|
|
|
|
char buffer[PROC_NUMBUF];
|
|
|
|
size_t len;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!task)
|
|
|
|
return -ESRCH;
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
mm = get_task_mm(task);
|
|
|
|
if (mm) {
|
|
|
|
len = snprintf(buffer, sizeof(buffer), "%08lx\n",
|
|
|
|
((mm->flags & MMF_DUMP_FILTER_MASK) >>
|
|
|
|
MMF_DUMP_FILTER_SHIFT));
|
|
|
|
mmput(mm);
|
|
|
|
ret = simple_read_from_buffer(buf, count, ppos, buffer, len);
|
|
|
|
}
|
|
|
|
|
|
|
|
put_task_struct(task);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t proc_coredump_filter_write(struct file *file,
|
|
|
|
const char __user *buf,
|
|
|
|
size_t count,
|
|
|
|
loff_t *ppos)
|
|
|
|
{
|
|
|
|
struct task_struct *task;
|
|
|
|
struct mm_struct *mm;
|
|
|
|
char buffer[PROC_NUMBUF], *end;
|
|
|
|
unsigned int val;
|
|
|
|
int ret;
|
|
|
|
int i;
|
|
|
|
unsigned long mask;
|
|
|
|
|
|
|
|
ret = -EFAULT;
|
|
|
|
memset(buffer, 0, sizeof(buffer));
|
|
|
|
if (count > sizeof(buffer) - 1)
|
|
|
|
count = sizeof(buffer) - 1;
|
|
|
|
if (copy_from_user(buffer, buf, count))
|
|
|
|
goto out_no_task;
|
|
|
|
|
|
|
|
ret = -EINVAL;
|
|
|
|
val = (unsigned int)simple_strtoul(buffer, &end, 0);
|
|
|
|
if (*end == '\n')
|
|
|
|
end++;
|
|
|
|
if (end - buffer == 0)
|
|
|
|
goto out_no_task;
|
|
|
|
|
|
|
|
ret = -ESRCH;
|
|
|
|
task = get_proc_task(file->f_dentry->d_inode);
|
|
|
|
if (!task)
|
|
|
|
goto out_no_task;
|
|
|
|
|
|
|
|
ret = end - buffer;
|
|
|
|
mm = get_task_mm(task);
|
|
|
|
if (!mm)
|
|
|
|
goto out_no_mm;
|
|
|
|
|
|
|
|
for (i = 0, mask = 1; i < MMF_DUMP_FILTER_BITS; i++, mask <<= 1) {
|
|
|
|
if (val & mask)
|
|
|
|
set_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags);
|
|
|
|
else
|
|
|
|
clear_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
mmput(mm);
|
|
|
|
out_no_mm:
|
|
|
|
put_task_struct(task);
|
|
|
|
out_no_task:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct file_operations proc_coredump_filter_operations = {
|
|
|
|
.read = proc_coredump_filter_read,
|
|
|
|
.write = proc_coredump_filter_write,
|
|
|
|
};
|
|
|
|
#endif
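/*
 * Illustrative userspace sketch (not part of this file): the filter is
 * a small bitmask (on kernels of this vintage bit 0 is anonymous
 * private memory, bit 1 anonymous shared, then file-backed mappings),
 * printed in hex by the read side above and parsed with
 * simple_strtoul(..., 0) on the write side, so "3" and "0x3" both work.
 */
#if 0	/* example only */
#include <stdio.h>

static int set_coredump_filter(unsigned long mask)
{
	FILE *f = fopen("/proc/self/coredump_filter", "w");

	if (!f)
		return -1;
	fprintf(f, "0x%lx", mask);
	return fclose(f);
}
#endif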
|
|
|
|
|
2006-10-02 09:17:05 +00:00
|
|
|
/*
|
|
|
|
* /proc/self:
|
|
|
|
*/
|
|
|
|
static int proc_self_readlink(struct dentry *dentry, char __user *buffer,
|
|
|
|
int buflen)
|
|
|
|
{
|
2008-02-08 12:18:34 +00:00
|
|
|
struct pid_namespace *ns = dentry->d_sb->s_fs_info;
|
2008-02-08 23:00:43 +00:00
|
|
|
pid_t tgid = task_tgid_nr_ns(current, ns);
|
2006-10-02 09:17:05 +00:00
|
|
|
char tmp[PROC_NUMBUF];
|
2008-02-08 23:00:43 +00:00
|
|
|
if (!tgid)
|
2008-02-08 12:18:34 +00:00
|
|
|
return -ENOENT;
|
2008-02-08 23:00:43 +00:00
|
|
|
sprintf(tmp, "%d", tgid);
|
2006-10-02 09:17:05 +00:00
|
|
|
return vfs_readlink(dentry,buffer,buflen,tmp);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
|
|
|
|
{
|
2008-02-08 12:18:34 +00:00
|
|
|
struct pid_namespace *ns = dentry->d_sb->s_fs_info;
|
2008-02-08 23:00:43 +00:00
|
|
|
pid_t tgid = task_tgid_nr_ns(current, ns);
|
2006-10-02 09:17:05 +00:00
|
|
|
char tmp[PROC_NUMBUF];
|
2008-02-08 23:00:43 +00:00
|
|
|
if (!tgid)
|
2008-02-08 12:18:34 +00:00
|
|
|
return ERR_PTR(-ENOENT);
|
2008-02-08 23:00:43 +00:00
|
|
|
sprintf(tmp, "%d", task_tgid_nr_ns(current, ns));
|
2006-10-02 09:17:05 +00:00
|
|
|
return ERR_PTR(vfs_follow_link(nd,tmp));
|
|
|
|
}
|
|
|
|
|
2007-02-12 08:55:40 +00:00
|
|
|
static const struct inode_operations proc_self_inode_operations = {
|
2006-10-02 09:17:05 +00:00
|
|
|
.readlink = proc_self_readlink,
|
|
|
|
.follow_link = proc_self_follow_link,
|
|
|
|
};
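/*
 * Illustrative userspace sketch (not part of this file): /proc/self is
 * a symlink resolved per-reader; readlink() returns the caller's own
 * tgid as rendered by proc_self_readlink() above.
 */
#if 0	/* example only */
#include <stdio.h>
#include <unistd.h>

static void print_self(void)
{
	char buf[32];
	ssize_t n = readlink("/proc/self", buf, sizeof(buf) - 1);

	if (n > 0) {
		buf[n] = '\0';
		printf("I am /proc/%s\n", buf);	/* e.g. "1234" */
	}
}
#endif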
|
|
|
|
|
2006-10-02 09:18:48 +00:00
|
|
|
/*
|
|
|
|
* proc base
|
|
|
|
*
|
|
|
|
* These are the directory entries in the root directory of /proc
|
|
|
|
 * that properly belong to the /proc filesystem, as they
|
|
|
|
* describe something that is process related.
|
|
|
|
*/
|
2007-05-08 07:26:15 +00:00
|
|
|
static const struct pid_entry proc_base_stuff[] = {
|
2006-10-02 09:18:49 +00:00
|
|
|
NOD("self", S_IFLNK|S_IRWXUGO,
|
2006-10-02 09:18:48 +00:00
|
|
|
&proc_self_inode_operations, NULL, {}),
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Exceptional case: normally we are not allowed to unhash a busy
|
|
|
|
* directory. In this case, however, we can do it - no aliasing problems
|
|
|
|
* due to the way we treat inodes.
|
|
|
|
*/
|
|
|
|
static int proc_base_revalidate(struct dentry *dentry, struct nameidata *nd)
|
|
|
|
{
|
|
|
|
struct inode *inode = dentry->d_inode;
|
|
|
|
struct task_struct *task = get_proc_task(inode);
|
|
|
|
if (task) {
|
|
|
|
put_task_struct(task);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
d_drop(dentry);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-02-20 05:58:47 +00:00
|
|
|
static const struct dentry_operations proc_base_dentry_operations =
|
2006-10-02 09:18:48 +00:00
|
|
|
{
|
|
|
|
.d_revalidate = proc_base_revalidate,
|
|
|
|
.d_delete = pid_delete_dentry,
|
|
|
|
};
|
|
|
|
|
2006-10-02 09:18:49 +00:00
|
|
|
static struct dentry *proc_base_instantiate(struct inode *dir,
|
2007-05-08 07:26:15 +00:00
|
|
|
struct dentry *dentry, struct task_struct *task, const void *ptr)
|
2006-10-02 09:18:48 +00:00
|
|
|
{
|
2007-05-08 07:26:15 +00:00
|
|
|
const struct pid_entry *p = ptr;
|
2006-10-02 09:18:48 +00:00
|
|
|
struct inode *inode;
|
|
|
|
struct proc_inode *ei;
|
2006-10-02 09:18:49 +00:00
|
|
|
struct dentry *error = ERR_PTR(-EINVAL);
|
2006-10-02 09:18:48 +00:00
|
|
|
|
|
|
|
/* Allocate the inode */
|
|
|
|
error = ERR_PTR(-ENOMEM);
|
|
|
|
inode = new_inode(dir->i_sb);
|
|
|
|
if (!inode)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
/* Initialize the inode */
|
|
|
|
ei = PROC_I(inode);
|
|
|
|
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* grab the reference to the task.
|
|
|
|
*/
|
2006-10-02 09:18:59 +00:00
|
|
|
ei->pid = get_task_pid(task, PIDTYPE_PID);
|
2006-10-02 09:18:48 +00:00
|
|
|
if (!ei->pid)
|
|
|
|
goto out_iput;
|
|
|
|
|
|
|
|
inode->i_mode = p->mode;
|
|
|
|
if (S_ISDIR(inode->i_mode))
|
|
|
|
inode->i_nlink = 2;
|
|
|
|
if (S_ISLNK(inode->i_mode))
|
|
|
|
inode->i_size = 64;
|
|
|
|
if (p->iop)
|
|
|
|
inode->i_op = p->iop;
|
|
|
|
if (p->fop)
|
|
|
|
inode->i_fop = p->fop;
|
|
|
|
ei->op = p->op;
|
|
|
|
dentry->d_op = &proc_base_dentry_operations;
|
|
|
|
d_add(dentry, inode);
|
|
|
|
error = NULL;
|
|
|
|
out:
|
|
|
|
return error;
|
|
|
|
out_iput:
|
|
|
|
iput(inode);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2006-10-02 09:18:49 +00:00
|
|
|
static struct dentry *proc_base_lookup(struct inode *dir, struct dentry *dentry)
|
|
|
|
{
|
|
|
|
struct dentry *error;
|
|
|
|
struct task_struct *task = get_proc_task(dir);
|
2007-05-08 07:26:15 +00:00
|
|
|
const struct pid_entry *p, *last;
|
2006-10-02 09:18:49 +00:00
|
|
|
|
|
|
|
error = ERR_PTR(-ENOENT);
|
|
|
|
|
|
|
|
if (!task)
|
|
|
|
goto out_no_task;
|
|
|
|
|
|
|
|
/* Lookup the directory entry */
|
2006-10-02 09:18:56 +00:00
|
|
|
last = &proc_base_stuff[ARRAY_SIZE(proc_base_stuff) - 1];
|
|
|
|
for (p = proc_base_stuff; p <= last; p++) {
|
2006-10-02 09:18:49 +00:00
|
|
|
if (p->len != dentry->d_name.len)
|
|
|
|
continue;
|
|
|
|
if (!memcmp(dentry->d_name.name, p->name, p->len))
|
|
|
|
break;
|
|
|
|
}
|
2006-10-02 09:18:56 +00:00
|
|
|
if (p > last)
|
2006-10-02 09:18:49 +00:00
|
|
|
goto out;
|
|
|
|
|
|
|
|
error = proc_base_instantiate(dir, dentry, task, p);
|
|
|
|
|
|
|
|
out:
|
|
|
|
put_task_struct(task);
|
|
|
|
out_no_task:
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
2007-05-08 07:26:15 +00:00
|
|
|
static int proc_base_fill_cache(struct file *filp, void *dirent,
|
|
|
|
filldir_t filldir, struct task_struct *task, const struct pid_entry *p)
|
2006-10-02 09:18:49 +00:00
|
|
|
{
|
|
|
|
return proc_fill_cache(filp, dirent, filldir, p->name, p->len,
|
|
|
|
proc_base_instantiate, task, p);
|
|
|
|
}
|
|
|
|
|
2006-12-10 10:19:48 +00:00
|
|
|
#ifdef CONFIG_TASK_IO_ACCOUNTING
|
2008-07-25 08:48:49 +00:00
|
|
|
static int do_io_accounting(struct task_struct *task, char *buffer, int whole)
|
|
|
|
{
|
2008-07-27 22:48:12 +00:00
|
|
|
struct task_io_accounting acct = task->ioac;
|
2008-07-27 15:29:15 +00:00
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
if (whole && lock_task_sighand(task, &flags)) {
|
|
|
|
struct task_struct *t = task;
|
|
|
|
|
|
|
|
task_io_accounting_add(&acct, &task->signal->ioac);
|
|
|
|
while_each_thread(task, t)
|
|
|
|
task_io_accounting_add(&acct, &t->ioac);
|
|
|
|
|
|
|
|
unlock_task_sighand(task, &flags);
|
2008-07-25 08:48:49 +00:00
|
|
|
}
|
2006-12-10 10:19:48 +00:00
|
|
|
return sprintf(buffer,
|
|
|
|
"rchar: %llu\n"
|
|
|
|
"wchar: %llu\n"
|
|
|
|
"syscr: %llu\n"
|
|
|
|
"syscw: %llu\n"
|
|
|
|
"read_bytes: %llu\n"
|
|
|
|
"write_bytes: %llu\n"
|
|
|
|
"cancelled_write_bytes: %llu\n",
|
2008-08-05 20:01:34 +00:00
|
|
|
(unsigned long long)acct.rchar,
|
|
|
|
(unsigned long long)acct.wchar,
|
|
|
|
(unsigned long long)acct.syscr,
|
|
|
|
(unsigned long long)acct.syscw,
|
|
|
|
(unsigned long long)acct.read_bytes,
|
|
|
|
(unsigned long long)acct.write_bytes,
|
|
|
|
(unsigned long long)acct.cancelled_write_bytes);
|
2008-07-25 08:48:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int proc_tid_io_accounting(struct task_struct *task, char *buffer)
|
|
|
|
{
|
|
|
|
return do_io_accounting(task, buffer, 0);
|
2006-12-10 10:19:48 +00:00
|
|
|
}
|
2008-07-25 08:48:49 +00:00
|
|
|
|
|
|
|
static int proc_tgid_io_accounting(struct task_struct *task, char *buffer)
|
|
|
|
{
|
|
|
|
return do_io_accounting(task, buffer, 1);
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_TASK_IO_ACCOUNTING */
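/*
 * Illustrative userspace sketch (not part of this file): /proc/<pid>/io
 * is the key/value text built by do_io_accounting() above; rchar/wchar
 * count bytes passed through read()/write(), while read_bytes and
 * write_bytes count actual storage I/O.
 */
#if 0	/* example only */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static long long task_read_bytes(pid_t pid)
{
	char path[64], key[32];
	long long val;
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%d/io", (int)pid);
	f = fopen(path, "r");
	if (!f)
		return -1;
	while (fscanf(f, "%31s %lld", key, &val) == 2) {
		if (!strcmp(key, "read_bytes:")) {
			fclose(f);
			return val;
		}
	}
	fclose(f);
	return -1;
}
#endif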
|
2006-12-10 10:19:48 +00:00
|
|
|
|
2008-10-05 23:11:58 +00:00
|
|
|
static int proc_pid_personality(struct seq_file *m, struct pid_namespace *ns,
|
|
|
|
struct pid *pid, struct task_struct *task)
|
|
|
|
{
|
|
|
|
seq_printf(m, "%08x\n", task->personality);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2006-10-02 09:17:05 +00:00
|
|
|
/*
|
|
|
|
* Thread groups
|
|
|
|
*/
|
2007-02-12 08:55:34 +00:00
|
|
|
static const struct file_operations proc_task_operations;
|
2007-02-12 08:55:40 +00:00
|
|
|
static const struct inode_operations proc_task_inode_operations;
|
2006-10-02 09:17:07 +00:00
|
|
|
|
2007-05-08 07:26:15 +00:00
|
|
|
static const struct pid_entry tgid_base_stuff[] = {
|
2008-11-09 22:32:52 +00:00
|
|
|
DIR("task", S_IRUGO|S_IXUGO, proc_task_inode_operations, proc_task_operations),
|
|
|
|
DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
|
|
|
|
DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
|
2008-03-12 01:03:35 +00:00
|
|
|
#ifdef CONFIG_NET
|
2008-11-09 22:32:52 +00:00
|
|
|
DIR("net", S_IRUGO|S_IXUGO, proc_net_inode_operations, proc_net_operations),
|
2008-03-12 01:03:35 +00:00
|
|
|
#endif
|
2008-11-09 22:32:52 +00:00
|
|
|
REG("environ", S_IRUSR, proc_environ_operations),
|
|
|
|
INF("auxv", S_IRUSR, proc_pid_auxv),
|
|
|
|
ONE("status", S_IRUGO, proc_pid_status),
|
|
|
|
ONE("personality", S_IRUSR, proc_pid_personality),
|
|
|
|
INF("limits", S_IRUSR, proc_pid_limits),
|
2007-07-09 16:52:00 +00:00
|
|
|
#ifdef CONFIG_SCHED_DEBUG
|
2008-11-09 22:32:52 +00:00
|
|
|
REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
|
2008-07-26 02:46:00 +00:00
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
|
2008-11-09 22:32:52 +00:00
|
|
|
INF("syscall", S_IRUSR, proc_pid_syscall),
|
2007-07-09 16:52:00 +00:00
|
|
|
#endif
|
2008-11-09 22:32:52 +00:00
|
|
|
INF("cmdline", S_IRUGO, proc_pid_cmdline),
|
|
|
|
ONE("stat", S_IRUGO, proc_tgid_stat),
|
|
|
|
ONE("statm", S_IRUGO, proc_pid_statm),
|
|
|
|
REG("maps", S_IRUGO, proc_maps_operations),
|
2006-10-02 09:17:05 +00:00
|
|
|
#ifdef CONFIG_NUMA
|
2008-11-09 22:32:52 +00:00
|
|
|
REG("numa_maps", S_IRUGO, proc_numa_maps_operations),
|
2006-10-02 09:17:05 +00:00
|
|
|
#endif
|
2008-11-09 22:32:52 +00:00
|
|
|
REG("mem", S_IRUSR|S_IWUSR, proc_mem_operations),
|
|
|
|
LNK("cwd", proc_cwd_link),
|
|
|
|
LNK("root", proc_root_link),
|
|
|
|
LNK("exe", proc_exe_link),
|
|
|
|
REG("mounts", S_IRUGO, proc_mounts_operations),
|
|
|
|
REG("mountinfo", S_IRUGO, proc_mountinfo_operations),
|
|
|
|
REG("mountstats", S_IRUSR, proc_mountstats_operations),
|
2008-02-05 06:29:07 +00:00
|
|
|
#ifdef CONFIG_PROC_PAGE_MONITOR
|
2008-11-09 22:32:52 +00:00
|
|
|
REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
|
|
|
|
REG("smaps", S_IRUGO, proc_smaps_operations),
|
|
|
|
REG("pagemap", S_IRUSR, proc_pagemap_operations),
|
2006-10-02 09:17:05 +00:00
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_SECURITY
|
2008-11-09 22:32:52 +00:00
|
|
|
DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
|
2006-10-02 09:17:05 +00:00
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_KALLSYMS
|
2008-11-09 22:32:52 +00:00
|
|
|
INF("wchan", S_IRUGO, proc_pid_wchan),
|
2006-10-02 09:17:05 +00:00
|
|
|
#endif
|
2008-11-10 08:26:08 +00:00
|
|
|
#ifdef CONFIG_STACKTRACE
|
|
|
|
ONE("stack", S_IRUSR, proc_pid_stack),
|
2006-10-02 09:17:05 +00:00
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_SCHEDSTATS
|
2008-11-09 22:32:52 +00:00
|
|
|
INF("schedstat", S_IRUGO, proc_pid_schedstat),
|
2006-10-02 09:17:05 +00:00
|
|
|
#endif
|
2008-01-25 20:08:34 +00:00
|
|
|
#ifdef CONFIG_LATENCYTOP
|
2008-11-09 22:32:52 +00:00
|
|
|
REG("latency", S_IRUGO, proc_lstats_operations),
|
2008-01-25 20:08:34 +00:00
|
|
|
#endif
|
2007-10-19 06:39:39 +00:00
|
|
|
#ifdef CONFIG_PROC_PID_CPUSET
|
2008-11-09 22:32:52 +00:00
|
|
|
REG("cpuset", S_IRUGO, proc_cpuset_operations),
|
2007-10-19 06:39:35 +00:00
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_CGROUPS
|
2008-11-09 22:32:52 +00:00
|
|
|
REG("cgroup", S_IRUGO, proc_cgroup_operations),
|
2006-10-02 09:17:05 +00:00
|
|
|
#endif
|
2008-11-09 22:32:52 +00:00
|
|
|
INF("oom_score", S_IRUGO, proc_oom_score),
|
|
|
|
REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adjust_operations),
|
2006-10-02 09:17:05 +00:00
|
|
|
#ifdef CONFIG_AUDITSYSCALL
|
2008-11-09 22:32:52 +00:00
|
|
|
REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations),
|
|
|
|
REG("sessionid", S_IRUGO, proc_sessionid_operations),
|
2006-10-02 09:17:05 +00:00
|
|
|
#endif
|
2006-12-08 10:39:47 +00:00
|
|
|
#ifdef CONFIG_FAULT_INJECTION
|
2008-11-09 22:32:52 +00:00
|
|
|
REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
|
2006-12-08 10:39:47 +00:00
|
|
|
#endif
|
2007-07-19 08:48:28 +00:00
|
|
|
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
|
2008-11-09 22:32:52 +00:00
|
|
|
REG("coredump_filter", S_IRUGO|S_IWUSR, proc_coredump_filter_operations),
|
2007-07-19 08:48:28 +00:00
|
|
|
#endif
|
2006-12-10 10:19:48 +00:00
|
|
|
#ifdef CONFIG_TASK_IO_ACCOUNTING
|
2008-11-09 22:32:52 +00:00
|
|
|
INF("io", S_IRUGO, proc_tgid_io_accounting),
|
2006-12-10 10:19:48 +00:00
|
|
|
#endif
|
2006-10-02 09:17:05 +00:00
|
|
|
};
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-10-02 09:17:05 +00:00
|
|
|
static int proc_tgid_base_readdir(struct file * filp,
|
2005-04-16 22:20:36 +00:00
|
|
|
void * dirent, filldir_t filldir)
|
|
|
|
{
|
|
|
|
return proc_pident_readdir(filp,dirent,filldir,
|
2006-10-02 09:17:05 +00:00
|
|
|
tgid_base_stuff,ARRAY_SIZE(tgid_base_stuff));
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2007-02-12 08:55:34 +00:00
|
|
|
static const struct file_operations proc_tgid_base_operations = {
|
2005-04-16 22:20:36 +00:00
|
|
|
.read = generic_read_dir,
|
2006-10-02 09:17:05 +00:00
|
|
|
.readdir = proc_tgid_base_readdir,
|
2005-04-16 22:20:36 +00:00
|
|
|
};
|
|
|
|
|
2006-10-02 09:17:05 +00:00
|
|
|
static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
|
2006-10-02 09:18:56 +00:00
|
|
|
return proc_pident_lookup(dir, dentry,
|
|
|
|
tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff));
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2007-02-12 08:55:40 +00:00
|
|
|
static const struct inode_operations proc_tgid_base_inode_operations = {
|
2006-10-02 09:17:05 +00:00
|
|
|
.lookup = proc_tgid_base_lookup,
|
2006-06-26 07:25:55 +00:00
|
|
|
.getattr = pid_getattr,
|
2006-07-15 19:26:45 +00:00
|
|
|
.setattr = proc_setattr,
|
2005-04-16 22:20:36 +00:00
|
|
|
};
|
|
|
|
|
2007-10-19 06:40:03 +00:00
|
|
|
static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2006-06-26 07:25:48 +00:00
|
|
|
struct dentry *dentry, *leader, *dir;
|
2006-06-26 07:25:54 +00:00
|
|
|
char buf[PROC_NUMBUF];
|
2006-06-26 07:25:48 +00:00
|
|
|
struct qstr name;
|
|
|
|
|
|
|
|
name.name = buf;
|
2007-10-19 06:40:03 +00:00
|
|
|
name.len = snprintf(buf, sizeof(buf), "%d", pid);
|
|
|
|
dentry = d_hash_and_lookup(mnt->mnt_root, &name);
|
2006-06-26 07:25:48 +00:00
|
|
|
if (dentry) {
|
Fix /proc dcache deadlock in do_exit
This patch fixes a sles9 system hang in start_this_handle from a customer
with some heavy workload where all tasks are waiting on kjournald to commit
the transaction, but kjournald waits on t_updates to go down to zero (it
never does).
This was reported as a lowmem shortage deadlock but when checking the debug
data I noticed the VM wasn't under pressure at all (well it was really
under vm pressure, because lots of tasks hanged in the VM prune_dcache
methods trying to flush dirty inodes, but no task was hanging in GFP_NOFS
mode, the holder of the journal handle should have if this was a vm issue
in the first place).
No task was apparently holding the leftover handle in the committing
transaction, so I deduced t_updates was stuck to 1 because a journal_stop
was never run by some path (this turned out to be correct). With a debug
patch adding proper reverse links and stack trace logging in ext3 deployed
in production, I found journal_stop is never run because
mark_inode_dirty_sync is called inside release_task called by do_exit.
(that was quite fun because I would have never thought about this
subtleness, I thought a regular path in ext3 had a bug and it forgot to
call journal_stop)
do_exit->release_task->mark_inode_dirty_sync->schedule() (will never
come back to run journal_stop)
The reason is that shrink_dcache_parent is racy by design (feature not
a bug) and it can do blocking I/O in some case, but the point is that
calling shrink_dcache_parent at the last stage of do_exit isn't safe
for self-reaping tasks.
I guess the memory pressure of the unbalanced highmem system allowed
to trigger this more easily.
Now mainline doesn't have this line in iput (like sles9 has):
if (inode->i_state & I_DIRTY_DELAYED)
mark_inode_dirty_sync(inode);
so it will probably not crash with ext3, but for example ext2 implements an
I/O-blocking ext2_put_inode that will lead to similar screwups with
ext2_free_blocks never coming back and it's definitely wrong to call
blocking-IO paths inside do_exit. So this should fix a subtle bug in
mainline too (not verified in practice though). The equivalent fix for
ext3 is also not verified yet to fix the problem in sles9 but I don't have
doubt it will (it usually takes days to crash, so it'll take weeks to be
sure).
An alternate fix would be to offload that work to a kernel thread, but I
don't think a reschedule for this is worth it; the vm should be able to
collect those entries for the synchronous release_task.
Signed-off-by: Andrea Arcangeli <andrea@suse.de>
Cc: Jan Kara <jack@ucw.cz>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-02-05 06:29:21 +00:00
|
|
|
if (!(current->flags & PF_EXITING))
|
|
|
|
shrink_dcache_parent(dentry);
|
2006-06-26 07:25:48 +00:00
|
|
|
d_drop(dentry);
|
|
|
|
dput(dentry);
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2007-10-19 06:40:03 +00:00
|
|
|
if (tgid == 0)
|
2006-06-26 07:25:48 +00:00
|
|
|
goto out;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-06-26 07:25:48 +00:00
|
|
|
name.name = buf;
|
2007-10-19 06:40:03 +00:00
|
|
|
name.len = snprintf(buf, sizeof(buf), "%d", tgid);
|
|
|
|
leader = d_hash_and_lookup(mnt->mnt_root, &name);
|
2006-06-26 07:25:48 +00:00
|
|
|
if (!leader)
|
|
|
|
goto out;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-06-26 07:25:48 +00:00
|
|
|
name.name = "task";
|
|
|
|
name.len = strlen(name.name);
|
|
|
|
dir = d_hash_and_lookup(leader, &name);
|
|
|
|
if (!dir)
|
|
|
|
goto out_put_leader;
|
|
|
|
|
|
|
|
name.name = buf;
|
2007-10-19 06:40:03 +00:00
|
|
|
name.len = snprintf(buf, sizeof(buf), "%d", pid);
|
2006-06-26 07:25:48 +00:00
|
|
|
dentry = d_hash_and_lookup(dir, &name);
|
|
|
|
if (dentry) {
|
|
|
|
shrink_dcache_parent(dentry);
|
|
|
|
d_drop(dentry);
|
|
|
|
dput(dentry);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2006-06-26 07:25:48 +00:00
|
|
|
|
|
|
|
dput(dir);
|
|
|
|
out_put_leader:
|
|
|
|
dput(leader);
|
|
|
|
out:
|
|
|
|
return;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2007-10-22 04:00:10 +00:00
|
|
|
/**
|
|
|
|
* proc_flush_task - Remove dcache entries for @task from the /proc dcache.
|
|
|
|
* @task: task that should be flushed.
|
|
|
|
*
|
|
|
|
* When flushing dentries from proc, one needs to flush them from global
|
2007-10-19 06:40:03 +00:00
|
|
|
* proc (proc_mnt) and from all the namespaces' procs this task was seen
|
2007-10-22 04:00:10 +00:00
|
|
|
* in. This call takes care of all of that.
|
|
|
|
*
|
|
|
|
* Looks in the dcache for
|
|
|
|
* /proc/@pid
|
|
|
|
* /proc/@tgid/task/@pid
|
|
|
|
* if either directory is present, flushes it and all of its children
|
|
|
|
* from the dcache.
|
|
|
|
*
|
|
|
|
* It is safe and reasonable to cache /proc entries for a task until
|
|
|
|
* that task exits. After that they just clog up the dcache with
|
|
|
|
* useless entries, possibly causing useful dcache entries to be
|
|
|
|
* flushed instead. This routine is provided to flush those useless
|
|
|
|
* dcache entries at process exit time.
|
|
|
|
*
|
|
|
|
* NOTE: This routine is just an optimization so it does not guarantee
|
|
|
|
* that no dcache entries will exist at process exit time; it
|
|
|
|
* just makes it very unlikely that any will persist.
|
2007-10-19 06:40:03 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
void proc_flush_task(struct task_struct *task)
|
|
|
|
{
|
2007-11-15 01:00:07 +00:00
|
|
|
int i;
|
|
|
|
struct pid *pid, *tgid = NULL;
|
2007-10-19 06:40:11 +00:00
|
|
|
struct upid *upid;
|
|
|
|
|
|
|
|
pid = task_pid(task);
|
2007-11-15 01:00:07 +00:00
|
|
|
if (thread_group_leader(task))
|
|
|
|
tgid = task_tgid(task);
|
2007-10-19 06:40:11 +00:00
|
|
|
|
2007-11-15 01:00:07 +00:00
|
|
|
for (i = 0; i <= pid->level; i++) {
|
2007-10-19 06:40:11 +00:00
|
|
|
upid = &pid->numbers[i];
|
|
|
|
proc_flush_task_mnt(upid->ns->proc_mnt, upid->nr,
|
2007-11-15 01:00:07 +00:00
|
|
|
tgid ? tgid->numbers[i].nr : 0);
|
2007-10-19 06:40:11 +00:00
|
|
|
}
|
2007-10-19 06:40:11 +00:00
|
|
|
|
|
|
|
upid = &pid->numbers[pid->level];
|
|
|
|
if (upid->nr == 1)
|
|
|
|
pid_ns_release_proc(upid->ns);
|
2007-10-19 06:40:03 +00:00
|
|
|
}
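/*
 * Caller sketch (an assumption for illustration: the real call site is
 * release_task() in kernel/exit.c, outside this file). The flush is only
 * useful once the task can no longer gain new /proc users:
 *
 *	release_task(p)
 *		-> proc_flush_task(p)   drop stale /proc/<pid> dentries
 *		-> ... detach pid, free task ...
 */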
|
|
|
|
|
2006-12-07 04:38:31 +00:00
|
|
|
static struct dentry *proc_pid_instantiate(struct inode *dir,
|
|
|
|
struct dentry *dentry,
|
2007-05-08 07:26:15 +00:00
|
|
|
struct task_struct *task, const void *ptr)
|
2006-10-02 09:18:49 +00:00
|
|
|
{
|
|
|
|
struct dentry *error = ERR_PTR(-ENOENT);
|
|
|
|
struct inode *inode;
|
|
|
|
|
2006-10-02 09:18:49 +00:00
|
|
|
inode = proc_pid_make_inode(dir->i_sb, task);
|
2006-10-02 09:18:49 +00:00
|
|
|
if (!inode)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
|
|
|
|
inode->i_op = &proc_tgid_base_inode_operations;
|
|
|
|
inode->i_fop = &proc_tgid_base_operations;
|
|
|
|
inode->i_flags |= S_IMMUTABLE;
|
2008-06-06 05:46:53 +00:00
|
|
|
|
|
|
|
inode->i_nlink = 2 + pid_entry_count_dirs(tgid_base_stuff,
|
|
|
|
ARRAY_SIZE(tgid_base_stuff));
|
2006-10-02 09:18:49 +00:00
|
|
|
|
|
|
|
dentry->d_op = &pid_dentry_operations;
|
|
|
|
|
|
|
|
d_add(dentry, inode);
|
|
|
|
/* Close the race of the process dying before we return the dentry */
|
|
|
|
if (pid_revalidate(dentry, NULL))
|
|
|
|
error = NULL;
|
|
|
|
out:
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
struct dentry *proc_pid_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
|
|
|
|
{
|
2006-06-26 07:25:49 +00:00
|
|
|
struct dentry *result = ERR_PTR(-ENOENT);
|
2005-04-16 22:20:36 +00:00
|
|
|
struct task_struct *task;
|
|
|
|
unsigned tgid;
|
2007-10-19 06:40:14 +00:00
|
|
|
struct pid_namespace *ns;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-10-02 09:18:48 +00:00
|
|
|
result = proc_base_lookup(dir, dentry);
|
|
|
|
if (!IS_ERR(result) || PTR_ERR(result) != -ENOENT)
|
|
|
|
goto out;
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
tgid = name_to_int(dentry);
|
|
|
|
if (tgid == ~0U)
|
|
|
|
goto out;
|
|
|
|
|
2007-10-19 06:40:14 +00:00
|
|
|
ns = dentry->d_sb->s_fs_info;
|
2006-06-26 07:25:51 +00:00
|
|
|
rcu_read_lock();
|
2007-10-19 06:40:14 +00:00
|
|
|
task = find_task_by_pid_ns(tgid, ns);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (task)
|
|
|
|
get_task_struct(task);
|
2006-06-26 07:25:51 +00:00
|
|
|
rcu_read_unlock();
|
2005-04-16 22:20:36 +00:00
|
|
|
if (!task)
|
|
|
|
goto out;
|
|
|
|
|
2006-10-02 09:18:49 +00:00
|
|
|
result = proc_pid_instantiate(dir, dentry, task, NULL);
|
2005-04-16 22:20:36 +00:00
|
|
|
put_task_struct(task);
|
|
|
|
out:
|
2006-06-26 07:25:49 +00:00
|
|
|
return result;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
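/*
 * Lookup order sketch (summary of the function above, not new behaviour):
 * a name such as "self" is satisfied by proc_base_lookup() and the numeric
 * path is never tried; a name such as "1234" makes proc_base_lookup()
 * return -ENOENT, so the name is parsed as a tgid and resolved via
 * find_task_by_pid_ns() + proc_pid_instantiate().
 */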
|
|
|
|
|
|
|
|
/*
|
[PATCH] proc: readdir race fix (take 3)
The problem: An opendir, readdir, closedir sequence can fail to report
process ids that are continually in use throughout the sequence of system
calls. For this race to trigger the process that proc_pid_readdir stops at
must exit before readdir is called again.
This can cause ps to fail to report processes, and it is in violation of
posix guarantees and normal application expectations with respect to
readdir.
Currently there is no way to work around this problem in user space short
of providing a gargantuan buffer to user space so the directory read all
happens in one system call.
This patch implements the normal directory semantics for proc, that
guarantee that a directory entry that is neither created nor destroyed
while reading the directory entry will be returned. For directory that are
either created or destroyed during the readdir you may or may not see them.
Furthermore you may seek to a directory offset you have previously seen.
These are the guarantees that ext[23] provides and that posix requires, and
more importantly that user space expects. Plus it is a simple semantic to
implement reliable service. It is just a matter of calling readdir a
second time if you are wondering if something new has shown up.
These better semantics are implemented by scanning through the pids in
numerical order and by making the file offset a pid plus a fixed offset.
The pid scan happens on the pid bitmap, which when you look at it is
remarkably efficient for a brute force algorithm. Given that a typical
cache line is 64 bytes and thus covers space for 64*8 == 512 pids. There
are only 64 cache lines for the entire 32K pid space. A typical system
will have 100 pids or more, so this is actually fewer cache lines than we
would have to touch to scan a linked list, and the worst case of having to scan the
entire pid bitmap is pretty reasonable.
If we need something more efficient we can go to a more efficient data
structure for indexing the pids, but for now what we have should be
sufficient.
In addition this takes no additional locks and is actually less code than
what we are doing now.
Also another very subtle bug in this area has been fixed. It is possible
to catch a task in the middle of de_thread, where a thread is assuming the
pid of its thread group leader. This patch carefully handles that case,
so if we hit it we don't fail to return the pid that is undergoing the
de_thread dance.
Thanks to KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> for
providing the first fix, pointing this out and working on it.
[oleg@tv-sign.ru: fix it]
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Jean Delvare <jdelvare@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-10-02 09:17:04 +00:00
|
|
|
* Find the first task with tgid >= the requested tgid
|
2006-06-26 07:25:50 +00:00
|
|
|
*
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2007-11-29 00:21:26 +00:00
|
|
|
struct tgid_iter {
|
|
|
|
unsigned int tgid;
|
2006-10-02 09:17:04 +00:00
|
|
|
struct task_struct *task;
|
2007-11-29 00:21:26 +00:00
|
|
|
};
|
|
|
|
static struct tgid_iter next_tgid(struct pid_namespace *ns, struct tgid_iter iter)
|
|
|
|
{
|
2006-10-02 09:17:04 +00:00
|
|
|
struct pid *pid;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2007-11-29 00:21:26 +00:00
|
|
|
if (iter.task)
|
|
|
|
put_task_struct(iter.task);
|
2006-06-26 07:25:51 +00:00
|
|
|
rcu_read_lock();
|
2006-10-02 09:17:04 +00:00
|
|
|
retry:
|
2007-11-29 00:21:26 +00:00
|
|
|
iter.task = NULL;
|
|
|
|
pid = find_ge_pid(iter.tgid, ns);
|
2006-10-02 09:17:04 +00:00
|
|
|
if (pid) {
|
2007-11-29 00:21:26 +00:00
|
|
|
iter.tgid = pid_nr_ns(pid, ns);
|
|
|
|
iter.task = pid_task(pid, PIDTYPE_PID);
|
2006-10-02 09:17:04 +00:00
|
|
|
/* What we want to know is if the pid we have found is the
|
|
|
|
* pid of a thread_group_leader. Testing for task
|
|
|
|
* being a thread_group_leader is the obvious thing
|
|
|
|
* to do, but there is a window when it fails, due to
|
|
|
|
* the pid transfer logic in de_thread.
|
|
|
|
*
|
|
|
|
* So we perform the straightforward test of seeing
|
|
|
|
* if the pid we have found is the pid of a thread
|
|
|
|
* group leader, and don't worry if the task we have
|
|
|
|
* found doesn't happen to be a thread group leader,
|
|
|
|
* as we don't care in the case of readdir.
|
|
|
|
*/
|
2007-11-29 00:21:26 +00:00
|
|
|
if (!iter.task || !has_group_leader_pid(iter.task)) {
|
|
|
|
iter.tgid += 1;
|
2006-10-02 09:17:04 +00:00
|
|
|
goto retry;
|
2007-11-29 00:21:26 +00:00
|
|
|
}
|
|
|
|
get_task_struct(iter.task);
|
2006-06-26 07:25:50 +00:00
|
|
|
}
|
2006-06-26 07:25:51 +00:00
|
|
|
rcu_read_unlock();
|
2007-11-29 00:21:26 +00:00
|
|
|
return iter;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
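/*
 * Usage sketch for the iterator above; walk_tgids_sketch is illustrative
 * and not part of this file (the real consumer is proc_pid_readdir()
 * below). Note the reference contract: next_tgid() drops the reference
 * held in iter.task before looking up the next pid, so only an early
 * exit from the loop needs its own put_task_struct().
 */
#if 0	/* illustrative only */
static void walk_tgids_sketch(struct pid_namespace *ns)
{
	struct tgid_iter iter = { .tgid = 0, .task = NULL };

	for (iter = next_tgid(ns, iter); iter.task;
	     iter.tgid += 1, iter = next_tgid(ns, iter)) {
		/* iter.task is a thread group leader, with a reference held */
	}
}
#endif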
|
|
|
|
|
2006-10-02 09:18:56 +00:00
|
|
|
#define TGID_OFFSET (FIRST_PROCESS_ENTRY + ARRAY_SIZE(proc_base_stuff))
|
2006-10-02 09:17:04 +00:00
|
|
|
|
2006-10-02 09:18:49 +00:00
|
|
|
static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
|
2007-11-29 00:21:26 +00:00
|
|
|
struct tgid_iter iter)
|
2006-10-02 09:18:49 +00:00
|
|
|
{
|
|
|
|
char name[PROC_NUMBUF];
|
2007-11-29 00:21:26 +00:00
|
|
|
int len = snprintf(name, sizeof(name), "%d", iter.tgid);
|
2006-10-02 09:18:49 +00:00
|
|
|
return proc_fill_cache(filp, dirent, filldir, name, len,
|
2007-11-29 00:21:26 +00:00
|
|
|
proc_pid_instantiate, iter.task, NULL);
|
2006-10-02 09:18:49 +00:00
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/* for the /proc/ directory itself, after non-process stuff has been done */
|
|
|
|
int proc_pid_readdir(struct file *filp, void *dirent, filldir_t filldir)
|
|
|
|
{
|
|
|
|
unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY;
|
2006-12-08 10:36:36 +00:00
|
|
|
struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode);
|
2007-11-29 00:21:26 +00:00
|
|
|
struct tgid_iter iter;
|
2007-10-19 06:40:14 +00:00
|
|
|
struct pid_namespace *ns;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-10-02 09:18:49 +00:00
|
|
|
if (!reaper)
|
|
|
|
goto out_no_task;
|
|
|
|
|
2006-10-02 09:18:56 +00:00
|
|
|
for (; nr < ARRAY_SIZE(proc_base_stuff); filp->f_pos++, nr++) {
|
2007-05-08 07:26:15 +00:00
|
|
|
const struct pid_entry *p = &proc_base_stuff[nr];
|
2006-10-02 09:18:49 +00:00
|
|
|
if (proc_base_fill_cache(filp, dirent, filldir, reaper, p) < 0)
|
2006-10-02 09:18:48 +00:00
|
|
|
goto out;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2007-10-19 06:40:14 +00:00
|
|
|
ns = filp->f_dentry->d_sb->s_fs_info;
|
2007-11-29 00:21:26 +00:00
|
|
|
iter.task = NULL;
|
|
|
|
iter.tgid = filp->f_pos - TGID_OFFSET;
|
|
|
|
for (iter = next_tgid(ns, iter);
|
|
|
|
iter.task;
|
|
|
|
iter.tgid += 1, iter = next_tgid(ns, iter)) {
|
|
|
|
filp->f_pos = iter.tgid + TGID_OFFSET;
|
|
|
|
if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
|
|
|
|
put_task_struct(iter.task);
|
2006-10-02 09:17:04 +00:00
|
|
|
goto out;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2006-06-26 07:25:50 +00:00
|
|
|
}
|
2006-10-02 09:17:04 +00:00
|
|
|
filp->f_pos = PID_MAX_LIMIT + TGID_OFFSET;
|
|
|
|
out:
|
2006-10-02 09:18:49 +00:00
|
|
|
put_task_struct(reaper);
|
|
|
|
out_no_task:
|
2006-06-26 07:25:50 +00:00
|
|
|
return 0;
|
|
|
|
}
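/*
 * Sketch of the f_pos layout implemented above (derived from the code,
 * stated here for clarity):
 *
 *	[0, FIRST_PROCESS_ENTRY)            non-process /proc entries,
 *	                                    handled before this function runs
 *	[FIRST_PROCESS_ENTRY, TGID_OFFSET)  proc_base_stuff entries
 *	TGID_OFFSET + tgid                  the /proc/<tgid> directory for tgid
 *	TGID_OFFSET + PID_MAX_LIMIT         end-of-directory sentinel
 */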
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-10-02 09:17:05 +00:00
|
|
|
/*
|
|
|
|
* Tasks
|
|
|
|
*/
|
2007-05-08 07:26:15 +00:00
|
|
|
static const struct pid_entry tid_base_stuff[] = {
|
2008-11-09 22:32:52 +00:00
|
|
|
DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
|
|
|
|
DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fd_operations),
|
|
|
|
REG("environ", S_IRUSR, proc_environ_operations),
|
|
|
|
INF("auxv", S_IRUSR, proc_pid_auxv),
|
|
|
|
ONE("status", S_IRUGO, proc_pid_status),
|
|
|
|
ONE("personality", S_IRUSR, proc_pid_personality),
|
|
|
|
INF("limits", S_IRUSR, proc_pid_limits),
|
2007-07-09 16:52:00 +00:00
|
|
|
#ifdef CONFIG_SCHED_DEBUG
|
2008-11-09 22:32:52 +00:00
|
|
|
REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
|
2008-07-26 02:46:00 +00:00
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
|
2008-11-09 22:32:52 +00:00
|
|
|
INF("syscall", S_IRUSR, proc_pid_syscall),
|
2007-07-09 16:52:00 +00:00
|
|
|
#endif
|
2008-11-09 22:32:52 +00:00
|
|
|
INF("cmdline", S_IRUGO, proc_pid_cmdline),
|
|
|
|
ONE("stat", S_IRUGO, proc_tid_stat),
|
|
|
|
ONE("statm", S_IRUGO, proc_pid_statm),
|
|
|
|
REG("maps", S_IRUGO, proc_maps_operations),
|
2006-10-02 09:17:05 +00:00
|
|
|
#ifdef CONFIG_NUMA
|
2008-11-09 22:32:52 +00:00
|
|
|
REG("numa_maps", S_IRUGO, proc_numa_maps_operations),
|
2006-10-02 09:17:05 +00:00
|
|
|
#endif
|
2008-11-09 22:32:52 +00:00
|
|
|
REG("mem", S_IRUSR|S_IWUSR, proc_mem_operations),
|
|
|
|
LNK("cwd", proc_cwd_link),
|
|
|
|
LNK("root", proc_root_link),
|
|
|
|
LNK("exe", proc_exe_link),
|
|
|
|
REG("mounts", S_IRUGO, proc_mounts_operations),
|
|
|
|
REG("mountinfo", S_IRUGO, proc_mountinfo_operations),
|
2008-02-05 06:29:07 +00:00
|
|
|
#ifdef CONFIG_PROC_PAGE_MONITOR
|
2008-11-09 22:32:52 +00:00
|
|
|
REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
|
|
|
|
REG("smaps", S_IRUGO, proc_smaps_operations),
|
|
|
|
REG("pagemap", S_IRUSR, proc_pagemap_operations),
|
2006-10-02 09:17:05 +00:00
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_SECURITY
|
2008-11-09 22:32:52 +00:00
|
|
|
DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
|
2006-10-02 09:17:05 +00:00
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_KALLSYMS
|
2008-11-09 22:32:52 +00:00
|
|
|
INF("wchan", S_IRUGO, proc_pid_wchan),
|
2006-10-02 09:17:05 +00:00
|
|
|
#endif
|
2008-11-10 08:26:08 +00:00
|
|
|
#ifdef CONFIG_STACKTRACE
|
|
|
|
ONE("stack", S_IRUSR, proc_pid_stack),
|
2006-10-02 09:17:05 +00:00
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_SCHEDSTATS
|
2008-11-09 22:32:52 +00:00
|
|
|
INF("schedstat", S_IRUGO, proc_pid_schedstat),
|
2006-10-02 09:17:05 +00:00
|
|
|
#endif
|
2008-01-25 20:08:34 +00:00
|
|
|
#ifdef CONFIG_LATENCYTOP
|
2008-11-09 22:32:52 +00:00
|
|
|
REG("latency", S_IRUGO, proc_lstats_operations),
|
2008-01-25 20:08:34 +00:00
|
|
|
#endif
|
2007-10-19 06:39:39 +00:00
|
|
|
#ifdef CONFIG_PROC_PID_CPUSET
|
2008-11-09 22:32:52 +00:00
|
|
|
REG("cpuset", S_IRUGO, proc_cpuset_operations),
|
2007-10-19 06:39:35 +00:00
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_CGROUPS
|
2008-11-09 22:32:52 +00:00
|
|
|
REG("cgroup", S_IRUGO, proc_cgroup_operations),
|
2006-10-02 09:17:05 +00:00
|
|
|
#endif
|
2008-11-09 22:32:52 +00:00
|
|
|
INF("oom_score", S_IRUGO, proc_oom_score),
|
|
|
|
REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adjust_operations),
|
2006-10-02 09:17:05 +00:00
|
|
|
#ifdef CONFIG_AUDITSYSCALL
|
2008-11-09 22:32:52 +00:00
|
|
|
REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations),
|
|
|
|
REG("sessionid", S_IRUSR, proc_sessionid_operations),
|
2006-10-02 09:17:05 +00:00
|
|
|
#endif
|
2006-12-08 10:39:47 +00:00
|
|
|
#ifdef CONFIG_FAULT_INJECTION
|
2008-11-09 22:32:52 +00:00
|
|
|
REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
|
2006-12-08 10:39:47 +00:00
|
|
|
#endif
|
2008-07-25 08:48:49 +00:00
|
|
|
#ifdef CONFIG_TASK_IO_ACCOUNTING
|
2008-11-09 22:32:52 +00:00
|
|
|
INF("io", S_IRUGO, proc_tid_io_accounting),
|
2008-07-25 08:48:49 +00:00
|
|
|
#endif
|
2006-10-02 09:17:05 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
static int proc_tid_base_readdir(struct file *filp,
|
|
|
|
void *dirent, filldir_t filldir)
|
|
|
|
{
|
|
|
|
return proc_pident_readdir(filp, dirent, filldir,
|
|
|
|
tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
|
2006-10-02 09:18:56 +00:00
|
|
|
return proc_pident_lookup(dir, dentry,
|
|
|
|
tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
|
2006-10-02 09:17:05 +00:00
|
|
|
}
|
|
|
|
|
2007-02-12 08:55:34 +00:00
|
|
|
static const struct file_operations proc_tid_base_operations = {
|
2006-10-02 09:17:05 +00:00
|
|
|
.read = generic_read_dir,
|
|
|
|
.readdir = proc_tid_base_readdir,
|
|
|
|
};
|
|
|
|
|
2007-02-12 08:55:40 +00:00
|
|
|
static const struct inode_operations proc_tid_base_inode_operations = {
|
2006-10-02 09:17:05 +00:00
|
|
|
.lookup = proc_tid_base_lookup,
|
|
|
|
.getattr = pid_getattr,
|
|
|
|
.setattr = proc_setattr,
|
|
|
|
};
|
|
|
|
|
2006-10-02 09:18:49 +00:00
|
|
|
static struct dentry *proc_task_instantiate(struct inode *dir,
|
2007-05-08 07:26:15 +00:00
|
|
|
struct dentry *dentry, struct task_struct *task, const void *ptr)
|
2006-10-02 09:18:49 +00:00
|
|
|
{
|
|
|
|
struct dentry *error = ERR_PTR(-ENOENT);
|
|
|
|
struct inode *inode;
|
2006-10-02 09:18:49 +00:00
|
|
|
inode = proc_pid_make_inode(dir->i_sb, task);
|
2006-10-02 09:18:49 +00:00
|
|
|
|
|
|
|
if (!inode)
|
|
|
|
goto out;
|
|
|
|
inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
|
|
|
|
inode->i_op = &proc_tid_base_inode_operations;
|
|
|
|
inode->i_fop = &proc_tid_base_operations;
|
|
|
|
inode->i_flags |= S_IMMUTABLE;
|
2008-06-06 05:46:53 +00:00
|
|
|
|
|
|
|
inode->i_nlink = 2 + pid_entry_count_dirs(tid_base_stuff,
|
|
|
|
ARRAY_SIZE(tid_base_stuff));
|
2006-10-02 09:18:49 +00:00
|
|
|
|
|
|
|
dentry->d_op = &pid_dentry_operations;
|
|
|
|
|
|
|
|
d_add(dentry, inode);
|
|
|
|
/* Close the race of the process dying before we return the dentry */
|
|
|
|
if (pid_revalidate(dentry, NULL))
|
|
|
|
error = NULL;
|
|
|
|
out:
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
2006-10-02 09:17:05 +00:00
|
|
|
static struct dentry *proc_task_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
|
|
|
|
{
|
|
|
|
struct dentry *result = ERR_PTR(-ENOENT);
|
|
|
|
struct task_struct *task;
|
|
|
|
struct task_struct *leader = get_proc_task(dir);
|
|
|
|
unsigned tid;
|
2007-10-19 06:40:14 +00:00
|
|
|
struct pid_namespace *ns;
|
2006-10-02 09:17:05 +00:00
|
|
|
|
|
|
|
if (!leader)
|
|
|
|
goto out_no_task;
|
|
|
|
|
|
|
|
tid = name_to_int(dentry);
|
|
|
|
if (tid == ~0U)
|
|
|
|
goto out;
|
|
|
|
|
2007-10-19 06:40:14 +00:00
|
|
|
ns = dentry->d_sb->s_fs_info;
|
2006-10-02 09:17:05 +00:00
|
|
|
rcu_read_lock();
|
2007-10-19 06:40:14 +00:00
|
|
|
task = find_task_by_pid_ns(tid, ns);
|
2006-10-02 09:17:05 +00:00
|
|
|
if (task)
|
|
|
|
get_task_struct(task);
|
|
|
|
rcu_read_unlock();
|
|
|
|
if (!task)
|
|
|
|
goto out;
|
2007-10-19 06:40:18 +00:00
|
|
|
if (!same_thread_group(leader, task))
|
2006-10-02 09:17:05 +00:00
|
|
|
goto out_drop_task;
|
|
|
|
|
2006-10-02 09:18:49 +00:00
|
|
|
result = proc_task_instantiate(dir, dentry, task, NULL);
|
2006-10-02 09:17:05 +00:00
|
|
|
out_drop_task:
|
|
|
|
put_task_struct(task);
|
|
|
|
out:
|
|
|
|
put_task_struct(leader);
|
|
|
|
out_no_task:
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2006-06-26 07:25:50 +00:00
|
|
|
/*
|
|
|
|
* Find the first tid of a thread group to return to user space.
|
|
|
|
*
|
|
|
|
* Usually this is just the thread group leader, but if the user's
|
|
|
|
* buffer was too small or there was a seek into the middle of the
|
|
|
|
* directory we have more work to do.
|
|
|
|
*
|
|
|
|
* In the case of a short read we start with find_task_by_pid_ns.
|
|
|
|
*
|
|
|
|
* In the case of a seek we start with the leader and walk nr
|
|
|
|
* threads past it.
|
|
|
|
*/
|
2006-06-26 07:26:01 +00:00
|
|
|
static struct task_struct *first_tid(struct task_struct *leader,
|
2007-10-19 06:40:14 +00:00
|
|
|
int tid, int nr, struct pid_namespace *ns)
|
2006-06-26 07:25:50 +00:00
|
|
|
{
|
2006-06-26 07:26:01 +00:00
|
|
|
struct task_struct *pos;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-06-26 07:26:01 +00:00
|
|
|
rcu_read_lock();
|
2006-06-26 07:25:50 +00:00
|
|
|
/* Attempt to start with the pid of a thread */
|
|
|
|
if (tid && (nr > 0)) {
|
2007-10-19 06:40:14 +00:00
|
|
|
pos = find_task_by_pid_ns(tid, ns);
|
2006-06-26 07:26:01 +00:00
|
|
|
if (pos && (pos->group_leader == leader))
|
|
|
|
goto found;
|
2006-06-26 07:25:50 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-06-26 07:25:50 +00:00
|
|
|
/* If nr exceeds the number of threads there is nothing to do */
|
2006-06-26 07:26:01 +00:00
|
|
|
pos = NULL;
|
|
|
|
if (nr && nr >= get_nr_threads(leader))
|
|
|
|
goto out;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-06-26 07:26:01 +00:00
|
|
|
/* If we haven't found our starting place yet, start
|
|
|
|
* with the leader and walk nr threads forward.
|
2006-06-26 07:25:50 +00:00
|
|
|
*/
|
2006-06-26 07:26:01 +00:00
|
|
|
for (pos = leader; nr > 0; --nr) {
|
|
|
|
pos = next_thread(pos);
|
|
|
|
if (pos == leader) {
|
|
|
|
pos = NULL;
|
|
|
|
goto out;
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2006-06-26 07:26:01 +00:00
|
|
|
found:
|
|
|
|
get_task_struct(pos);
|
|
|
|
out:
|
2006-06-26 07:26:01 +00:00
|
|
|
rcu_read_unlock();
|
2006-06-26 07:25:50 +00:00
|
|
|
return pos;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Find the next thread in the thread list.
|
|
|
|
* Return NULL if there is an error or no next thread.
|
|
|
|
*
|
|
|
|
* The reference to the input task_struct is released.
|
|
|
|
*/
|
|
|
|
static struct task_struct *next_tid(struct task_struct *start)
|
|
|
|
{
|
2006-06-26 07:26:02 +00:00
|
|
|
struct task_struct *pos = NULL;
|
2006-06-26 07:26:01 +00:00
|
|
|
rcu_read_lock();
|
2006-06-26 07:26:02 +00:00
|
|
|
if (pid_alive(start)) {
|
2006-06-26 07:25:50 +00:00
|
|
|
pos = next_thread(start);
|
2006-06-26 07:26:02 +00:00
|
|
|
if (thread_group_leader(pos))
|
|
|
|
pos = NULL;
|
|
|
|
else
|
|
|
|
get_task_struct(pos);
|
|
|
|
}
|
2006-06-26 07:26:01 +00:00
|
|
|
rcu_read_unlock();
|
2006-06-26 07:25:50 +00:00
|
|
|
put_task_struct(start);
|
|
|
|
return pos;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
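/*
 * Usage sketch for first_tid()/next_tid(); walk_tids_sketch is
 * illustrative and not part of this file (the real consumer is
 * proc_task_readdir() below). next_tid() consumes the reference on its
 * argument, so the loop leaks nothing when it runs to completion; an
 * early break must call put_task_struct() itself.
 */
#if 0	/* illustrative only */
static void walk_tids_sketch(struct task_struct *leader,
			     struct pid_namespace *ns)
{
	struct task_struct *task;

	for (task = first_tid(leader, 0, 0, ns); task; task = next_tid(task)) {
		/* task is a live thread of leader's group, reference held */
	}
}
#endif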
|
|
|
|
|
2006-10-02 09:18:49 +00:00
|
|
|
static int proc_task_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
|
|
|
|
struct task_struct *task, int tid)
|
|
|
|
{
|
|
|
|
char name[PROC_NUMBUF];
|
|
|
|
int len = snprintf(name, sizeof(name), "%d", tid);
|
|
|
|
return proc_fill_cache(filp, dirent, filldir, name, len,
|
|
|
|
proc_task_instantiate, task, NULL);
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/* for the /proc/TGID/task/ directories */
|
|
|
|
static int proc_task_readdir(struct file *filp, void *dirent, filldir_t filldir)
|
|
|
|
{
|
2006-12-08 10:36:36 +00:00
|
|
|
struct dentry *dentry = filp->f_path.dentry;
|
2005-04-16 22:20:36 +00:00
|
|
|
struct inode *inode = dentry->d_inode;
|
2007-02-01 07:48:14 +00:00
|
|
|
struct task_struct *leader = NULL;
|
2006-06-26 07:25:50 +00:00
|
|
|
struct task_struct *task;
|
2005-04-16 22:20:36 +00:00
|
|
|
int retval = -ENOENT;
|
|
|
|
ino_t ino;
|
2006-06-26 07:25:50 +00:00
|
|
|
int tid;
|
2007-10-19 06:40:14 +00:00
|
|
|
struct pid_namespace *ns;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2007-02-01 07:48:14 +00:00
|
|
|
task = get_proc_task(inode);
|
|
|
|
if (!task)
|
|
|
|
goto out_no_task;
|
|
|
|
rcu_read_lock();
|
|
|
|
if (pid_alive(task)) {
|
|
|
|
leader = task->group_leader;
|
|
|
|
get_task_struct(leader);
|
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
|
|
|
put_task_struct(task);
|
2006-06-26 07:25:55 +00:00
|
|
|
if (!leader)
|
|
|
|
goto out_no_task;
|
2005-04-16 22:20:36 +00:00
|
|
|
retval = 0;
|
|
|
|
|
2009-03-17 17:02:35 +00:00
|
|
|
switch ((unsigned long)filp->f_pos) {
|
2005-04-16 22:20:36 +00:00
|
|
|
case 0:
|
|
|
|
ino = inode->i_ino;
|
2009-03-16 06:44:31 +00:00
|
|
|
if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) < 0)
|
2005-04-16 22:20:36 +00:00
|
|
|
goto out;
|
2009-03-16 06:44:31 +00:00
|
|
|
filp->f_pos++;
|
2005-04-16 22:20:36 +00:00
|
|
|
/* fall through */
|
|
|
|
case 1:
|
|
|
|
ino = parent_ino(dentry);
|
2009-03-16 06:44:31 +00:00
|
|
|
if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) < 0)
|
2005-04-16 22:20:36 +00:00
|
|
|
goto out;
|
2009-03-16 06:44:31 +00:00
|
|
|
filp->f_pos++;
|
2005-04-16 22:20:36 +00:00
|
|
|
/* fall through */
|
|
|
|
}
|
|
|
|
|
2006-06-26 07:25:50 +00:00
|
|
|
/* f_version caches the tid value that the last readdir call couldn't
|
|
|
|
* return. lseek aka telldir automagically resets f_version to 0.
|
|
|
|
*/
|
2007-10-19 06:40:14 +00:00
|
|
|
ns = filp->f_dentry->d_sb->s_fs_info;
|
2007-10-17 06:27:21 +00:00
|
|
|
tid = (int)filp->f_version;
|
2006-06-26 07:25:50 +00:00
|
|
|
filp->f_version = 0;
|
2009-03-16 06:44:31 +00:00
|
|
|
for (task = first_tid(leader, tid, filp->f_pos - 2, ns);
|
2006-06-26 07:25:50 +00:00
|
|
|
task;
|
2009-03-16 06:44:31 +00:00
|
|
|
task = next_tid(task), filp->f_pos++) {
|
2007-10-19 06:40:14 +00:00
|
|
|
tid = task_pid_nr_ns(task, ns);
|
2006-10-02 09:18:49 +00:00
|
|
|
if (proc_task_fill_cache(filp, dirent, filldir, task, tid) < 0) {
|
2006-06-26 07:25:50 +00:00
|
|
|
/* returning this tid failed, save it as the first
|
|
|
|
* pid for the next readdir call */
|
2007-10-17 06:27:21 +00:00
|
|
|
filp->f_version = (u64)tid;
|
2006-06-26 07:25:50 +00:00
|
|
|
put_task_struct(task);
|
2005-04-16 22:20:36 +00:00
|
|
|
break;
|
2006-06-26 07:25:50 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
out:
|
2006-06-26 07:25:55 +00:00
|
|
|
put_task_struct(leader);
|
|
|
|
out_no_task:
|
2005-04-16 22:20:36 +00:00
|
|
|
return retval;
|
|
|
|
}
|
2006-06-26 07:25:47 +00:00
|
|
|
|
|
|
|
static int proc_task_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
|
|
|
|
{
|
|
|
|
struct inode *inode = dentry->d_inode;
|
2006-06-26 07:25:55 +00:00
|
|
|
struct task_struct *p = get_proc_task(inode);
|
2006-06-26 07:25:47 +00:00
|
|
|
generic_fillattr(inode, stat);
|
|
|
|
|
2006-06-26 07:25:55 +00:00
|
|
|
if (p) {
|
|
|
|
stat->nlink += get_nr_threads(p);
|
|
|
|
put_task_struct(p);
|
2006-06-26 07:25:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
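/*
 * Userspace-visible effect (sketch): stat() on /proc/<pid>/task reports
 * st_nlink == 2 + the number of live threads, i.e. "." and ".." plus one
 * subdirectory per thread at the moment of the call.
 */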
|
2006-10-02 09:17:05 +00:00
|
|
|
|
2007-02-12 08:55:40 +00:00
|
|
|
static const struct inode_operations proc_task_inode_operations = {
|
2006-10-02 09:17:05 +00:00
|
|
|
.lookup = proc_task_lookup,
|
|
|
|
.getattr = proc_task_getattr,
|
|
|
|
.setattr = proc_setattr,
|
|
|
|
};
|
|
|
|
|
2007-02-12 08:55:34 +00:00
|
|
|
static const struct file_operations proc_task_operations = {
|
2006-10-02 09:17:05 +00:00
|
|
|
.read = generic_read_dir,
|
|
|
|
.readdir = proc_task_readdir,
|
|
|
|
};
|