Merge 'tip/perf/urgent' into perf/core to pick up fixes

Needed to keep perf/core buildable in some cases.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
commit 33be4ef116
@@ -223,27 +223,48 @@ static unsigned long
 __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
 {
         struct kprobe *kp;
+        unsigned long faddr;

         kp = get_kprobe((void *)addr);
-        /* There is no probe, return original address */
-        if (!kp)
+        faddr = ftrace_location(addr);
+        /*
+         * Addresses inside the ftrace location are refused by
+         * arch_check_ftrace_location(). Something went terribly wrong
+         * if such an address is checked here.
+         */
+        if (WARN_ON(faddr && faddr != addr))
+                return 0UL;
+        /*
+         * Use the current code if it is not modified by Kprobe
+         * and it cannot be modified by ftrace.
+         */
+        if (!kp && !faddr)
                 return addr;

         /*
          * Basically, kp->ainsn.insn has an original instruction.
          * However, RIP-relative instruction can not do single-stepping
          * at different place, __copy_instruction() tweaks the displacement of
          * that instruction. In that case, we can't recover the instruction
          * from the kp->ainsn.insn.
          *
-         * On the other hand, kp->opcode has a copy of the first byte of
-         * the probed instruction, which is overwritten by int3. And
-         * the instruction at kp->addr is not modified by kprobes except
-         * for the first byte, we can recover the original instruction
-         * from it and kp->opcode.
+         * On the other hand, in case of a normal Kprobe, kp->opcode has a copy
+         * of the first byte of the probed instruction, which is overwritten
+         * by int3. Since the instruction at kp->addr is not modified by kprobes
+         * except for the first byte, we can recover the original instruction
+         * from it and kp->opcode.
+         *
+         * In case of Kprobes using ftrace, we do not have a copy of
+         * the original instruction. In fact, the ftrace location might
+         * be modified at any time and could even be in an inconsistent state.
+         * Fortunately, we know that the original code is the ideal 5-byte
+         * long NOP.
          */
-        memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
-        buf[0] = kp->opcode;
+        memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+        if (faddr)
+                memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
+        else
+                buf[0] = kp->opcode;
         return (unsigned long)buf;
 }

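Note on the hunk above: an ftrace patch site on x86 flips between a 5-byte call to the ftrace trampoline and the ideal 5-byte NOP, and it may be rewritten at any time, so the saved kp->opcode trick cannot work there; the only bytes known for certain are the original NOP. A minimal user-space sketch of the same recovery policy follows — stub_get_kprobe() and stub_ftrace_location() are hypothetical stand-ins, and the NOP bytes shown are one common choice (the kernel selects ideal_nops per CPU):

    #include <stdint.h>
    #include <string.h>

    #define MAX_INSN_SIZE 16

    struct kprobe_like {
            uint8_t opcode;         /* first byte saved before int3 was written */
    };

    /* Hypothetical stand-ins for the kernel helpers used in the hunk above. */
    extern uintptr_t stub_ftrace_location(uintptr_t addr);  /* 0 if not an ftrace site */
    extern const struct kprobe_like *stub_get_kprobe(uintptr_t addr);

    /* One common encoding of the ideal 5-byte NOP on x86-64. */
    static const uint8_t ideal_nop5[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

    /* Mirrors the decision order of the patched __recover_probed_insn(). */
    uintptr_t recover_sketch(uint8_t *buf, uintptr_t addr)
    {
            const struct kprobe_like *kp = stub_get_kprobe(addr);
            uintptr_t faddr = stub_ftrace_location(addr);

            if (faddr && faddr != addr)     /* address inside an ftrace site: give up */
                    return 0;
            if (!kp && !faddr)              /* nobody patched this code: use it as-is */
                    return addr;

            memcpy(buf, (const void *)addr, MAX_INSN_SIZE);
            if (faddr)                      /* ftrace owns the site: original was the NOP */
                    memcpy(buf, ideal_nop5, sizeof(ideal_nop5));
            else                            /* plain kprobe: restore the saved first byte */
                    buf[0] = kp->opcode;
            return (uintptr_t)buf;
    }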
@@ -251,6 +272,7 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
  * Recover the probed instruction at addr for further analysis.
  * Caller must lock kprobes by kprobe_mutex, or disable preemption
  * for preventing to release referencing kprobes.
+ * Returns zero if the instruction cannot be recovered.
  */
 unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
 {
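This single added comment line documents the contract change behind the next three hunks: recover_probed_instruction() can now return zero (the WARN_ON path above), so callers must check before decoding the buffer. A sketch of the resulting call pattern, with decode_and_check() as a hypothetical stand-in for the kernel_insn_init()/insn_get_length() pair:

    typedef unsigned char kprobe_opcode_t;
    #define MAX_INSN_SIZE 16

    /* Declarations mirroring the kernel API used by these hunks (sketch only). */
    extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf,
                                                    unsigned long addr);
    extern int decode_and_check(unsigned long insn);   /* hypothetical decode step */

    /* Every caller now bails out when recovery fails instead of decoding junk. */
    int can_use_address(unsigned long addr)
    {
            kprobe_opcode_t buf[MAX_INSN_SIZE];
            unsigned long recovered = recover_probed_instruction(buf, addr);

            if (!recovered)         /* e.g. a bogus address inside an ftrace site */
                    return 0;
            return decode_and_check(recovered);
    }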
@@ -285,6 +307,8 @@ static int can_probe(unsigned long paddr)
          * normally used, we just go through if there is no kprobe.
          */
         __addr = recover_probed_instruction(buf, addr);
+        if (!__addr)
+                return 0;
         kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE);
         insn_get_length(&insn);

@@ -333,6 +357,8 @@ int __copy_instruction(u8 *dest, u8 *src)
         unsigned long recovered_insn =
                 recover_probed_instruction(buf, (unsigned long)src);

+        if (!recovered_insn)
+                return 0;
         kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
         insn_get_length(&insn);
         /* Another subsystem puts a breakpoint, failed to recover */
@@ -259,6 +259,8 @@ static int can_optimize(unsigned long paddr)
                  */
                 return 0;
         recovered_insn = recover_probed_instruction(buf, addr);
+        if (!recovered_insn)
+                return 0;
         kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
         insn_get_length(&insn);
         /* Another subsystem puts a breakpoint */
@@ -289,7 +289,7 @@ static u64 do_memcpy_cycle(const struct routine *r, size_t len, bool prefault)
         memcpy_t fn = r->fn.memcpy;
         int i;

-        memcpy_alloc_mem(&src, &dst, len);
+        memcpy_alloc_mem(&dst, &src, len);

         if (prefault)
                 fn(dst, src, len);
@@ -312,7 +312,7 @@ static double do_memcpy_gettimeofday(const struct routine *r, size_t len,
         void *src = NULL, *dst = NULL;
         int i;

-        memcpy_alloc_mem(&src, &dst, len);
+        memcpy_alloc_mem(&dst, &src, len);

         if (prefault)
                 fn(dst, src, len);
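The two memcpy-bench hunks above are the same fix: the call sites passed &src where the allocator expects the destination pointer and vice versa. A self-contained sketch under the assumption (inferred from the corrected calls, not quoted from the bench source) that memcpy_alloc_mem() takes the destination first and pre-touches only the source buffer — with the arguments swapped, the copy would read from untouched, never-faulted pages and skew the measurement:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Presumed shape: destination parameter first, source second. */
    static void memcpy_alloc_mem(void **dst, void **src, size_t length)
    {
            *dst = calloc(1, length);
            *src = calloc(1, length);
            if (!*dst || !*src) {
                    fprintf(stderr, "memory allocation failed\n");
                    exit(1);
            }
            memset(*src, 0, length);        /* fault in the source pages up front */
    }

    int main(void)
    {
            void *src = NULL, *dst = NULL;

            /* The fix: &dst goes to the dst parameter, &src to the src one. */
            memcpy_alloc_mem(&dst, &src, 4096);
            memcpy(dst, src, 4096);
            free(src);
            free(dst);
            return 0;
    }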
@@ -21,6 +21,10 @@ ifeq ($(RAW_ARCH),x86_64)
 endif
 endif

+ifeq ($(RAW_ARCH),sparc64)
+  ARCH ?= sparc
+endif
+
 ARCH ?= $(RAW_ARCH)

 LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
@@ -5,10 +5,11 @@ int main(void)
 {
         int ret = 0;
         pthread_attr_t thread_attr;
+        cpu_set_t cs;

         pthread_attr_init(&thread_attr);
         /* don't care about exact args, just the API itself in libpthread */
-        ret = pthread_attr_setaffinity_np(&thread_attr, 0, NULL);
+        ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cs), &cs);

         return ret;
 }
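The feature-test hunk above replaces a degenerate (0, NULL) size/mask pair, which links but never exercises the real signature, with an actual cpu_set_t. A standalone version of the fixed probe; pthread_attr_setaffinity_np() is a GNU extension, so it needs _GNU_SOURCE and -pthread:

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <sched.h>

    int main(void)
    {
            int ret = 0;
            pthread_attr_t thread_attr;
            cpu_set_t cs;

            CPU_ZERO(&cs);          /* any well-defined mask will do for the probe */
            pthread_attr_init(&thread_attr);
            /* don't care about exact args, just the API itself in libpthread */
            ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cs), &cs);

            return ret;
    }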
@@ -25,6 +25,10 @@ static int perf_flag_probe(void)
         if (cpu < 0)
                 cpu = 0;

+        /*
+         * Using -1 for the pid is a workaround to avoid gratuitous jump label
+         * changes.
+         */
         while (1) {
                 /* check cloexec flag */
                 fd = sys_perf_event_open(&attr, pid, cpu, -1,
@@ -47,16 +51,24 @@ static int perf_flag_probe(void)
                   err, strerror_r(err, sbuf, sizeof(sbuf)));

         /* not supported, confirm error related to PERF_FLAG_FD_CLOEXEC */
-        fd = sys_perf_event_open(&attr, pid, cpu, -1, 0);
+        while (1) {
+                fd = sys_perf_event_open(&attr, pid, cpu, -1, 0);
+                if (fd < 0 && pid == -1 && errno == EACCES) {
+                        pid = 0;
+                        continue;
+                }
+                break;
+        }
         err = errno;

+        if (fd >= 0)
+                close(fd);
+
         if (WARN_ONCE(fd < 0 && err != EBUSY,
                       "perf_event_open(..., 0) failed unexpectedly with error %d (%s)\n",
                       err, strerror_r(err, sbuf, sizeof(sbuf))))
                 return -1;

-        close(fd);
-
         return 0;
 }

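The retry loop above covers a specific corner: for an unprivileged user, opening with pid = -1 (system-wide) can fail with EACCES even though PERF_FLAG_FD_CLOEXEC itself is fine, so the probe falls back to pid = 0 (self) and tries once more; close(fd) is also made conditional so a failed open is never closed. A hedged user-space sketch of that shape — the event choice and the sys_perf_event_open() wrapper are assumptions, modeled on how perf wraps the raw syscall:

    #define _GNU_SOURCE
    #include <errno.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/types.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                   int cpu, int group_fd, unsigned long flags)
    {
            return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    /* Sketch of the fallback: try system-wide, drop to self-monitoring on EACCES. */
    int probe_open(int cpu)
    {
            struct perf_event_attr attr;
            pid_t pid = -1;         /* -1 first, to avoid gratuitous jump label changes */
            int fd, err;

            memset(&attr, 0, sizeof(attr));
            attr.type = PERF_TYPE_SOFTWARE;         /* a cheap software event */
            attr.config = PERF_COUNT_SW_CPU_CLOCK;
            attr.size = sizeof(attr);

            while (1) {
                    fd = sys_perf_event_open(&attr, pid, cpu, -1, 0);
                    if (fd < 0 && pid == -1 && errno == EACCES) {
                            pid = 0;        /* unprivileged user: monitor ourselves */
                            continue;
                    }
                    break;
            }
            err = errno;            /* save before close() can clobber it */
            if (fd >= 0)            /* only close what was actually opened */
                    close(fd);
            return fd < 0 ? -err : 0;
    }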
@@ -28,7 +28,7 @@ struct perf_mmap
         int             mask;
         int             refcnt;
         unsigned int    prev;
-        char            event_copy[PERF_SAMPLE_MAX_SIZE];
+        char            event_copy[PERF_SAMPLE_MAX_SIZE] __attribute__((aligned(8)));
 };

 struct perf_evlist {
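event_copy is a raw byte buffer that sample parsing later reinterprets as event records containing u64 fields. Inside this struct the array would otherwise sit at a 4-byte boundary (after three 4-byte members), which is an unaligned access waiting to happen on strict-alignment architectures such as sparc64; aligned(8) forces a safe boundary. A compilable illustration using a hypothetical mmap_like struct:

    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    #define PERF_SAMPLE_MAX_SIZE (1 << 16)

    struct mmap_like {
            int             mask;
            int             refcnt;
            unsigned int    prev;
            /*
             * Without aligned(8) this char array needs only byte alignment and
             * would start at offset 12 here; casting it to a record with u64
             * fields would then be an unaligned access on strict-alignment
             * machines.
             */
            char            event_copy[PERF_SAMPLE_MAX_SIZE] __attribute__((aligned(8)));
    };

    int main(void)
    {
            struct mmap_like m;
            uint64_t *first = (uint64_t *)(void *)m.event_copy;

            /* The attribute guarantees the reinterpretation is aligned. */
            printf("event_copy offset: %zu, 8-byte aligned: %d\n",
                   offsetof(struct mmap_like, event_copy),
                   (int)(((uintptr_t)first % 8) == 0));
            return 0;
    }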
@@ -11,6 +11,11 @@
 #include <symbol/kallsyms.h>
 #include "debug.h"

+#ifndef EM_AARCH64
+#define EM_AARCH64      183  /* ARM 64 bit */
+#endif
+
+
 #ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
 extern char *cplus_demangle(const char *, int);
