Mirror of https://github.com/FEX-Emu/linux.git (synced 2025-02-23 22:16:15 +00:00)
Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  powerpc/powermac: Build fix with SMP and CPU hotplug
  powerpc/perf_event: Skip updating kernel counters if register value shrinks
  powerpc: Don't write protect kernel text with CONFIG_DYNAMIC_FTRACE enabled
  powerpc: Fix oops if scan_dispatch_log is called too early
  powerpc/pseries: Use a kmem cache for DTL buffers
  powerpc/kexec: Fix regression causing compile failure on UP
  powerpc/85xx: disable Suspend support if SMP enabled
  powerpc/e500mc: Remove CPU_FTR_MAYBE_CAN_NAP/CPU_FTR_MAYBE_CAN_DOZE
  powerpc/book3e: Fix CPU feature handling on 64-bit e5500
  powerpc: Check device status before adding serial device
  powerpc/85xx: Don't add disabled PCIe devices
commit 5d5b1b9f79
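As an illustration of the perf_event fix listed above ("Skip updating kernel counters if register value shrinks"), here is a standalone C sketch, not code from this commit, of the rollback-aware delta computation that the diff below adds as check_and_compute_delta(): the PMCs are only 32 bits wide, so the delta is masked to 32 bits, and a value that moved backwards by fewer than 256 events is treated as a POWER7 counter rollback and contributes no delta.

    /* Standalone sketch of the rollback-aware delta computation (illustrative only, no kernel APIs). */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t check_and_compute_delta(uint64_t prev, uint64_t val)
    {
        /* The PMCs are only 32 bits wide, so the delta wraps at 2^32. */
        uint64_t delta = (val - prev) & 0xffffffffUL;

        /* A counter rolled back by POWER7 shrinks by fewer than 256 events. */
        if (prev > val && (prev - val) < 256)
            delta = 0;

        return delta;
    }

    int main(void)
    {
        /* Normal increment: prints 190. */
        printf("%llu\n", (unsigned long long)check_and_compute_delta(10, 200));
        /* 32-bit wraparound: prints 32. */
        printf("%llu\n", (unsigned long long)check_and_compute_delta(0xfffffff0UL, 16));
        /* Small rollback, treated as no progress: prints 0. */
        printf("%llu\n", (unsigned long long)check_and_compute_delta(200, 100));
        return 0;
    }

Compiled with an ordinary C compiler, the three calls print 190, 32 and 0: a normal increment, a 32-bit wraparound, and a small rollback that is ignored.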
@@ -209,7 +209,7 @@ config ARCH_HIBERNATION_POSSIBLE
 config ARCH_SUSPEND_POSSIBLE
     def_bool y
     depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \
-           PPC_85xx || PPC_86xx || PPC_PSERIES || 44x || 40x
+           (PPC_85xx && !SMP) || PPC_86xx || PPC_PSERIES || 44x || 40x
 
 config PPC_DCR_NATIVE
     bool
@@ -382,10 +382,12 @@ extern const char *powerpc_base_platform;
 #define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
     CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \
     CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
-#define CPU_FTRS_E500MC (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
-    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
+#define CPU_FTRS_E500MC (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
     CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
     CPU_FTR_DBELL)
+#define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
+    CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
+    CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD)
 #define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
 
 /* 64-bit CPUs */
@@ -435,11 +437,15 @@ extern const char *powerpc_base_platform;
 #define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
 
 #ifdef __powerpc64__
+#ifdef CONFIG_PPC_BOOK3E
+#define CPU_FTRS_POSSIBLE (CPU_FTRS_E5500)
+#else
 #define CPU_FTRS_POSSIBLE \
     (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \
     CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \
     CPU_FTRS_POWER7 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \
     CPU_FTR_1T_SEGMENT | CPU_FTR_VSX)
+#endif
 #else
 enum {
     CPU_FTRS_POSSIBLE =
@@ -473,16 +479,21 @@ enum {
 #endif
 #ifdef CONFIG_E500
     CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC |
+    CPU_FTRS_E5500 |
 #endif
     0,
 };
 #endif /* __powerpc64__ */
 
 #ifdef __powerpc64__
+#ifdef CONFIG_PPC_BOOK3E
+#define CPU_FTRS_ALWAYS (CPU_FTRS_E5500)
+#else
 #define CPU_FTRS_ALWAYS \
     (CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & \
     CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_POWER6 & \
     CPU_FTRS_POWER7 & CPU_FTRS_CELL & CPU_FTRS_PA6T & CPU_FTRS_POSSIBLE)
+#endif
 #else
 enum {
     CPU_FTRS_ALWAYS =
@@ -513,6 +524,7 @@ enum {
 #endif
 #ifdef CONFIG_E500
     CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC &
+    CPU_FTRS_E5500 &
 #endif
     CPU_FTRS_POSSIBLE,
 };
@@ -162,7 +162,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
  * on platforms where such control is possible.
  */
 #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
-    defined(CONFIG_KPROBES)
+    defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
 #define PAGE_KERNEL_TEXT PAGE_KERNEL_X
 #else
 #define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX
@@ -1973,7 +1973,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
         .pvr_mask = 0xffff0000,
         .pvr_value = 0x80240000,
         .cpu_name = "e5500",
-        .cpu_features = CPU_FTRS_E500MC,
+        .cpu_features = CPU_FTRS_E5500,
         .cpu_user_features = COMMON_USER_BOOKE,
         .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
             MMU_FTR_USE_TLBILX,
@@ -163,7 +163,7 @@ static void crash_kexec_prepare_cpus(int cpu)
 }
 
 /* wait for all the CPUs to hit real mode but timeout if they don't come in */
-#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
+#ifdef CONFIG_PPC_STD_MMU_64
 static void crash_kexec_wait_realmode(int cpu)
 {
     unsigned int msecs;
@@ -188,9 +188,7 @@ static void crash_kexec_wait_realmode(int cpu)
     }
     mb();
 }
-#else
-static inline void crash_kexec_wait_realmode(int cpu) {}
-#endif
+#endif /* CONFIG_PPC_STD_MMU_64 */
 
 /*
  * This function will be called by secondary cpus or by kexec cpu
@@ -235,7 +233,9 @@ void crash_kexec_secondary(struct pt_regs *regs)
         crash_ipi_callback(regs);
 }
 
-#else
+#else /* ! CONFIG_SMP */
+static inline void crash_kexec_wait_realmode(int cpu) {}
+
 static void crash_kexec_prepare_cpus(int cpu)
 {
     /*
@@ -255,7 +255,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
 {
     cpus_in_sr = CPU_MASK_NONE;
 }
-#endif
+#endif /* CONFIG_SMP */
 
 /*
  * Register a function to be called on shutdown. Only use this if you
|
@ -330,10 +330,12 @@ void __init find_legacy_serial_ports(void)
|
|||||||
if (!parent)
|
if (!parent)
|
||||||
continue;
|
continue;
|
||||||
if (of_match_node(legacy_serial_parents, parent) != NULL) {
|
if (of_match_node(legacy_serial_parents, parent) != NULL) {
|
||||||
|
if (of_device_is_available(np)) {
|
||||||
index = add_legacy_soc_port(np, np);
|
index = add_legacy_soc_port(np, np);
|
||||||
if (index >= 0 && np == stdout)
|
if (index >= 0 && np == stdout)
|
||||||
legacy_serial_console = index;
|
legacy_serial_console = index;
|
||||||
}
|
}
|
||||||
|
}
|
||||||
of_node_put(parent);
|
of_node_put(parent);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -398,6 +398,25 @@ static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
     return 0;
 }
 
+static u64 check_and_compute_delta(u64 prev, u64 val)
+{
+    u64 delta = (val - prev) & 0xfffffffful;
+
+    /*
+     * POWER7 can roll back counter values, if the new value is smaller
+     * than the previous value it will cause the delta and the counter to
+     * have bogus values unless we rolled a counter over. If a coutner is
+     * rolled back, it will be smaller, but within 256, which is the maximum
+     * number of events to rollback at once. If we dectect a rollback
+     * return 0. This can lead to a small lack of precision in the
+     * counters.
+     */
+    if (prev > val && (prev - val) < 256)
+        delta = 0;
+
+    return delta;
+}
+
 static void power_pmu_read(struct perf_event *event)
 {
     s64 val, delta, prev;
@@ -416,10 +435,11 @@ static void power_pmu_read(struct perf_event *event)
         prev = local64_read(&event->hw.prev_count);
         barrier();
         val = read_pmc(event->hw.idx);
+        delta = check_and_compute_delta(prev, val);
+        if (!delta)
+            return;
     } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 
-    /* The counters are only 32 bits wide */
-    delta = (val - prev) & 0xfffffffful;
     local64_add(delta, &event->count);
     local64_sub(delta, &event->hw.period_left);
 }
@@ -449,7 +469,8 @@ static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
         val = (event->hw.idx == 5) ? pmc5 : pmc6;
         prev = local64_read(&event->hw.prev_count);
         event->hw.idx = 0;
-        delta = (val - prev) & 0xfffffffful;
+        delta = check_and_compute_delta(prev, val);
+        if (delta)
             local64_add(delta, &event->count);
     }
 }
@@ -458,13 +479,15 @@ static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
                                   unsigned long pmc5, unsigned long pmc6)
 {
     struct perf_event *event;
-    u64 val;
+    u64 val, prev;
     int i;
 
     for (i = 0; i < cpuhw->n_limited; ++i) {
         event = cpuhw->limited_counter[i];
         event->hw.idx = cpuhw->limited_hwidx[i];
         val = (event->hw.idx == 5) ? pmc5 : pmc6;
+        prev = local64_read(&event->hw.prev_count);
+        if (check_and_compute_delta(prev, val))
             local64_set(&event->hw.prev_count, val);
         perf_event_update_userpage(event);
     }
@@ -1197,7 +1220,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 
     /* we don't have to worry about interrupts here */
     prev = local64_read(&event->hw.prev_count);
-    delta = (val - prev) & 0xfffffffful;
+    delta = check_and_compute_delta(prev, val);
     local64_add(delta, &event->count);
 
     /*
@@ -229,6 +229,9 @@ static u64 scan_dispatch_log(u64 stop_tb)
     u64 stolen = 0;
     u64 dtb;
 
+    if (!dtl)
+        return 0;
+
     if (i == vpa->dtl_idx)
         return 0;
     while (i < vpa->dtl_idx) {
@@ -842,6 +842,7 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr)
         mpic_setup_this_cpu();
 }
 
+#ifdef CONFIG_PPC64
 #ifdef CONFIG_HOTPLUG_CPU
 static int smp_core99_cpu_notify(struct notifier_block *self,
                                  unsigned long action, void *hcpu)
@@ -879,7 +880,6 @@ static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
 
 static void __init smp_core99_bringup_done(void)
 {
-#ifdef CONFIG_PPC64
     extern void g5_phy_disable_cpu1(void);
 
     /* Close i2c bus if it was used for tb sync */
@@ -894,14 +894,14 @@ static void __init smp_core99_bringup_done(void)
         set_cpu_present(1, false);
         g5_phy_disable_cpu1();
     }
-#endif /* CONFIG_PPC64 */
 
 #ifdef CONFIG_HOTPLUG_CPU
     register_cpu_notifier(&smp_core99_cpu_nb);
 #endif
 
     if (ppc_md.progress)
         ppc_md.progress("smp_core99_bringup_done", 0x349);
 }
+#endif /* CONFIG_PPC64 */
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -975,7 +975,9 @@ static void pmac_cpu_die(void)
 struct smp_ops_t core99_smp_ops = {
     .message_pass = smp_mpic_message_pass,
     .probe = smp_core99_probe,
+#ifdef CONFIG_PPC64
     .bringup_done = smp_core99_bringup_done,
+#endif
     .kick_cpu = smp_core99_kick_cpu,
     .setup_cpu = smp_core99_setup_cpu,
     .give_timebase = smp_core99_give_timebase,
@@ -287,14 +287,22 @@ static int alloc_dispatch_logs(void)
     int cpu, ret;
     struct paca_struct *pp;
     struct dtl_entry *dtl;
+    struct kmem_cache *dtl_cache;
 
     if (!firmware_has_feature(FW_FEATURE_SPLPAR))
         return 0;
 
+    dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
+                                  DISPATCH_LOG_BYTES, 0, NULL);
+    if (!dtl_cache) {
+        pr_warn("Failed to create dispatch trace log buffer cache\n");
+        pr_warn("Stolen time statistics will be unreliable\n");
+        return 0;
+    }
+
     for_each_possible_cpu(cpu) {
         pp = &paca[cpu];
-        dtl = kmalloc_node(DISPATCH_LOG_BYTES, GFP_KERNEL,
-                           cpu_to_node(cpu));
+        dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
         if (!dtl) {
             pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
                     cpu);
@@ -324,6 +324,11 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
     struct resource rsrc;
     const int *bus_range;
 
+    if (!of_device_is_available(dev)) {
+        pr_warning("%s: disabled\n", dev->full_name);
+        return -ENODEV;
+    }
+
     pr_debug("Adding PCI host bridge %s\n", dev->full_name);
 
     /* Fetch host bridge registers address */