Mirror of https://github.com/joel16/android_kernel_sony_msm8994_rework.git (synced 2024-11-23 11:59:58 +00:00)
Merge upstream tag 'v3.10.40' into msm-3.10
* commit 'v3.10.40': (203 commits)
  Linux 3.10.40
  ARC: !PREEMPT: Ensure Return to kernel mode is IRQ safe
  drm: cirrus: add power management support
  Input: synaptics - add min/max quirk for ThinkPad Edge E431
  Input: synaptics - add min/max quirk for ThinkPad T431s, L440, L540, S1 Yoga and X1
  lockd: ensure we tear down any live sockets when socket creation fails during lockd_up
  dm thin: fix dangling bio in process_deferred_bios error path
  dm transaction manager: fix corruption due to non-atomic transaction commit
  Skip intel_crt_init for Dell XPS 8700
  mtd: sm_ftl: heap corruption in sm_create_sysfs_attributes()
  mtd: nuc900_nand: NULL dereference in nuc900_nand_enable()
  mtd: atmel_nand: Disable subpage NAND write when using Atmel PMECC
  tgafb: fix data copying
  gpio: mxs: Allow for recursive enable_irq_wake() call
  rtlwifi: rtl8188ee: initialize packet_beacon
  rtlwifi: rtl8192se: Fix regression due to commit 1bf4bbb
  rtlwifi: rtl8192se: Fix too long disable of IRQs
  rtlwifi: rtl8192cu: Fix too long disable of IRQs
  rtlwifi: rtl8188ee: Fix too long disable of IRQs
  rtlwifi: rtl8723ae: Fix too long disable of IRQs
  ...

Change-Id: If5388cf980cb123e35e1b29275ba288c89c5aa18
Signed-off-by: Ian Maund <imaund@codeaurora.org>
Commit 491fb5c232
@@ -55,6 +55,7 @@ zc3xx 0458:700f Genius VideoCam Web V2
sonixj 0458:7025 Genius Eye 311Q
sn9c20x 0458:7029 Genius Look 320s
sonixj 0458:702e Genius Slim 310 NB
sn9c20x 0458:7045 Genius Look 1320 V2
sn9c20x 0458:704a Genius Slim 1320
sn9c20x 0458:704c Genius i-Look 1321
sn9c20x 045e:00f4 LifeCam VX-6000 (SN9C20x + OV9650)
Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 10
SUBLEVEL = 36
SUBLEVEL = 40
EXTRAVERSION =
NAME = TOSSUG Baby Fish
@@ -11,13 +11,16 @@
/ {
compatible = "snps,nsimosci";
clock-frequency = <80000000>; /* 80 MHZ */
clock-frequency = <20000000>; /* 20 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&intc>;
chosen {
bootargs = "console=tty0 consoleblank=0";
/* this is for console on PGU */
/* bootargs = "console=tty0 consoleblank=0"; */
/* this is for console on serial */
bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=ttyS0,115200n8 consoleblank=0 debug";
};
aliases {
@@ -44,15 +47,14 @@
};
uart0: serial@c0000000 {
compatible = "snps,dw-apb-uart";
compatible = "ns8250";
reg = <0xc0000000 0x2000>;
interrupts = <11>;
#clock-frequency = <80000000>;
clock-frequency = <3686400>;
baud = <115200>;
reg-shift = <2>;
reg-io-width = <4>;
status = "okay";
no-loopback-test = <1>;
};
pgu0: pgu@c9000000 {
@@ -54,6 +54,7 @@ CONFIG_SERIO_ARC_PS2=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_DW=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_ARC=y
CONFIG_SERIAL_ARC_CONSOLE=y
# CONFIG_HW_RANDOM is not set
@@ -137,13 +137,6 @@ static inline void arch_unmask_irq(unsigned int irq)
flag \scratch
.endm
.macro IRQ_DISABLE_SAVE scratch, save
lr \scratch, [status32]
mov \save, \scratch /* Make a copy */
bic \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
flag \scratch
.endm
.macro IRQ_ENABLE scratch
lr \scratch, [status32]
or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
@@ -589,11 +589,7 @@ ARC_ENTRY ret_from_exception
; Pre-{IRQ,Trap,Exception} K/U mode from pt_regs->status32
ld r8, [sp, PT_status32] ; returning to User/Kernel Mode
#ifdef CONFIG_PREEMPT
bbit0 r8, STATUS_U_BIT, resume_kernel_mode
#else
bbit0 r8, STATUS_U_BIT, restore_regs
#endif
; Before returning to User mode check-for-and-complete any pending work
; such as rescheduling/signal-delivery etc.
@@ -653,10 +649,15 @@ resume_user_mode_begin:
b resume_user_mode_begin ; unconditionally back to U mode ret chks
; for single exit point from this block
#ifdef CONFIG_PREEMPT
resume_kernel_mode:
; Disable Interrupts from this point on
; CONFIG_PREEMPT: This is a must for preempt_schedule_irq()
; !CONFIG_PREEMPT: To ensure restore_regs is intr safe
IRQ_DISABLE r9
#ifdef CONFIG_PREEMPT
; Can't preempt if preemption disabled
GET_CURR_THR_INFO_FROM_SP r10
ld r8, [r10, THREAD_INFO_PREEMPT_COUNT]
@@ -666,8 +667,6 @@ resume_kernel_mode:
ld r9, [r10, THREAD_INFO_FLAGS]
bbit0 r9, TIF_NEED_RESCHED, restore_regs
IRQ_DISABLE r9
; Invoke PREEMPTION
bl preempt_schedule_irq
@@ -680,12 +679,11 @@ resume_kernel_mode:
;
; Restore the saved sys context (common exit-path for EXCPN/IRQ/Trap)
; IRQ shd definitely not happen between now and rtie
; All 2 entry points to here already disable interrupts
restore_regs :
; Disable Interrupts while restoring reg-file back
; XXX can this be optimised out
IRQ_DISABLE_SAVE r9, r10 ;@r10 has prisitine (pre-disable) copy
lr r10, [status32]
#ifdef CONFIG_ARC_CURR_IN_REG
; Restore User R25
@@ -92,6 +92,7 @@
#size-cells = <0>;
compatible = "marvell,orion-mdio";
reg = <0x72004 0x4>;
clocks = <&gateclk 4>;
};
ethernet@70000 {
@@ -263,6 +263,7 @@
regulator-name = "vdd_g3d";
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <1000000>;
regulator-always-on;
regulator-boot-on;
op_mode = <1>;
};
@@ -156,7 +156,7 @@
/* Select the best insn combination to perform the */ \
/* actual __m * __n / (__p << 64) operation. */ \
if (!__c) { \
asm ( "umull %Q0, %R0, %1, %Q2\n\t" \
asm ( "umull %Q0, %R0, %Q1, %Q2\n\t" \
"mov %Q0, #0" \
: "=&r" (__res) \
: "r" (__m), "r" (__n) \
@@ -3,11 +3,6 @@
#ifdef __KERNEL__
#if defined(CONFIG_CPU_USE_DOMAINS) && defined(CONFIG_SMP)
/* ARM doesn't provide unprivileged exclusive memory accessors */
#include <asm-generic/futex.h>
#else
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/errno.h>
@@ -164,6 +159,5 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
return ret;
}
#endif /* !(CPU_USE_DOMAINS && SMP) */
#endif /* __KERNEL__ */
#endif /* _ASM_ARM_FUTEX_H */
@@ -140,6 +140,7 @@
#define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 0x0c) << 2) /* 1100 */
#define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */
#define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */
#define L_PTE_MT_VECTORS (_AT(pteval_t, 0x0f) << 2) /* 1111 */
#define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2)
#ifndef __ASSEMBLY__
@@ -48,6 +48,5 @@
*/
#define __IGNORE_fadvise64_64
#define __IGNORE_migrate_pages
#define __IGNORE_kcmp
#endif /* __ASM_ARM_UNISTD_H */
@@ -169,3 +169,10 @@ void machine_kexec(struct kimage *image)
soft_restart(reboot_code_buffer_phys);
}
void arch_crash_save_vmcoreinfo(void)
{
#ifdef CONFIG_ARM_LPAE
VMCOREINFO_CONFIG(ARM_LPAE);
#endif
}
@@ -116,7 +116,7 @@ static void __init ebsa110_map_io(void)
iotable_init(ebsa110_io_desc, ARRAY_SIZE(ebsa110_io_desc));
}
static void __iomem *ebsa110_ioremap_caller(unsigned long cookie, size_t size,
static void __iomem *ebsa110_ioremap_caller(phys_addr_t cookie, size_t size,
unsigned int flags, void *caller)
{
return (void __iomem *)cookie;
@@ -65,7 +65,7 @@ static void imx3_idle(void)
: "=r" (reg));
}
static void __iomem *imx3_ioremap_caller(unsigned long phys_addr, size_t size,
static void __iomem *imx3_ioremap_caller(phys_addr_t phys_addr, size_t size,
unsigned int mtype, void *caller)
{
if (mtype == MT_DEVICE) {
@@ -23,7 +23,7 @@
#include "pci.h"
static void __iomem *__iop13xx_ioremap_caller(unsigned long cookie,
static void __iomem *__iop13xx_ioremap_caller(phys_addr_t cookie,
size_t size, unsigned int mtype, void *caller)
{
void __iomem * retval;
@@ -559,7 +559,7 @@ void ixp4xx_restart(enum reboot_mode mode, const char *cmd)
* fallback to the default.
*/
static void __iomem *ixp4xx_ioremap_caller(unsigned long addr, size_t size,
static void __iomem *ixp4xx_ioremap_caller(phys_addr_t addr, size_t size,
unsigned int mtype, void *caller)
{
if (!is_pci_memory(addr))
@@ -222,6 +222,7 @@ void __init ti81xx_init_irq(void)
static inline void omap_intc_handle_irq(void __iomem *base_addr, struct pt_regs *regs)
{
u32 irqnr;
int handled_irq = 0;
do {
irqnr = readl_relaxed(base_addr + 0x98);
@@ -249,8 +250,15 @@ out:
if (irqnr) {
irqnr = irq_find_mapping(domain, irqnr);
handle_IRQ(irqnr, regs);
handled_irq = 1;
}
} while (irqnr);
/* If an irq is masked or deasserted while active, we will
* keep ending up here with no irq handled. So remove it from
* the INTC with an ack.*/
if (!handled_irq)
omap_ack_irq(NULL);
}
asmlinkage void __exception_irq_entry omap2_intc_handle_irq(struct pt_regs *regs)
@@ -1955,7 +1955,7 @@ static struct omap_hwmod_irq_info omap3xxx_usb_host_hs_irqs[] = {
static struct omap_hwmod omap3xxx_usb_host_hs_hwmod = {
.name = "usb_host_hs",
.class = &omap3xxx_usb_host_hs_hwmod_class,
.clkdm_name = "l3_init_clkdm",
.clkdm_name = "usbhost_clkdm",
.mpu_irqs = omap3xxx_usb_host_hs_irqs,
.main_clk = "usbhost_48m_fck",
.prcm = {
@@ -2040,7 +2040,7 @@ static struct omap_hwmod_irq_info omap3xxx_usb_tll_hs_irqs[] = {
static struct omap_hwmod omap3xxx_usb_tll_hs_hwmod = {
.name = "usb_tll_hs",
.class = &omap3xxx_usb_tll_hs_hwmod_class,
.clkdm_name = "l3_init_clkdm",
.clkdm_name = "core_l4_clkdm",
.mpu_irqs = omap3xxx_usb_tll_hs_irqs,
.main_clk = "usbtll_fck",
.prcm = {
@@ -103,7 +103,7 @@ static inline void enable_omap3630_toggle_l2_on_restore(void) { }
#define PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD (1 << 0)
#if defined(CONFIG_ARCH_OMAP4)
#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP4)
extern u16 pm44xx_errata;
#define IS_PM44XX_ERRATUM(id) (pm44xx_errata & (id))
#else
@@ -436,7 +436,6 @@ config CPU_32v5
config CPU_32v6
bool
select CPU_USE_DOMAINS if CPU_V6 && MMU
select TLS_REG_EMUL if !CPU_32v6K && !MMU
config CPU_32v6K
@@ -651,7 +650,7 @@ config ARM_VIRT_EXT
config SWP_EMULATE
bool "Emulate SWP/SWPB instructions"
depends on !CPU_USE_DOMAINS && CPU_V7
depends on CPU_V7
default y if SMP
select HAVE_PROC_CPU if PROC_FS
help
@@ -344,7 +344,7 @@ void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
unsigned int mtype, void *caller)
{
phys_addr_t last_addr;
phys_addr_t offset = phys_addr & ~PAGE_MASK;
unsigned long offset = phys_addr & ~PAGE_MASK;
unsigned long pfn = __phys_to_pfn(phys_addr);
/*
@@ -202,13 +202,11 @@ int valid_phys_addr_range(phys_addr_t addr, size_t size)
}
/*
* We don't use supersection mappings for mmap() on /dev/mem, which
* means that we can't map the memory area above the 4G barrier into
* userspace.
* Do not allow /dev/mem mappings beyond the supported physical range.
*/
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}
#ifdef CONFIG_STRICT_DEVMEM
@@ -543,6 +543,16 @@ static void __init build_mem_type_table(void)
s2_pgprot = cp->pte_s2;
hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
/*
* We don't use domains on ARMv6 (since this causes problems with
* v6/v7 kernels), so we must use a separate memory type for user
* r/o, kernel r/w to map the vectors page.
*/
#ifndef CONFIG_ARM_LPAE
if (cpu_arch == CPU_ARCH_ARMv6)
vecs_pgprot |= L_PTE_MT_VECTORS;
#endif
/*
* ARMv6 and above have extended page tables.
*/
@@ -87,16 +87,16 @@ void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset,
return __arm_ioremap_pfn(pfn, offset, size, mtype);
}
void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
void __iomem *__arm_ioremap(phys_addr_t phys_addr, size_t size,
unsigned int mtype)
{
return (void __iomem *)phys_addr;
}
EXPORT_SYMBOL(__arm_ioremap);
void __iomem * (*arch_ioremap_caller)(unsigned long, size_t, unsigned int, void *);
void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);
void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
unsigned int mtype, void *caller)
{
return __arm_ioremap(phys_addr, size, mtype);
@@ -112,13 +112,9 @@
* 100x 1 0 1 r/o no acc
* 10x0 1 0 1 r/o no acc
* 1011 0 0 1 r/w no acc
* 110x 0 1 0 r/w r/o
* 11x0 0 1 0 r/w r/o
* 1111 0 1 1 r/w r/w
*
* If !CONFIG_CPU_USE_DOMAINS, the following permissions are changed:
* 110x 1 1 1 r/o r/o
* 11x0 1 1 1 r/o r/o
* 1111 0 1 1 r/w r/w
*/
.macro armv6_mt_table pfx
\pfx\()_mt_table:
@@ -137,7 +133,7 @@
.long PTE_EXT_TEX(2) @ L_PTE_MT_DEV_NONSHARED
.long 0x00 @ unused
.long 0x00 @ unused
.long 0x00 @ unused
.long PTE_CACHEABLE | PTE_BUFFERABLE | PTE_EXT_APX @ L_PTE_MT_VECTORS
.endm
.macro armv6_set_pte_ext pfx
@@ -158,24 +154,21 @@
tst r1, #L_PTE_USER
orrne r3, r3, #PTE_EXT_AP1
#ifdef CONFIG_CPU_USE_DOMAINS
@ allow kernel read/write access to read-only user pages
tstne r3, #PTE_EXT_APX
bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
#endif
@ user read-only -> kernel read-only
bicne r3, r3, #PTE_EXT_AP0
tst r1, #L_PTE_XN
orrne r3, r3, #PTE_EXT_XN
orr r3, r3, r2
eor r3, r3, r2
tst r1, #L_PTE_YOUNG
tstne r1, #L_PTE_PRESENT
moveq r3, #0
#ifndef CONFIG_CPU_USE_DOMAINS
tstne r1, #L_PTE_NONE
movne r3, #0
#endif
str r3, [r0]
mcr p15, 0, r0, c7, c10, 1 @ flush_pte
@@ -90,21 +90,14 @@ ENTRY(cpu_v7_set_pte_ext)
tst r1, #L_PTE_USER
orrne r3, r3, #PTE_EXT_AP1
#ifdef CONFIG_CPU_USE_DOMAINS
@ allow kernel read/write access to read-only user pages
tstne r3, #PTE_EXT_APX
bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
#endif
tst r1, #L_PTE_XN
orrne r3, r3, #PTE_EXT_XN
tst r1, #L_PTE_YOUNG
tstne r1, #L_PTE_VALID
#ifndef CONFIG_CPU_USE_DOMAINS
eorne r1, r1, #L_PTE_NONE
tstne r1, #L_PTE_NONE
#endif
moveq r3, #0
ARM( str r3, [r0, #2048]! )
@@ -211,7 +211,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
if (pte_valid_user(pte)) {
if (pte_exec(pte))
if (!pte_special(pte) && pte_exec(pte))
__sync_icache_dcache(pte, addr);
if (pte_dirty(pte) && pte_write(pte))
pte_val(pte) &= ~PTE_RDONLY;
@@ -298,11 +298,11 @@ static inline int has_transparent_hugepage(void)
* Mark the prot value as uncacheable and unbufferable.
*/
#define pgprot_noncached(prot) \
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_dmacoherent(prot) \
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
@@ -15,6 +15,7 @@ config M68K
select FPU if MMU
select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE
select HAVE_FUTEX_CMPXCHG if MMU && FUTEX
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_REL
select MODULES_USE_ELF_RELA
@@ -1571,17 +1571,17 @@ kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
#else
/* UserLocal not implemented */
er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
er = EMULATE_FAIL;
#endif
break;
default:
printk("RDHWR not supported\n");
kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
er = EMULATE_FAIL;
break;
}
} else {
printk("Emulate RI not supported @ %p: %#x\n", opc, inst);
kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
er = EMULATE_FAIL;
}
@@ -1590,6 +1590,7 @@ kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
*/
if (er == EMULATE_FAIL) {
vcpu->arch.pc = curr_pc;
er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
}
return er;
}
@@ -43,6 +43,7 @@ LEAF(swsusp_arch_resume)
bne t1, t3, 1b
PTR_L t0, PBE_NEXT(t0)
bnez t0, 0b
jal local_flush_tlb_all /* Avoid TLB mismatch after kernel resume */
PTR_LA t0, saved_regs
PTR_L ra, PT_R31(t0)
PTR_L sp, PT_R29(t0)
@ -8,7 +8,11 @@
|
||||
#include <linux/sched.h>
|
||||
|
||||
#define COMPAT_USER_HZ 100
|
||||
#ifdef __BIG_ENDIAN__
|
||||
#define COMPAT_UTS_MACHINE "ppc\0\0"
|
||||
#else
|
||||
#define COMPAT_UTS_MACHINE "ppcle\0\0"
|
||||
#endif
|
||||
|
||||
typedef u32 compat_size_t;
|
||||
typedef s32 compat_ssize_t;
|
||||
|
@ -208,6 +208,7 @@
|
||||
#define SPRN_ACOP 0x1F /* Available Coprocessor Register */
|
||||
#define SPRN_TFIAR 0x81 /* Transaction Failure Inst Addr */
|
||||
#define SPRN_TEXASR 0x82 /* Transaction EXception & Summary */
|
||||
#define TEXASR_FS __MASK(63-36) /* Transaction Failure Summary */
|
||||
#define SPRN_TEXASRU 0x83 /* '' '' '' Upper 32 */
|
||||
#define SPRN_TFHAR 0x80 /* Transaction Failure Handler Addr */
|
||||
#define SPRN_CTRLF 0x088
|
||||
|
@ -523,6 +523,31 @@ out_and_saveregs:
|
||||
tm_save_sprs(thr);
|
||||
}
|
||||
|
||||
extern void __tm_recheckpoint(struct thread_struct *thread,
|
||||
unsigned long orig_msr);
|
||||
|
||||
void tm_recheckpoint(struct thread_struct *thread,
|
||||
unsigned long orig_msr)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
/* We really can't be interrupted here as the TEXASR registers can't
|
||||
* change and later in the trecheckpoint code, we have a userspace R1.
|
||||
* So let's hard disable over this region.
|
||||
*/
|
||||
local_irq_save(flags);
|
||||
hard_irq_disable();
|
||||
|
||||
/* The TM SPRs are restored here, so that TEXASR.FS can be set
|
||||
* before the trecheckpoint and no explosion occurs.
|
||||
*/
|
||||
tm_restore_sprs(thread);
|
||||
|
||||
__tm_recheckpoint(thread, orig_msr);
|
||||
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
static inline void tm_recheckpoint_new_task(struct task_struct *new)
|
||||
{
|
||||
unsigned long msr;
|
||||
@ -541,13 +566,10 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new)
|
||||
if (!new->thread.regs)
|
||||
return;
|
||||
|
||||
/* The TM SPRs are restored here, so that TEXASR.FS can be set
|
||||
* before the trecheckpoint and no explosion occurs.
|
||||
*/
|
||||
tm_restore_sprs(&new->thread);
|
||||
|
||||
if (!MSR_TM_ACTIVE(new->thread.regs->msr))
|
||||
if (!MSR_TM_ACTIVE(new->thread.regs->msr)){
|
||||
tm_restore_sprs(&new->thread);
|
||||
return;
|
||||
}
|
||||
msr = new->thread.tm_orig_msr;
|
||||
/* Recheckpoint to restore original checkpointed register state. */
|
||||
TM_DEBUG("*** tm_recheckpoint of pid %d "
|
||||
|
@ -863,6 +863,8 @@ static long restore_tm_user_regs(struct pt_regs *regs,
|
||||
* transactional versions should be loaded.
|
||||
*/
|
||||
tm_enable();
|
||||
/* Make sure the transaction is marked as failed */
|
||||
current->thread.tm_texasr |= TEXASR_FS;
|
||||
/* This loads the checkpointed FP/VEC state, if used */
|
||||
tm_recheckpoint(¤t->thread, msr);
|
||||
/* Get the top half of the MSR */
|
||||
|
@ -513,6 +513,8 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
|
||||
}
|
||||
#endif
|
||||
tm_enable();
|
||||
/* Make sure the transaction is marked as failed */
|
||||
current->thread.tm_texasr |= TEXASR_FS;
|
||||
/* This loads the checkpointed FP/VEC state, if used */
|
||||
tm_recheckpoint(¤t->thread, msr);
|
||||
|
||||
|
@ -296,7 +296,7 @@ dont_backup_fp:
|
||||
* Call with IRQs off, stacks get all out of sync for
|
||||
* some periods in here!
|
||||
*/
|
||||
_GLOBAL(tm_recheckpoint)
|
||||
_GLOBAL(__tm_recheckpoint)
|
||||
mfcr r5
|
||||
mflr r0
|
||||
std r5, 8(r1)
|
||||
|
@ -116,6 +116,7 @@ config S390
|
||||
select HAVE_FUNCTION_GRAPH_TRACER
|
||||
select HAVE_FUNCTION_TRACER
|
||||
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
|
||||
select HAVE_FUTEX_CMPXCHG if FUTEX
|
||||
select HAVE_KERNEL_BZIP2
|
||||
select HAVE_KERNEL_GZIP
|
||||
select HAVE_KERNEL_LZMA
|
||||
|
@ -219,7 +219,7 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *);
|
||||
#define to_ccwdev(n) container_of(n, struct ccw_device, dev)
|
||||
#define to_ccwdrv(n) container_of(n, struct ccw_driver, driver)
|
||||
|
||||
extern struct ccw_device *ccw_device_probe_console(void);
|
||||
extern struct ccw_device *ccw_device_probe_console(struct ccw_driver *);
|
||||
extern void ccw_device_wait_idle(struct ccw_device *);
|
||||
extern int ccw_device_force_console(struct ccw_device *);
|
||||
|
||||
|
@ -243,7 +243,6 @@ static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter)
|
||||
case BPF_S_LD_W_IND:
|
||||
case BPF_S_LD_H_IND:
|
||||
case BPF_S_LD_B_IND:
|
||||
case BPF_S_LDX_B_MSH:
|
||||
case BPF_S_LD_IMM:
|
||||
case BPF_S_LD_MEM:
|
||||
case BPF_S_MISC_TXA:
|
||||
|
@ -115,7 +115,7 @@ static int print_trace_stack(void *data, char *name)
|
||||
*/
|
||||
static void print_trace_address(void *data, unsigned long addr, int reliable)
|
||||
{
|
||||
printk(data);
|
||||
printk("%s", (char *)data);
|
||||
printk_address(addr, reliable);
|
||||
}
|
||||
|
||||
|
@ -25,7 +25,7 @@ config SPARC
|
||||
select RTC_DRV_M48T59
|
||||
select HAVE_DMA_ATTRS
|
||||
select HAVE_DMA_API_DEBUG
|
||||
select HAVE_ARCH_JUMP_LABEL
|
||||
select HAVE_ARCH_JUMP_LABEL if SPARC64
|
||||
select GENERIC_IRQ_SHOW
|
||||
select ARCH_WANT_IPC_PARSE_VERSION
|
||||
select GENERIC_PCI_IOMAP
|
||||
|
@ -262,8 +262,8 @@ extern unsigned long __must_check __clear_user(void __user *, unsigned long);
|
||||
extern __must_check long strlen_user(const char __user *str);
|
||||
extern __must_check long strnlen_user(const char __user *str, long n);
|
||||
|
||||
#define __copy_to_user_inatomic ___copy_to_user
|
||||
#define __copy_from_user_inatomic ___copy_from_user
|
||||
#define __copy_to_user_inatomic __copy_to_user
|
||||
#define __copy_from_user_inatomic __copy_from_user
|
||||
|
||||
struct pt_regs;
|
||||
extern unsigned long compute_effective_address(struct pt_regs *,
|
||||
|
@ -399,8 +399,8 @@ static void apb_fake_ranges(struct pci_dev *dev,
|
||||
apb_calc_first_last(map, &first, &last);
|
||||
res = bus->resource[1];
|
||||
res->flags = IORESOURCE_MEM;
|
||||
region.start = (first << 21);
|
||||
region.end = (last << 21) + ((1 << 21) - 1);
|
||||
region.start = (first << 29);
|
||||
region.end = (last << 29) + ((1 << 29) - 1);
|
||||
pcibios_bus_to_resource(dev, res, ®ion);
|
||||
}
|
||||
|
||||
|
@ -57,9 +57,12 @@ void arch_cpu_idle(void)
|
||||
{
|
||||
if (tlb_type != hypervisor) {
|
||||
touch_nmi_watchdog();
|
||||
local_irq_enable();
|
||||
} else {
|
||||
unsigned long pstate;
|
||||
|
||||
local_irq_enable();
|
||||
|
||||
/* The sun4v sleeping code requires that we have PSTATE.IE cleared over
|
||||
* the cpu sleep hypervisor call.
|
||||
*/
|
||||
@ -81,7 +84,6 @@ void arch_cpu_idle(void)
|
||||
: "=&r" (pstate)
|
||||
: "i" (PSTATE_IE));
|
||||
}
|
||||
local_irq_enable();
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
|
@ -189,7 +189,8 @@ linux_sparc_syscall32:
|
||||
mov %i0, %l5 ! IEU1
|
||||
5: call %l7 ! CTI Group brk forced
|
||||
srl %i5, 0, %o5 ! IEU1
|
||||
ba,a,pt %xcc, 3f
|
||||
ba,pt %xcc, 3f
|
||||
sra %o0, 0, %o0
|
||||
|
||||
/* Linux native system calls enter here... */
|
||||
.align 32
|
||||
@ -217,7 +218,6 @@ linux_sparc_syscall:
|
||||
3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
|
||||
ret_sys_call:
|
||||
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
|
||||
sra %o0, 0, %o0
|
||||
mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
|
||||
sllx %g2, 32, %g2
|
||||
|
||||
|
@ -865,6 +865,9 @@ fail:
|
||||
* Because the x86 boot code expects to be passed a boot_params we
|
||||
* need to create one ourselves (usually the bootloader would create
|
||||
* one for us).
|
||||
*
|
||||
* The caller is responsible for filling out ->code32_start in the
|
||||
* returned boot_params.
|
||||
*/
|
||||
struct boot_params *make_boot_params(void *handle, efi_system_table_t *_table)
|
||||
{
|
||||
@ -921,8 +924,6 @@ struct boot_params *make_boot_params(void *handle, efi_system_table_t *_table)
|
||||
hdr->vid_mode = 0xffff;
|
||||
hdr->boot_flag = 0xAA55;
|
||||
|
||||
hdr->code32_start = (__u64)(unsigned long)image->image_base;
|
||||
|
||||
hdr->type_of_loader = 0x21;
|
||||
|
||||
/* Convert unicode cmdline to ascii */
|
||||
|
@ -50,6 +50,13 @@ ENTRY(efi_pe_entry)
|
||||
pushl %eax
|
||||
pushl %esi
|
||||
pushl %ecx
|
||||
|
||||
call reloc
|
||||
reloc:
|
||||
popl %ecx
|
||||
subl reloc, %ecx
|
||||
movl %ecx, BP_code32_start(%eax)
|
||||
|
||||
sub $0x4, %esp
|
||||
|
||||
ENTRY(efi_stub_entry)
|
||||
@ -63,12 +70,7 @@ ENTRY(efi_stub_entry)
|
||||
hlt
|
||||
jmp 1b
|
||||
2:
|
||||
call 3f
|
||||
3:
|
||||
popl %eax
|
||||
subl $3b, %eax
|
||||
subl BP_pref_address(%esi), %eax
|
||||
add BP_code32_start(%esi), %eax
|
||||
movl BP_code32_start(%esi), %eax
|
||||
leal preferred_addr(%eax), %eax
|
||||
jmp *%eax
|
||||
|
||||
|
@ -217,6 +217,8 @@ ENTRY(efi_pe_entry)
|
||||
cmpq $0,%rax
|
||||
je 1f
|
||||
mov %rax, %rdx
|
||||
leaq startup_32(%rip), %rax
|
||||
movl %eax, BP_code32_start(%rdx)
|
||||
popq %rsi
|
||||
popq %rdi
|
||||
|
||||
@ -230,12 +232,7 @@ ENTRY(efi_stub_entry)
|
||||
hlt
|
||||
jmp 1b
|
||||
2:
|
||||
call 3f
|
||||
3:
|
||||
popq %rax
|
||||
subq $3b, %rax
|
||||
subq BP_pref_address(%rsi), %rax
|
||||
add BP_code32_start(%esi), %eax
|
||||
movl BP_code32_start(%esi), %eax
|
||||
leaq preferred_addr(%rax), %rax
|
||||
jmp *%rax
|
||||
|
||||
|
@ -24,10 +24,6 @@
|
||||
.align 16
|
||||
.Lbswap_mask:
|
||||
.octa 0x000102030405060708090a0b0c0d0e0f
|
||||
.Lpoly:
|
||||
.octa 0xc2000000000000000000000000000001
|
||||
.Ltwo_one:
|
||||
.octa 0x00000001000000000000000000000001
|
||||
|
||||
#define DATA %xmm0
|
||||
#define SHASH %xmm1
|
||||
@ -134,28 +130,3 @@ ENTRY(clmul_ghash_update)
|
||||
.Lupdate_just_ret:
|
||||
ret
|
||||
ENDPROC(clmul_ghash_update)
|
||||
|
||||
/*
|
||||
* void clmul_ghash_setkey(be128 *shash, const u8 *key);
|
||||
*
|
||||
* Calculate hash_key << 1 mod poly
|
||||
*/
|
||||
ENTRY(clmul_ghash_setkey)
|
||||
movaps .Lbswap_mask, BSWAP
|
||||
movups (%rsi), %xmm0
|
||||
PSHUFB_XMM BSWAP %xmm0
|
||||
movaps %xmm0, %xmm1
|
||||
psllq $1, %xmm0
|
||||
psrlq $63, %xmm1
|
||||
movaps %xmm1, %xmm2
|
||||
pslldq $8, %xmm1
|
||||
psrldq $8, %xmm2
|
||||
por %xmm1, %xmm0
|
||||
# reduction
|
||||
pshufd $0b00100100, %xmm2, %xmm1
|
||||
pcmpeqd .Ltwo_one, %xmm1
|
||||
pand .Lpoly, %xmm1
|
||||
pxor %xmm1, %xmm0
|
||||
movups %xmm0, (%rdi)
|
||||
ret
|
||||
ENDPROC(clmul_ghash_setkey)
|
||||
|
@ -30,8 +30,6 @@ void clmul_ghash_mul(char *dst, const be128 *shash);
|
||||
void clmul_ghash_update(char *dst, const char *src, unsigned int srclen,
|
||||
const be128 *shash);
|
||||
|
||||
void clmul_ghash_setkey(be128 *shash, const u8 *key);
|
||||
|
||||
struct ghash_async_ctx {
|
||||
struct cryptd_ahash *cryptd_tfm;
|
||||
};
|
||||
@ -58,13 +56,23 @@ static int ghash_setkey(struct crypto_shash *tfm,
|
||||
const u8 *key, unsigned int keylen)
|
||||
{
|
||||
struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
|
||||
be128 *x = (be128 *)key;
|
||||
u64 a, b;
|
||||
|
||||
if (keylen != GHASH_BLOCK_SIZE) {
|
||||
crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
clmul_ghash_setkey(&ctx->shash, key);
|
||||
/* perform multiplication by 'x' in GF(2^128) */
|
||||
a = be64_to_cpu(x->a);
|
||||
b = be64_to_cpu(x->b);
|
||||
|
||||
ctx->shash.a = (__be64)((b << 1) | (a >> 63));
|
||||
ctx->shash.b = (__be64)((a << 1) | (b >> 63));
|
||||
|
||||
if (a >> 63)
|
||||
ctx->shash.b ^= cpu_to_be64(0xc2);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -202,18 +202,15 @@ static void __init intel_remapping_check(int num, int slot, int func)
|
||||
revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID);
|
||||
|
||||
/*
|
||||
* Revision 13 of all triggering devices id in this quirk have
|
||||
* a problem draining interrupts when irq remapping is enabled,
|
||||
* and should be flagged as broken. Additionally revisions 0x12
|
||||
* and 0x22 of device id 0x3405 has this problem.
|
||||
* Revision <= 13 of all triggering devices id in this quirk
|
||||
* have a problem draining interrupts when irq remapping is
|
||||
* enabled, and should be flagged as broken. Additionally
|
||||
* revision 0x22 of device id 0x3405 has this problem.
|
||||
*/
|
||||
if (revision == 0x13)
|
||||
if (revision <= 0x13)
|
||||
set_irq_remapping_broken();
|
||||
else if ((device == 0x3405) &&
|
||||
((revision == 0x12) ||
|
||||
(revision == 0x22)))
|
||||
else if (device == 0x3405 && revision == 0x22)
|
||||
set_irq_remapping_broken();
|
||||
|
||||
}
|
||||
|
||||
#define QFLAG_APPLY_ONCE 0x1
|
||||
|
@ -659,8 +659,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
|
||||
ret = -EPERM;
|
||||
goto out;
|
||||
}
|
||||
run_sync();
|
||||
out:
|
||||
run_sync();
|
||||
return ret;
|
||||
|
||||
fail_update:
|
||||
|
@ -229,6 +229,17 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* On x86-64 we do not support 16-bit segments due to
|
||||
* IRET leaking the high bits of the kernel stack address.
|
||||
*/
|
||||
#ifdef CONFIG_X86_64
|
||||
if (!ldt_info.seg_32bit) {
|
||||
error = -EINVAL;
|
||||
goto out_unlock;
|
||||
}
|
||||
#endif
|
||||
|
||||
fill_ldt(&ldt, &ldt_info);
|
||||
if (oldmode)
|
||||
ldt.avl = 0;
|
||||
|
@ -4765,21 +4765,26 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
|
||||
static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
|
||||
{
|
||||
struct ata_queued_cmd *qc = NULL;
|
||||
unsigned int i;
|
||||
unsigned int i, tag;
|
||||
|
||||
/* no command while frozen */
|
||||
if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
|
||||
return NULL;
|
||||
|
||||
/* the last tag is reserved for internal command. */
|
||||
for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
|
||||
if (!test_and_set_bit(i, &ap->qc_allocated)) {
|
||||
qc = __ata_qc_from_tag(ap, i);
|
||||
for (i = 0; i < ATA_MAX_QUEUE; i++) {
|
||||
tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE;
|
||||
|
||||
/* the last tag is reserved for internal command. */
|
||||
if (tag == ATA_TAG_INTERNAL)
|
||||
continue;
|
||||
|
||||
if (!test_and_set_bit(tag, &ap->qc_allocated)) {
|
||||
qc = __ata_qc_from_tag(ap, tag);
|
||||
qc->tag = tag;
|
||||
ap->last_tag = tag;
|
||||
break;
|
||||
}
|
||||
|
||||
if (qc)
|
||||
qc->tag = i;
|
||||
}
|
||||
|
||||
return qc;
|
||||
}
|
||||
|
@ -3053,7 +3053,10 @@ static int raw_cmd_copyout(int cmd, void __user *param,
|
||||
int ret;
|
||||
|
||||
while (ptr) {
|
||||
ret = copy_to_user(param, ptr, sizeof(*ptr));
|
||||
struct floppy_raw_cmd cmd = *ptr;
|
||||
cmd.next = NULL;
|
||||
cmd.kernel_data = NULL;
|
||||
ret = copy_to_user(param, &cmd, sizeof(cmd));
|
||||
if (ret)
|
||||
return -EFAULT;
|
||||
param += sizeof(struct floppy_raw_cmd);
|
||||
@ -3107,10 +3110,11 @@ loop:
|
||||
return -ENOMEM;
|
||||
*rcmd = ptr;
|
||||
ret = copy_from_user(ptr, param, sizeof(*ptr));
|
||||
if (ret)
|
||||
return -EFAULT;
|
||||
ptr->next = NULL;
|
||||
ptr->buffer_length = 0;
|
||||
ptr->kernel_data = NULL;
|
||||
if (ret)
|
||||
return -EFAULT;
|
||||
param += sizeof(struct floppy_raw_cmd);
|
||||
if (ptr->cmd_count > 33)
|
||||
/* the command may now also take up the space
|
||||
@ -3126,7 +3130,6 @@ loop:
|
||||
for (i = 0; i < 16; i++)
|
||||
ptr->reply[i] = 0;
|
||||
ptr->resultcode = 0;
|
||||
ptr->kernel_data = NULL;
|
||||
|
||||
if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
|
||||
if (ptr->length <= 0)
|
||||
|
@ -4040,6 +4040,7 @@ skip_create_disk:
|
||||
blk_queue_max_hw_sectors(dd->queue, 0xffff);
|
||||
blk_queue_max_segment_size(dd->queue, 0x400000);
|
||||
blk_queue_io_min(dd->queue, 4096);
|
||||
blk_queue_bounce_limit(dd->queue, dd->pdev->dma_mask);
|
||||
|
||||
/*
|
||||
* write back cache is not supported in the device. FUA depends on
|
||||
|
@ -352,7 +352,7 @@ static inline void write_all_bytes(struct si_sm_data *bt)
|
||||
|
||||
static inline int read_all_bytes(struct si_sm_data *bt)
|
||||
{
|
||||
unsigned char i;
|
||||
unsigned int i;
|
||||
|
||||
/*
|
||||
* length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
|
||||
|
@ -26,41 +26,108 @@
|
||||
static unsigned int busfreq; /* FSB, in 10 kHz */
|
||||
static unsigned int max_multiplier;
|
||||
|
||||
static unsigned int param_busfreq = 0;
|
||||
static unsigned int param_max_multiplier = 0;
|
||||
|
||||
module_param_named(max_multiplier, param_max_multiplier, uint, S_IRUGO);
|
||||
MODULE_PARM_DESC(max_multiplier, "Maximum multiplier (allowed values: 20 30 35 40 45 50 55 60)");
|
||||
|
||||
module_param_named(bus_frequency, param_busfreq, uint, S_IRUGO);
|
||||
MODULE_PARM_DESC(bus_frequency, "Bus frequency in kHz");
|
||||
|
||||
/* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */
|
||||
static struct cpufreq_frequency_table clock_ratio[] = {
|
||||
{45, /* 000 -> 4.5x */ 0},
|
||||
{50, /* 001 -> 5.0x */ 0},
|
||||
{40, /* 010 -> 4.0x */ 0},
|
||||
{55, /* 011 -> 5.5x */ 0},
|
||||
{20, /* 100 -> 2.0x */ 0},
|
||||
{30, /* 101 -> 3.0x */ 0},
|
||||
{60, /* 110 -> 6.0x */ 0},
|
||||
{55, /* 011 -> 5.5x */ 0},
|
||||
{50, /* 001 -> 5.0x */ 0},
|
||||
{45, /* 000 -> 4.5x */ 0},
|
||||
{40, /* 010 -> 4.0x */ 0},
|
||||
{35, /* 111 -> 3.5x */ 0},
|
||||
{30, /* 101 -> 3.0x */ 0},
|
||||
{20, /* 100 -> 2.0x */ 0},
|
||||
{0, CPUFREQ_TABLE_END}
|
||||
};
|
||||
|
||||
static const u8 index_to_register[8] = { 6, 3, 1, 0, 2, 7, 5, 4 };
|
||||
static const u8 register_to_index[8] = { 3, 2, 4, 1, 7, 6, 0, 5 };
|
||||
|
||||
static const struct {
|
||||
unsigned freq;
|
||||
unsigned mult;
|
||||
} usual_frequency_table[] = {
|
||||
{ 400000, 40 }, // 100 * 4
|
||||
{ 450000, 45 }, // 100 * 4.5
|
||||
{ 475000, 50 }, // 95 * 5
|
||||
{ 500000, 50 }, // 100 * 5
|
||||
{ 506250, 45 }, // 112.5 * 4.5
|
||||
{ 533500, 55 }, // 97 * 5.5
|
||||
{ 550000, 55 }, // 100 * 5.5
|
||||
{ 562500, 50 }, // 112.5 * 5
|
||||
{ 570000, 60 }, // 95 * 6
|
||||
{ 600000, 60 }, // 100 * 6
|
||||
{ 618750, 55 }, // 112.5 * 5.5
|
||||
{ 660000, 55 }, // 120 * 5.5
|
||||
{ 675000, 60 }, // 112.5 * 6
|
||||
{ 720000, 60 }, // 120 * 6
|
||||
};
|
||||
|
||||
#define FREQ_RANGE 3000
|
||||
|
||||
/**
|
||||
* powernow_k6_get_cpu_multiplier - returns the current FSB multiplier
|
||||
*
|
||||
* Returns the current setting of the frequency multiplier. Core clock
|
||||
* Returns the current setting of the frequency multiplier. Core clock
|
||||
* speed is frequency of the Front-Side Bus multiplied with this value.
|
||||
*/
|
||||
static int powernow_k6_get_cpu_multiplier(void)
|
||||
{
|
||||
u64 invalue = 0;
|
||||
unsigned long invalue = 0;
|
||||
u32 msrval;
|
||||
|
||||
local_irq_disable();
|
||||
|
||||
msrval = POWERNOW_IOPORT + 0x1;
|
||||
wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
|
||||
invalue = inl(POWERNOW_IOPORT + 0x8);
|
||||
msrval = POWERNOW_IOPORT + 0x0;
|
||||
wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
|
||||
|
||||
return clock_ratio[(invalue >> 5)&7].driver_data;
|
||||
local_irq_enable();
|
||||
|
||||
return clock_ratio[register_to_index[(invalue >> 5)&7]].driver_data;
|
||||
}
|
||||
|
||||
static void powernow_k6_set_cpu_multiplier(unsigned int best_i)
|
||||
{
|
||||
unsigned long outvalue, invalue;
|
||||
unsigned long msrval;
|
||||
unsigned long cr0;
|
||||
|
||||
/* we now need to transform best_i to the BVC format, see AMD#23446 */
|
||||
|
||||
/*
|
||||
* The processor doesn't respond to inquiry cycles while changing the
|
||||
* frequency, so we must disable cache.
|
||||
*/
|
||||
local_irq_disable();
|
||||
cr0 = read_cr0();
|
||||
write_cr0(cr0 | X86_CR0_CD);
|
||||
wbinvd();
|
||||
|
||||
outvalue = (1<<12) | (1<<10) | (1<<9) | (index_to_register[best_i]<<5);
|
||||
|
||||
msrval = POWERNOW_IOPORT + 0x1;
|
||||
wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
|
||||
invalue = inl(POWERNOW_IOPORT + 0x8);
|
||||
invalue = invalue & 0x1f;
|
||||
outvalue = outvalue | invalue;
|
||||
outl(outvalue, (POWERNOW_IOPORT + 0x8));
|
||||
msrval = POWERNOW_IOPORT + 0x0;
|
||||
wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
|
||||
|
||||
write_cr0(cr0);
|
||||
local_irq_enable();
|
||||
}
|
||||
|
||||
/**
|
||||
* powernow_k6_set_state - set the PowerNow! multiplier
|
||||
@ -71,8 +138,6 @@ static int powernow_k6_get_cpu_multiplier(void)
|
||||
static void powernow_k6_set_state(struct cpufreq_policy *policy,
|
||||
unsigned int best_i)
|
||||
{
|
||||
unsigned long outvalue = 0, invalue = 0;
|
||||
unsigned long msrval;
|
||||
struct cpufreq_freqs freqs;
|
||||
|
||||
if (clock_ratio[best_i].driver_data > max_multiplier) {
|
||||
@ -85,18 +150,7 @@ static void powernow_k6_set_state(struct cpufreq_policy *policy,
|
||||
|
||||
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
|
||||
|
||||
/* we now need to transform best_i to the BVC format, see AMD#23446 */
|
||||
|
||||
outvalue = (1<<12) | (1<<10) | (1<<9) | (best_i<<5);
|
||||
|
||||
msrval = POWERNOW_IOPORT + 0x1;
|
||||
wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
|
||||
invalue = inl(POWERNOW_IOPORT + 0x8);
|
||||
invalue = invalue & 0xf;
|
||||
outvalue = outvalue | invalue;
|
||||
outl(outvalue , (POWERNOW_IOPORT + 0x8));
|
||||
msrval = POWERNOW_IOPORT + 0x0;
|
||||
wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
|
||||
powernow_k6_set_cpu_multiplier(best_i);
|
||||
|
||||
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
|
||||
|
||||
@ -141,18 +195,57 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
|
||||
{
|
||||
unsigned int i, f;
|
||||
int result;
|
||||
unsigned khz;
|
||||
|
||||
if (policy->cpu != 0)
|
||||
return -ENODEV;
|
||||
|
||||
/* get frequencies */
|
||||
max_multiplier = powernow_k6_get_cpu_multiplier();
|
||||
busfreq = cpu_khz / max_multiplier;
|
||||
max_multiplier = 0;
|
||||
khz = cpu_khz;
|
||||
for (i = 0; i < ARRAY_SIZE(usual_frequency_table); i++) {
|
||||
if (khz >= usual_frequency_table[i].freq - FREQ_RANGE &&
|
||||
khz <= usual_frequency_table[i].freq + FREQ_RANGE) {
|
||||
khz = usual_frequency_table[i].freq;
|
||||
max_multiplier = usual_frequency_table[i].mult;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (param_max_multiplier) {
|
||||
for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
|
||||
if (clock_ratio[i].driver_data == param_max_multiplier) {
|
||||
max_multiplier = param_max_multiplier;
|
||||
goto have_max_multiplier;
|
||||
}
|
||||
}
|
||||
printk(KERN_ERR "powernow-k6: invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!max_multiplier) {
|
||||
printk(KERN_WARNING "powernow-k6: unknown frequency %u, cannot determine current multiplier\n", khz);
|
||||
printk(KERN_WARNING "powernow-k6: use module parameters max_multiplier and bus_frequency\n");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
have_max_multiplier:
|
||||
param_max_multiplier = max_multiplier;
|
||||
|
||||
if (param_busfreq) {
|
||||
if (param_busfreq >= 50000 && param_busfreq <= 150000) {
|
||||
busfreq = param_busfreq / 10;
|
||||
goto have_busfreq;
|
||||
}
|
||||
printk(KERN_ERR "powernow-k6: invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
busfreq = khz / max_multiplier;
|
||||
have_busfreq:
|
||||
param_busfreq = busfreq * 10;
|
||||
|
||||
/* table init */
|
||||
for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
|
||||
@ -164,7 +257,7 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
|
||||
}
|
||||
|
||||
/* cpuinfo and default policy values */
|
||||
policy->cpuinfo.transition_latency = 200000;
|
||||
policy->cpuinfo.transition_latency = 500000;
|
||||
policy->cur = busfreq * max_multiplier;
|
||||
|
||||
result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio);
|
||||
|
@ -251,7 +251,8 @@ struct cpuidle_driver *cpuidle_driver_ref(void)
|
||||
spin_lock(&cpuidle_driver_lock);
|
||||
|
||||
drv = cpuidle_get_driver();
|
||||
drv->refcnt++;
|
||||
if (drv)
|
||||
drv->refcnt++;
|
||||
|
||||
spin_unlock(&cpuidle_driver_lock);
|
||||
return drv;
|
||||
|
@ -214,7 +214,8 @@ static void __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
|
||||
ct->regs.ack = PINCTRL_IRQSTAT(port) + MXS_CLR;
|
||||
ct->regs.mask = PINCTRL_IRQEN(port);
|
||||
|
||||
irq_setup_generic_chip(gc, IRQ_MSK(32), 0, IRQ_NOREQUEST, 0);
|
||||
irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK,
|
||||
IRQ_NOREQUEST, 0);
|
||||
}
|
||||
|
||||
static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
|
||||
|
@ -11,6 +11,7 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/console.h>
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/drm_crtc_helper.h>
|
||||
|
||||
#include "cirrus_drv.h"
|
||||
|
||||
@ -75,6 +76,41 @@ static void cirrus_pci_remove(struct pci_dev *pdev)
|
||||
drm_put_dev(dev);
|
||||
}
|
||||
|
||||
static int cirrus_pm_suspend(struct device *dev)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
struct drm_device *drm_dev = pci_get_drvdata(pdev);
|
||||
struct cirrus_device *cdev = drm_dev->dev_private;
|
||||
|
||||
drm_kms_helper_poll_disable(drm_dev);
|
||||
|
||||
if (cdev->mode_info.gfbdev) {
|
||||
console_lock();
|
||||
fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 1);
|
||||
console_unlock();
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cirrus_pm_resume(struct device *dev)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
struct drm_device *drm_dev = pci_get_drvdata(pdev);
|
||||
struct cirrus_device *cdev = drm_dev->dev_private;
|
||||
|
||||
drm_helper_resume_force_mode(drm_dev);
|
||||
|
||||
if (cdev->mode_info.gfbdev) {
|
||||
console_lock();
|
||||
fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 0);
|
||||
console_unlock();
|
||||
}
|
||||
|
||||
drm_kms_helper_poll_enable(drm_dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct file_operations cirrus_driver_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = drm_open,
|
||||
@ -105,11 +141,17 @@ static struct drm_driver driver = {
|
||||
.dumb_destroy = cirrus_dumb_destroy,
|
||||
};
|
||||
|
||||
static const struct dev_pm_ops cirrus_pm_ops = {
|
||||
SET_SYSTEM_SLEEP_PM_OPS(cirrus_pm_suspend,
|
||||
cirrus_pm_resume)
|
||||
};
|
||||
|
||||
static struct pci_driver cirrus_pci_driver = {
|
||||
.name = DRIVER_NAME,
|
||||
.id_table = pciidlist,
|
||||
.probe = cirrus_pci_probe,
|
||||
.remove = cirrus_pci_remove,
|
||||
.driver.pm = &cirrus_pm_ops,
|
||||
};
|
||||
|
||||
static int __init cirrus_init(void)
|
||||
|
@ -308,6 +308,9 @@ static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
|
||||
|
||||
WREG_HDR(hdr);
|
||||
cirrus_crtc_do_set_base(crtc, old_fb, x, y, 0);
|
||||
|
||||
/* Unblank (needed on S3 resume, vgabios doesn't do it then) */
|
||||
outb(0x20, 0x3c0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -717,6 +717,14 @@ static const struct dmi_system_id intel_no_crt[] = {
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
|
||||
},
|
||||
},
|
||||
{
|
||||
.callback = intel_no_crt_dmi_callback,
|
||||
.ident = "DELL XPS 8700",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "XPS 8700"),
|
||||
},
|
||||
},
|
||||
{ }
|
||||
};
|
||||
|
||||
|
@ -119,7 +119,8 @@ static ssize_t iio_scan_el_show(struct device *dev,
|
||||
int ret;
|
||||
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
|
||||
|
||||
ret = test_bit(to_iio_dev_attr(attr)->address,
|
||||
/* Ensure ret is 0 or 1. */
|
||||
ret = !!test_bit(to_iio_dev_attr(attr)->address,
|
||||
indio_dev->buffer->scan_mask);
|
||||
|
||||
return sprintf(buf, "%d\n", ret);
|
||||
@ -762,7 +763,8 @@ int iio_scan_mask_query(struct iio_dev *indio_dev,
|
||||
if (!buffer->scan_mask)
|
||||
return 0;
|
||||
|
||||
return test_bit(bit, buffer->scan_mask);
|
||||
/* Ensure return value is 0 or 1. */
|
||||
return !!test_bit(bit, buffer->scan_mask);
|
||||
};
|
||||
EXPORT_SYMBOL_GPL(iio_scan_mask_query);
|
||||
|
||||
|
@ -283,6 +283,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
|
||||
(my_cq->galpas.user.fw_handle & (PAGE_SIZE - 1));
|
||||
if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
|
||||
ehca_err(device, "Copy to udata failed.");
|
||||
cq = ERR_PTR(-EFAULT);
|
||||
goto create_cq_exit4;
|
||||
}
|
||||
}
|
||||
|
@ -326,7 +326,7 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
|
||||
size_t count, loff_t *off)
|
||||
{
|
||||
u32 __iomem *piobuf;
|
||||
u32 plen, clen, pbufn;
|
||||
u32 plen, pbufn, maxlen_reserve;
|
||||
struct ipath_diag_pkt odp;
|
||||
struct ipath_diag_xpkt dp;
|
||||
u32 *tmpbuf = NULL;
|
||||
@ -335,51 +335,29 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
|
||||
u64 val;
|
||||
u32 l_state, lt_state; /* LinkState, LinkTrainingState */
|
||||
|
||||
if (count < sizeof(odp)) {
|
||||
ret = -EINVAL;
|
||||
goto bail;
|
||||
}
|
||||
|
||||
if (count == sizeof(dp)) {
|
||||
if (copy_from_user(&dp, data, sizeof(dp))) {
|
||||
ret = -EFAULT;
|
||||
goto bail;
|
||||
}
|
||||
} else if (copy_from_user(&odp, data, sizeof(odp))) {
|
||||
ret = -EFAULT;
|
||||
} else if (count == sizeof(odp)) {
|
||||
if (copy_from_user(&odp, data, sizeof(odp))) {
|
||||
ret = -EFAULT;
|
||||
goto bail;
|
||||
}
|
||||
} else {
|
||||
ret = -EINVAL;
|
||||
goto bail;
|
||||
}
|
||||
|
||||
/*
|
||||
* Due to padding/alignment issues (lessened with new struct)
|
||||
* the old and new structs are the same length. We need to
|
||||
* disambiguate them, which we can do because odp.len has never
|
||||
* been less than the total of LRH+BTH+DETH so far, while
|
||||
* dp.unit (same offset) unit is unlikely to get that high.
|
||||
* Similarly, dp.data, the pointer to user at the same offset
|
||||
* as odp.unit, is almost certainly at least one (512byte)page
|
||||
* "above" NULL. The if-block below can be omitted if compatibility
|
||||
* between a new driver and older diagnostic code is unimportant.
|
||||
* compatibility the other direction (new diags, old driver) is
|
||||
* handled in the diagnostic code, with a warning.
|
||||
*/
|
||||
if (dp.unit >= 20 && dp.data < 512) {
|
||||
/* very probable version mismatch. Fix it up */
|
||||
memcpy(&odp, &dp, sizeof(odp));
|
||||
/* We got a legacy dp, copy elements to dp */
|
||||
dp.unit = odp.unit;
|
||||
dp.data = odp.data;
|
||||
dp.len = odp.len;
|
||||
dp.pbc_wd = 0; /* Indicate we need to compute PBC wd */
|
||||
}
|
||||
|
||||
/* send count must be an exact number of dwords */
|
||||
if (dp.len & 3) {
|
||||
ret = -EINVAL;
|
||||
goto bail;
|
||||
}
|
||||
|
||||
clen = dp.len >> 2;
|
||||
plen = dp.len >> 2;
|
||||
|
||||
dd = ipath_lookup(dp.unit);
|
||||
if (!dd || !(dd->ipath_flags & IPATH_PRESENT) ||
|
||||
@ -422,16 +400,22 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
|
||||
goto bail;
|
||||
}
|
||||
|
||||
/* need total length before first word written */
|
||||
/* +1 word is for the qword padding */
|
||||
/*
|
||||
* need total length before first word written, plus 2 Dwords. One Dword
|
||||
* is for padding so we get the full user data when not aligned on
|
||||
* a word boundary. The other Dword is to make sure we have room for the
|
||||
* ICRC which gets tacked on later.
|
||||
*/
|
||||
maxlen_reserve = 2 * sizeof(u32);
|
||||
if (dp.len > dd->ipath_ibmaxlen - maxlen_reserve) {
|
||||
ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n",
|
||||
dp.len, dd->ipath_ibmaxlen);
|
||||
ret = -EINVAL;
|
||||
goto bail;
|
||||
}
|
||||
|
||||
plen = sizeof(u32) + dp.len;
|
||||
|
||||
if ((plen + 4) > dd->ipath_ibmaxlen) {
|
||||
ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n",
|
||||
plen - 4, dd->ipath_ibmaxlen);
|
||||
ret = -EINVAL;
|
||||
goto bail; /* before writing pbc */
|
||||
}
|
||||
tmpbuf = vmalloc(plen);
|
||||
if (!tmpbuf) {
|
||||
dev_info(&dd->pcidev->dev, "Unable to allocate tmp buffer, "
|
||||
@ -473,11 +457,11 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
|
||||
*/
|
||||
if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
|
||||
ipath_flush_wc();
|
||||
__iowrite32_copy(piobuf + 2, tmpbuf, clen - 1);
|
||||
__iowrite32_copy(piobuf + 2, tmpbuf, plen - 1);
|
||||
ipath_flush_wc();
|
||||
__raw_writel(tmpbuf[clen - 1], piobuf + clen + 1);
|
||||
__raw_writel(tmpbuf[plen - 1], piobuf + plen + 1);
|
||||
} else
|
||||
__iowrite32_copy(piobuf + 2, tmpbuf, clen);
|
||||
__iowrite32_copy(piobuf + 2, tmpbuf, plen);
|
||||
|
||||
ipath_flush_wc();
|
||||
|
||||
|
@ -695,6 +695,7 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
|
||||
|
||||
if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
|
||||
mthca_free_cq(to_mdev(ibdev), cq);
|
||||
err = -EFAULT;
|
||||
goto err_free;
|
||||
}
|
||||
|
||||
|
@ -1186,7 +1186,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
|
||||
nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
|
||||
kfree(nesqp->allocated_buffer);
|
||||
nes_debug(NES_DBG_QP, "ib_copy_from_udata() Failed \n");
|
||||
return NULL;
|
||||
return ERR_PTR(-EFAULT);
|
||||
}
|
||||
if (req.user_wqe_buffers) {
|
||||
virt_wqs = 1;
|
||||
|
@ -1201,7 +1201,7 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
|
||||
}
|
||||
|
||||
static void
|
||||
isert_put_cmd(struct isert_cmd *isert_cmd)
|
||||
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
|
||||
{
|
||||
struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
|
||||
struct isert_conn *isert_conn = isert_cmd->conn;
|
||||
@ -1216,8 +1216,21 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
|
||||
list_del_init(&cmd->i_conn_node);
|
||||
spin_unlock_bh(&conn->cmd_lock);
|
||||
|
||||
if (cmd->data_direction == DMA_TO_DEVICE)
|
||||
if (cmd->data_direction == DMA_TO_DEVICE) {
|
||||
iscsit_stop_dataout_timer(cmd);
|
||||
/*
|
||||
* Check for special case during comp_err where
|
||||
* WRITE_PENDING has been handed off from core,
|
||||
* but requires an extra target_put_sess_cmd()
|
||||
* before transport_generic_free_cmd() below.
*/
if (comp_err &&
cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
struct se_cmd *se_cmd = &cmd->se_cmd;

target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}
}

isert_unmap_cmd(isert_cmd, isert_conn);
transport_generic_free_cmd(&cmd->se_cmd, 0);
@@ -1271,7 +1284,7 @@ isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)

static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
struct ib_device *ib_dev)
struct ib_device *ib_dev, bool comp_err)
{
if (isert_cmd->sense_buf_dma != 0) {
pr_debug("Calling ib_dma_unmap_single for isert_cmd->sense_buf_dma\n");
@@ -1281,7 +1294,7 @@ isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
}

isert_unmap_tx_desc(tx_desc, ib_dev);
isert_put_cmd(isert_cmd);
isert_put_cmd(isert_cmd, comp_err);
}

static void
@@ -1336,14 +1349,14 @@ isert_do_control_comp(struct work_struct *work)
iscsit_tmr_post_handler(cmd, cmd->conn);

cmd->i_state = ISTATE_SENT_STATUS;
isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
break;
case ISTATE_SEND_REJECT:
pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
atomic_dec(&isert_conn->post_send_buf_count);

cmd->i_state = ISTATE_SENT_STATUS;
isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
break;
case ISTATE_SEND_LOGOUTRSP:
pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
@@ -1382,7 +1395,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);

cmd->i_state = ISTATE_SENT_STATUS;
isert_completion_put(tx_desc, isert_cmd, ib_dev);
isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
}

static void
@@ -1436,7 +1449,7 @@ isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn
if (!isert_cmd)
isert_unmap_tx_desc(tx_desc, ib_dev);
else
isert_completion_put(tx_desc, isert_cmd, ib_dev);
isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
}

static void

@@ -1078,6 +1078,7 @@ static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
struct srpt_send_ioctx *ioctx)
{
struct ib_device *dev = ch->sport->sdev->device;
struct se_cmd *cmd;
struct scatterlist *sg, *sg_orig;
int sg_cnt;
@@ -1124,7 +1125,7 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,

db = ioctx->rbufs;
tsize = cmd->data_length;
dma_len = sg_dma_len(&sg[0]);
dma_len = ib_sg_dma_len(dev, &sg[0]);
riu = ioctx->rdma_ius;

/*
@@ -1155,7 +1156,8 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
++j;
if (j < count) {
sg = sg_next(sg);
dma_len = sg_dma_len(sg);
dma_len = ib_sg_dma_len(
dev, sg);
}
}
} else {
@@ -1192,8 +1194,8 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
tsize = cmd->data_length;
riu = ioctx->rdma_ius;
sg = sg_orig;
dma_len = sg_dma_len(&sg[0]);
dma_addr = sg_dma_address(&sg[0]);
dma_len = ib_sg_dma_len(dev, &sg[0]);
dma_addr = ib_sg_dma_address(dev, &sg[0]);

/* this second loop is really mapped sg_addres to rdma_iu->ib_sge */
for (i = 0, j = 0;
@@ -1216,8 +1218,10 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
++j;
if (j < count) {
sg = sg_next(sg);
dma_len = sg_dma_len(sg);
dma_addr = sg_dma_address(sg);
dma_len = ib_sg_dma_len(
dev, sg);
dma_addr = ib_sg_dma_address(
dev, sg);
}
}
} else {

@@ -1514,6 +1514,22 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
},
.driver_data = (int []){1232, 5710, 1156, 4696},
},
{
/* Lenovo ThinkPad Edge E431 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Edge E431"),
},
.driver_data = (int []){1024, 5022, 2508, 4832},
},
{
/* Lenovo ThinkPad T431s */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T431"),
},
.driver_data = (int []){1024, 5112, 2024, 4832},
},
{
/* Lenovo ThinkPad T440s */
.matches = {
@@ -1522,6 +1538,14 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
},
.driver_data = (int []){1024, 5112, 2024, 4832},
},
{
/* Lenovo ThinkPad L440 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L440"),
},
.driver_data = (int []){1024, 5112, 2024, 4832},
},
{
/* Lenovo ThinkPad T540p */
.matches = {
@@ -1530,6 +1554,32 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
},
.driver_data = (int []){1024, 5056, 2058, 4832},
},
{
/* Lenovo ThinkPad L540 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L540"),
},
.driver_data = (int []){1024, 5112, 2024, 4832},
},
{
/* Lenovo Yoga S1 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
"ThinkPad S1 Yoga"),
},
.driver_data = (int []){1232, 5710, 1156, 4696},
},
{
/* Lenovo ThinkPad X1 Carbon Haswell (3rd generation) */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION,
"ThinkPad X1 Carbon 2nd"),
},
.driver_data = (int []){1024, 5112, 2024, 4832},
},
#endif
{ }
};

@@ -518,9 +518,9 @@ static isdnloop_stat isdnloop_cmd_table[] =
static void
isdnloop_fake_err(isdnloop_card *card)
{
char buf[60];
char buf[64];

sprintf(buf, "E%s", card->omsg);
snprintf(buf, sizeof(buf), "E%s", card->omsg);
isdnloop_fake(card, buf, -1);
isdnloop_fake(card, "NAK", -1);
}
@@ -903,6 +903,8 @@ isdnloop_parse_cmd(isdnloop_card *card)
case 7:
/* 0x;EAZ */
p += 3;
if (strlen(p) >= sizeof(card->eazlist[0]))
break;
strcpy(card->eazlist[ch - 1], p);
break;
case 8:
@@ -1070,6 +1072,12 @@ isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp)
return -EBUSY;
if (copy_from_user((char *) &sdef, (char *) sdefp, sizeof(sdef)))
return -EFAULT;

for (i = 0; i < 3; i++) {
if (!memchr(sdef.num[i], 0, sizeof(sdef.num[i])))
return -EINVAL;
}

spin_lock_irqsave(&card->isdnloop_lock, flags);
switch (sdef.ptype) {
case ISDN_PTYPE_EURO:
@@ -1127,7 +1135,7 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card)
{
ulong a;
int i;
char cbuf[60];
char cbuf[80];
isdn_ctrl cmd;
isdnloop_cdef cdef;

@@ -1192,7 +1200,6 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card)
break;
if ((c->arg & 255) < ISDNLOOP_BCH) {
char *p;
char dial[50];
char dcode[4];

a = c->arg;
@@ -1204,10 +1211,10 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card)
} else
/* Normal Dial */
strcpy(dcode, "CAL");
strcpy(dial, p);
sprintf(cbuf, "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
dcode, dial, c->parm.setup.si1,
c->parm.setup.si2, c->parm.setup.eazmsn);
snprintf(cbuf, sizeof(cbuf),
"%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
dcode, p, c->parm.setup.si1,
c->parm.setup.si2, c->parm.setup.eazmsn);
i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
}
break;

@@ -511,8 +511,9 @@ static int __begin_transaction_flags(struct dm_cache_metadata *cmd,
disk_super = dm_block_data(sblock);
update_flags(disk_super, mutator);
read_superblock_fields(cmd, disk_super);
dm_bm_unlock(sblock);

return dm_bm_flush_and_unlock(cmd->bm, sblock);
return dm_bm_flush(cmd->bm);
}

static int __begin_transaction(struct dm_cache_metadata *cmd)

@@ -1322,9 +1322,9 @@ static void process_deferred_bios(struct pool *pool)
*/
if (ensure_next_mapping(pool)) {
spin_lock_irqsave(&pool->lock, flags);
bio_list_add(&pool->deferred_bios, bio);
bio_list_merge(&pool->deferred_bios, &bios);
spin_unlock_irqrestore(&pool->lock, flags);

break;
}

@@ -595,25 +595,14 @@ int dm_bm_unlock(struct dm_block *b)
}
EXPORT_SYMBOL_GPL(dm_bm_unlock);

int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
struct dm_block *superblock)
int dm_bm_flush(struct dm_block_manager *bm)
{
int r;

if (bm->read_only)
return -EPERM;

r = dm_bufio_write_dirty_buffers(bm->bufio);
if (unlikely(r)) {
dm_bm_unlock(superblock);
return r;
}

dm_bm_unlock(superblock);

return dm_bufio_write_dirty_buffers(bm->bufio);
}
EXPORT_SYMBOL_GPL(dm_bm_flush_and_unlock);
EXPORT_SYMBOL_GPL(dm_bm_flush);

void dm_bm_set_read_only(struct dm_block_manager *bm)
{

@@ -105,8 +105,7 @@ int dm_bm_unlock(struct dm_block *b);
*
* This method always blocks.
*/
int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
struct dm_block *superblock);
int dm_bm_flush(struct dm_block_manager *bm);

/*
* Switches the bm to a read only mode. Once read-only mode

@@ -154,7 +154,7 @@ int dm_tm_pre_commit(struct dm_transaction_manager *tm)
if (r < 0)
return r;

return 0;
return dm_bm_flush(tm->bm);
}
EXPORT_SYMBOL_GPL(dm_tm_pre_commit);

@@ -164,8 +164,9 @@ int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root)
return -EWOULDBLOCK;

wipe_shadow_table(tm);
dm_bm_unlock(root);

return dm_bm_flush_and_unlock(tm->bm, root);
return dm_bm_flush(tm->bm);
}
EXPORT_SYMBOL_GPL(dm_tm_commit);

@@ -38,18 +38,17 @@ struct dm_transaction_manager *dm_tm_create_non_blocking_clone(struct dm_transac
/*
* We use a 2-phase commit here.
*
* i) In the first phase the block manager is told to start flushing, and
* the changes to the space map are written to disk. You should interrogate
* your particular space map to get detail of its root node etc. to be
* included in your superblock.
* i) Make all changes for the transaction *except* for the superblock.
* Then call dm_tm_pre_commit() to flush them to disk.
*
* ii) @root will be committed last. You shouldn't use more than the
* first 512 bytes of @root if you wish the transaction to survive a power
* failure. You *must* have a write lock held on @root for both stage (i)
* and (ii). The commit will drop the write lock.
* ii) Lock your superblock. Update. Then call dm_tm_commit() which will
* unlock the superblock and flush it. No other blocks should be updated
* during this period. Care should be taken to never unlock a partially
* updated superblock; perform any operations that could fail *before* you
* take the superblock lock.
*/
int dm_tm_pre_commit(struct dm_transaction_manager *tm);
int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root);
int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *superblock);

/*
* These methods are the only way to get hold of a writeable block.
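The rewritten header comment above describes the two-phase commit sequence only in prose. A minimal caller sketch of that sequence follows; it is not part of this patch. The function name example_commit, the SUPERBLOCK_LOCATION constant and the sb_validator placeholder are illustrative assumptions, and the dm_bm_write_lock() signature is assumed from dm-block-manager.h.

/*
 * Sketch of the two-phase commit described above.  SUPERBLOCK_LOCATION,
 * sb_validator and example_commit are illustrative placeholders; the
 * dm_bm_write_lock() signature is assumed from dm-block-manager.h.
 */
#include "dm-block-manager.h"
#include "dm-transaction-manager.h"

#define SUPERBLOCK_LOCATION 0	/* assumed: superblock lives in block 0 */

static struct dm_block_validator sb_validator;	/* placeholder validator */

static int example_commit(struct dm_transaction_manager *tm,
			  struct dm_block_manager *bm)
{
	struct dm_block *sblock;
	int r;

	/* Phase i: flush every change except the superblock. */
	r = dm_tm_pre_commit(tm);
	if (r < 0)
		return r;

	/*
	 * Phase ii: only after everything that could fail has been done,
	 * write-lock the superblock and update its fields.
	 */
	r = dm_bm_write_lock(bm, SUPERBLOCK_LOCATION, &sb_validator, &sblock);
	if (r < 0)
		return r;

	/* ... fill in the superblock fields here ... */

	/* dm_tm_commit() unlocks the superblock and flushes it. */
	return dm_tm_commit(tm, sblock);
}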
@@ -712,6 +712,22 @@ static int m88rs2000_get_frontend(struct dvb_frontend *fe)
return 0;
}

static int m88rs2000_get_tune_settings(struct dvb_frontend *fe,
struct dvb_frontend_tune_settings *tune)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;

if (c->symbol_rate > 3000000)
tune->min_delay_ms = 2000;
else
tune->min_delay_ms = 3000;

tune->step_size = c->symbol_rate / 16000;
tune->max_drift = c->symbol_rate / 2000;

return 0;
}

static int m88rs2000_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
{
struct m88rs2000_state *state = fe->demodulator_priv;
@@ -743,7 +759,7 @@ static struct dvb_frontend_ops m88rs2000_ops = {
.symbol_rate_tolerance = 500, /* ppm */
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
FE_CAN_QPSK |
FE_CAN_QPSK | FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_AUTO
},

@@ -763,6 +779,7 @@ static struct dvb_frontend_ops m88rs2000_ops = {

.set_frontend = m88rs2000_set_frontend,
.get_frontend = m88rs2000_get_frontend,
.get_tune_settings = m88rs2000_get_tune_settings,
};

struct dvb_frontend *m88rs2000_attach(const struct m88rs2000_config *config,

@@ -8045,8 +8045,8 @@ int saa7134_board_init2(struct saa7134_dev *dev)
break;
} /* switch() */

/* initialize tuner */
if (TUNER_ABSENT != dev->tuner_type) {
/* initialize tuner (don't do this when resuming) */
if (!dev->insuspend && TUNER_ABSENT != dev->tuner_type) {
int has_demod = (dev->tda9887_conf & TDA9887_PRESENT);

/* Note: radio tuner address is always filled in,

@@ -1079,6 +1079,7 @@ static void preview_config_input_format(struct isp_prev_device *prev,
*/
static void preview_config_input_size(struct isp_prev_device *prev, u32 active)
{
const struct v4l2_mbus_framefmt *format = &prev->formats[PREV_PAD_SINK];
struct isp_device *isp = to_isp_device(prev);
unsigned int sph = prev->crop.left;
unsigned int eph = prev->crop.left + prev->crop.width - 1;
@@ -1086,6 +1087,14 @@ static void preview_config_input_size(struct isp_prev_device *prev, u32 active)
unsigned int elv = prev->crop.top + prev->crop.height - 1;
u32 features;

if (format->code != V4L2_MBUS_FMT_Y8_1X8 &&
format->code != V4L2_MBUS_FMT_Y10_1X10) {
sph -= 2;
eph += 2;
slv -= 2;
elv += 2;
}

features = (prev->params.params[0].features & active)
| (prev->params.params[1].features & ~active);

@@ -673,7 +673,8 @@ static void pctv_520e_init(struct em28xx *dev)
static int em28xx_pctv_290e_set_lna(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct em28xx *dev = fe->dvb->priv;
struct em28xx_i2c_bus *i2c_bus = fe->dvb->priv;
struct em28xx *dev = i2c_bus->dev;
#ifdef CONFIG_GPIOLIB
struct em28xx_dvb *dvb = dev->dvb;
int ret;

@@ -2394,6 +2394,7 @@ static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x045e, 0x00f4), SN9C20X(OV9650, 0x30, 0)},
{USB_DEVICE(0x145f, 0x013d), SN9C20X(OV7660, 0x21, 0)},
{USB_DEVICE(0x0458, 0x7029), SN9C20X(HV7131R, 0x11, 0)},
{USB_DEVICE(0x0458, 0x7045), SN9C20X(MT9M112, 0x5d, LED_REVERSE)},
{USB_DEVICE(0x0458, 0x704a), SN9C20X(MT9M112, 0x5d, 0)},
{USB_DEVICE(0x0458, 0x704c), SN9C20X(MT9M112, 0x5d, 0)},
{USB_DEVICE(0xa168, 0x0610), SN9C20X(HV7131R, 0x11, 0)},

@@ -1846,7 +1846,25 @@ int uvc_video_enable(struct uvc_streaming *stream, int enable)

if (!enable) {
uvc_uninit_video(stream, 1);
usb_set_interface(stream->dev->udev, stream->intfnum, 0);
if (stream->intf->num_altsetting > 1) {
usb_set_interface(stream->dev->udev,
stream->intfnum, 0);
} else {
/* UVC doesn't specify how to inform a bulk-based device
* when the video stream is stopped. Windows sends a
* CLEAR_FEATURE(HALT) request to the video streaming
* bulk endpoint, mimic the same behaviour.
*/
unsigned int epnum = stream->header.bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK;
unsigned int dir = stream->header.bEndpointAddress
& USB_ENDPOINT_DIR_MASK;
unsigned int pipe;

pipe = usb_sndbulkpipe(stream->dev->udev, epnum) | dir;
usb_clear_halt(stream->dev->udev, pipe);
}

uvc_queue_enable(&stream->queue, 0);
uvc_video_clock_cleanup(stream);
return 0;

@@ -812,8 +812,8 @@ static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subde
#define VIDIOC_DQBUF32 _IOWR('V', 17, struct v4l2_buffer32)
#define VIDIOC_ENUMSTD32 _IOWR('V', 25, struct v4l2_standard32)
#define VIDIOC_ENUMINPUT32 _IOWR('V', 26, struct v4l2_input32)
#define VIDIOC_SUBDEV_G_EDID32 _IOWR('V', 63, struct v4l2_subdev_edid32)
#define VIDIOC_SUBDEV_S_EDID32 _IOWR('V', 64, struct v4l2_subdev_edid32)
#define VIDIOC_SUBDEV_G_EDID32 _IOWR('V', 40, struct v4l2_subdev_edid32)
#define VIDIOC_SUBDEV_S_EDID32 _IOWR('V', 41, struct v4l2_subdev_edid32)
#define VIDIOC_TRY_FMT32 _IOWR('V', 64, struct v4l2_format32)
#define VIDIOC_G_EXT_CTRLS32 _IOWR('V', 71, struct v4l2_ext_controls32)
#define VIDIOC_S_EXT_CTRLS32 _IOWR('V', 72, struct v4l2_ext_controls32)

@@ -1179,12 +1179,18 @@ static int pm860x_probe(struct i2c_client *client,
chip->companion_addr = pdata->companion_addr;
chip->companion = i2c_new_dummy(chip->client->adapter,
chip->companion_addr);
if (!chip->companion) {
dev_err(&client->dev,
"Failed to allocate I2C companion device\n");
return -ENODEV;
}
chip->regmap_companion = regmap_init_i2c(chip->companion,
&pm860x_regmap_config);
if (IS_ERR(chip->regmap_companion)) {
ret = PTR_ERR(chip->regmap_companion);
dev_err(&chip->companion->dev,
"Failed to allocate register map: %d\n", ret);
i2c_unregister_device(chip->companion);
return ret;
}
i2c_set_clientdata(chip->companion, chip);

@@ -121,6 +121,10 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
dev_info(max77686->dev, "device found\n");

max77686->rtc = i2c_new_dummy(i2c->adapter, I2C_ADDR_RTC);
if (!max77686->rtc) {
dev_err(max77686->dev, "Failed to allocate I2C device for RTC\n");
return -ENODEV;
}
i2c_set_clientdata(max77686->rtc, max77686);

max77686_irq_init(max77686);

@@ -149,9 +149,18 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
dev_info(max77693->dev, "device ID: 0x%x\n", reg_data);

max77693->muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC);
if (!max77693->muic) {
dev_err(max77693->dev, "Failed to allocate I2C device for MUIC\n");
return -ENODEV;
}
i2c_set_clientdata(max77693->muic, max77693);

max77693->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC);
if (!max77693->haptic) {
dev_err(max77693->dev, "Failed to allocate I2C device for Haptic\n");
ret = -ENODEV;
goto err_i2c_haptic;
}
i2c_set_clientdata(max77693->haptic, max77693);

/*
@@ -187,8 +196,9 @@ err_mfd:
max77693_irq_exit(max77693);
err_irq:
err_regmap_muic:
i2c_unregister_device(max77693->muic);
i2c_unregister_device(max77693->haptic);
err_i2c_haptic:
i2c_unregister_device(max77693->muic);
return ret;
}

@@ -180,9 +180,18 @@ static int max8925_probe(struct i2c_client *client,
mutex_init(&chip->io_lock);

chip->rtc = i2c_new_dummy(chip->i2c->adapter, RTC_I2C_ADDR);
if (!chip->rtc) {
dev_err(chip->dev, "Failed to allocate I2C device for RTC\n");
return -ENODEV;
}
i2c_set_clientdata(chip->rtc, chip);

chip->adc = i2c_new_dummy(chip->i2c->adapter, ADC_I2C_ADDR);
if (!chip->adc) {
dev_err(chip->dev, "Failed to allocate I2C device for ADC\n");
i2c_unregister_device(chip->rtc);
return -ENODEV;
}
i2c_set_clientdata(chip->adc, chip);

device_init_wakeup(&client->dev, 1);

@@ -218,10 +218,26 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
mutex_init(&max8997->iolock);

max8997->rtc = i2c_new_dummy(i2c->adapter, I2C_ADDR_RTC);
if (!max8997->rtc) {
dev_err(max8997->dev, "Failed to allocate I2C device for RTC\n");
return -ENODEV;
}
i2c_set_clientdata(max8997->rtc, max8997);

max8997->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC);
if (!max8997->haptic) {
dev_err(max8997->dev, "Failed to allocate I2C device for Haptic\n");
ret = -ENODEV;
goto err_i2c_haptic;
}
i2c_set_clientdata(max8997->haptic, max8997);

max8997->muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC);
if (!max8997->muic) {
dev_err(max8997->dev, "Failed to allocate I2C device for MUIC\n");
ret = -ENODEV;
goto err_i2c_muic;
}
i2c_set_clientdata(max8997->muic, max8997);

pm_runtime_set_active(max8997->dev);
@@ -248,7 +264,9 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
err_mfd:
mfd_remove_devices(max8997->dev);
i2c_unregister_device(max8997->muic);
err_i2c_muic:
i2c_unregister_device(max8997->haptic);
err_i2c_haptic:
i2c_unregister_device(max8997->rtc);
err:
kfree(max8997);

@@ -152,6 +152,10 @@ static int max8998_i2c_probe(struct i2c_client *i2c,
mutex_init(&max8998->iolock);

max8998->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
if (!max8998->rtc) {
dev_err(&i2c->dev, "Failed to allocate I2C device for RTC\n");
return -ENODEV;
}
i2c_set_clientdata(max8998->rtc, max8998);

max8998_irq_init(max8998);

@@ -199,6 +199,10 @@ static int sec_pmic_probe(struct i2c_client *i2c,
}

sec_pmic->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
if (!sec_pmic->rtc) {
dev_err(&i2c->dev, "Failed to allocate I2C for RTC\n");
return -ENODEV;
}
i2c_set_clientdata(sec_pmic->rtc, sec_pmic);

if (pdata && pdata->cfg_pmic_irq)

@@ -254,8 +254,10 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
ret = regmap_add_irq_chip(tps65910->regmap, tps65910->chip_irq,
IRQF_ONESHOT, pdata->irq_base,
tps6591x_irqs_chip, &tps65910->irq_data);
if (ret < 0)
if (ret < 0) {
dev_warn(tps65910->dev, "Failed to add irq_chip %d\n", ret);
tps65910->chip_irq = 0;
}
return ret;
}

@@ -115,6 +115,11 @@
#define MEI_DEV_ID_LPT_HR 0x8CBA /* Lynx Point H Refresh */

#define MEI_DEV_ID_WPT_LP 0x9CBA /* Wildcat Point LP */

/* Host Firmware Status Registers in PCI Config Space */
#define PCI_CFG_HFS_1 0x40
#define PCI_CFG_HFS_2 0x48

/*
* MEI HW Section
*/

@@ -105,15 +105,31 @@ static bool mei_me_quirk_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
u32 reg;
if (ent->device == MEI_DEV_ID_PBG_1) {
pci_read_config_dword(pdev, 0x48, &reg);
/* make sure that bit 9 is up and bit 10 is down */
if ((reg & 0x600) == 0x200) {
dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
return false;
}
/* Cougar Point || Patsburg */
if (ent->device == MEI_DEV_ID_CPT_1 ||
ent->device == MEI_DEV_ID_PBG_1) {
pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
/* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
if ((reg & 0x600) == 0x200)
goto no_mei;
}

/* Lynx Point */
if (ent->device == MEI_DEV_ID_LPT_H ||
ent->device == MEI_DEV_ID_LPT_W ||
ent->device == MEI_DEV_ID_LPT_HR) {
/* Read ME FW Status check for SPS Firmware */
pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
/* if bits [19:16] = 15, running SPS Firmware */
if ((reg & 0xf0000) == 0xf0000)
goto no_mei;
}

return true;

no_mei:
dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
return false;
}
/**
* mei_probe - Device Initialization Routine
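The comments in the quirk above carry the actual logic: bit 9 (NM) set with bit 10 (DM) clear in HFS_2, or bits [19:16] all set in HFS_1, mean the MEI interface should not be driven. A small standalone illustration of those two mask-and-compare checks, with invented register values, is:

/* Standalone illustration of the host-firmware-status checks used in the
 * quirk above.  The register values below are invented; only the bit
 * arithmetic mirrors the kernel code. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hfs_2 = 0x00000200;	/* bit 9 (NM) set, bit 10 (DM) clear */
	uint32_t hfs_1 = 0x000f1234;	/* bits [19:16] == 0xf */

	/* Cougar Point / Patsburg: NM up and DM down -> no usable MEI */
	printf("NM/DM quirk hit: %s\n",
	       (hfs_2 & 0x600) == 0x200 ? "yes" : "no");

	/* Lynx Point: bits [19:16] all set -> SPS firmware is running */
	printf("SPS firmware:    %s\n",
	       (hfs_1 & 0xf0000) == 0xf0000 ? "yes" : "no");

	return 0;
}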
@@ -1096,6 +1096,7 @@ static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
goto err_pmecc_data_alloc;
}

nand_chip->options |= NAND_NO_SUBPAGE_WRITE;
nand_chip->ecc.read_page = atmel_nand_pmecc_read_page;
nand_chip->ecc.write_page = atmel_nand_pmecc_write_page;

@@ -225,7 +225,7 @@ static void nuc900_nand_enable(struct nuc900_nand *nand)
val = __raw_readl(nand->reg + REG_FMICSR);

if (!(val & NAND_EN))
__raw_writel(val | NAND_EN, REG_FMICSR);
__raw_writel(val | NAND_EN, nand->reg + REG_FMICSR);

val = __raw_readl(nand->reg + REG_SMCSR);

@@ -59,15 +59,12 @@ struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
struct attribute_group *attr_group;
struct attribute **attributes;
struct sm_sysfs_attribute *vendor_attribute;
char *vendor;

int vendor_len = strnlen(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET);

char *vendor = kmalloc(vendor_len, GFP_KERNEL);
vendor = kstrndup(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET, GFP_KERNEL);
if (!vendor)
goto error1;
memcpy(vendor, ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, vendor_len);
vendor[vendor_len] = 0;

/* Initialize sysfs attributes */
vendor_attribute =
@@ -78,7 +75,7 @@ struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
sysfs_attr_init(&vendor_attribute->dev_attr.attr);

vendor_attribute->data = vendor;
vendor_attribute->len = vendor_len;
vendor_attribute->len = strlen(vendor);
vendor_attribute->dev_attr.attr.name = "vendor";
vendor_attribute->dev_attr.attr.mode = S_IRUGO;
vendor_attribute->dev_attr.show = sm_attr_show;

@@ -17308,8 +17308,6 @@ static int tg3_init_one(struct pci_dev *pdev,

tg3_init_bufmgr_config(tp);

features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;

/* 5700 B0 chips do not support checksumming correctly due
* to hardware bugs.
*/
@@ -17341,7 +17339,8 @@ static int tg3_init_one(struct pci_dev *pdev,
features |= NETIF_F_TSO_ECN;
}

dev->features |= features;
dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
dev->vlan_features |= features;

/*
Some files were not shown because too many files have changed in this diff.