Merge 3.10.105 into android-msm-bullhead-3.10-oreo-m5

Changes in 3.10.105: (315 commits)
        sched/core: Fix a race between try_to_wake_up() and a woken up task
        sched/core: Fix an SMP ordering race in try_to_wake_up() vs. schedule()
        crypto: algif_skcipher - Require setkey before accept(2)
        crypto: af_alg - Disallow bind/setkey/... after accept(2)
        crypto: af_alg - Add nokey compatibility path
        crypto: algif_skcipher - Add nokey compatibility path
        crypto: hash - Add crypto_ahash_has_setkey
        crypto: shash - Fix has_key setting
        crypto: algif_hash - Require setkey before accept(2)
        crypto: skcipher - Add crypto_skcipher_has_setkey
        crypto: algif_skcipher - Add key check exception for cipher_null
        crypto: af_alg - Allow af_alg_release_parent to be called on nokey path
        crypto: algif_hash - Remove custom release parent function
        crypto: algif_skcipher - Remove custom release parent function
        crypto: af_alg - Forbid bind(2) when nokey child sockets are present
        crypto: algif_hash - Fix race condition in hash_check_key
        crypto: algif_skcipher - Fix race condition in skcipher_check_key
        crypto: algif_skcipher - Load TX SG list after waiting
        crypto: cryptd - initialize child shash_desc on import
        crypto: skcipher - Fix blkcipher walk OOM crash
        crypto: gcm - Fix IV buffer size in crypto_gcm_setkey
        MIPS: KVM: Fix unused variable build warning
        KVM: MIPS: Precalculate MMIO load resume PC
        KVM: MIPS: Drop other CPU ASIDs on guest MMU changes
        KVM: nVMX: postpone VMCS changes on MSR_IA32_APICBASE write
        KVM: MIPS: Make ERET handle ERL before EXL
        KVM: x86: fix wbinvd_dirty_mask use-after-free
        KVM: x86: fix missed SRCU usage in kvm_lapic_set_vapic_addr
        KVM: Disable irq while unregistering user notifier
        PM / devfreq: Fix incorrect type issue.
        ppp: defer netns reference release for ppp channel
        x86/mm/xen: Suppress hugetlbfs in PV guests
        xen: Add RING_COPY_REQUEST()
        xen-netback: don't use last request to determine minimum Tx credit
        xen-netback: use RING_COPY_REQUEST() throughout
        xen-blkback: only read request operation from shared ring once
        xen/pciback: Save xen_pci_op commands before processing it
        xen/pciback: Save the number of MSI-X entries to be copied later.
        xen/pciback: Return error on XEN_PCI_OP_enable_msi when device has MSI or MSI-X enabled
        xen/pciback: Return error on XEN_PCI_OP_enable_msix when device has MSI or MSI-X enabled
        xen/pciback: Do not install an IRQ handler for MSI interrupts.
        xen/pciback: For XEN_PCI_OP_disable_msi[|x] only disable if device has MSI(X) enabled.
        xen/pciback: Don't allow MSI-X ops if PCI_COMMAND_MEMORY is not set.
        xen-pciback: Add name prefix to global 'permissive' variable
        x86/xen: fix upper bound of pmd loop in xen_cleanhighmap()
        x86/traps: Ignore high word of regs->cs in early_idt_handler_common
        x86/mm: Disable preemption during CR3 read+write
        x86/apic: Do not init irq remapping if ioapic is disabled
        x86/mm/pat, /dev/mem: Remove superfluous error message
        x86/paravirt: Do not trace _paravirt_ident_*() functions
        x86/build: Build compressed x86 kernels as PIE
        x86/um: reuse asm-generic/barrier.h
        iommu/amd: Update Alias-DTE in update_device_table()
        iommu/amd: Free domain id when free a domain of struct dma_ops_domain
        ARM: 8616/1: dt: Respect property size when parsing CPUs
        ARM: 8618/1: decompressor: reset ttbcr fields to use TTBR0 on ARMv7
        ARM: sa1100: clear reset status prior to reboot
        ARM: sa1111: fix pcmcia suspend/resume
        arm64: avoid returning from bad_mode
        arm64: Define AT_VECTOR_SIZE_ARCH for ARCH_DLINFO
        arm64: spinlocks: implement smp_mb__before_spinlock() as smp_mb()
        arm64: debug: avoid resetting stepping state machine when TIF_SINGLESTEP
        MIPS: Malta: Fix IOCU disable switch read for MIPS64
        MIPS: ptrace: Fix regs_return_value for kernel context
        powerpc/mm: Don't alias user region to other regions below PAGE_OFFSET
        powerpc/vdso64: Use double word compare on pointers
        powerpc/powernv: Use CPU-endian PEST in pnv_pci_dump_p7ioc_diag_data()
        powerpc/64: Fix incorrect return value from __copy_tofrom_user
        powerpc/nvram: Fix an incorrect partition merge
        avr32: fix copy_from_user()
        avr32: fix 'undefined reference to `___copy_from_user'
        avr32: off by one in at32_init_pio()
        s390/dasd: fix hanging device after clear subchannel
        parisc: Ensure consistent state when switching to kernel stack at syscall entry
        microblaze: fix __get_user()
        microblaze: fix copy_from_user()
        mn10300: failing __get_user() and get_user() should zero
        m32r: fix __get_user()
        sh64: failing __get_user() should zero
        score: fix __get_user/get_user
        s390: get_user() should zero on failure
        ARC: uaccess: get_user to zero out dest in cause of fault
        asm-generic: make get_user() clear the destination on errors
        frv: fix clear_user()
        cris: buggered copy_from_user/copy_to_user/clear_user
        blackfin: fix copy_from_user()
        score: fix copy_from_user() and friends
        sh: fix copy_from_user()
        hexagon: fix strncpy_from_user() error return
        mips: copy_from_user() must zero the destination on access_ok() failure
        asm-generic: make copy_from_user() zero the destination properly
        alpha: fix copy_from_user()
        metag: copy_from_user() should zero the destination on access_ok() failure
        parisc: fix copy_from_user()
        openrisc: fix copy_from_user()
        openrisc: fix the fix of copy_from_user()
        mn10300: copy_from_user() should zero on access_ok() failure...
        sparc32: fix copy_from_user()
        ppc32: fix copy_from_user()
        ia64: copy_from_user() should zero the destination on access_ok() failure
        fix fault_in_multipages_...() on architectures with no-op access_ok()
        fix memory leaks in tracing_buffers_splice_read()
        arc: don't leak bits of kernel stack into coredump
        Fix potential infoleak in older kernels
        swapfile: fix memory corruption via malformed swapfile
        coredump: fix unfreezable coredumping task
        usb: dwc3: gadget: increment request->actual once
        USB: validate wMaxPacketValue entries in endpoint descriptors
        USB: fix typo in wMaxPacketSize validation
        usb: xhci: Fix panic if disconnect
        USB: serial: fix memleak in driver-registration error path
        USB: kobil_sct: fix non-atomic allocation in write path
        USB: serial: mos7720: fix non-atomic allocation in write path
        USB: serial: mos7840: fix non-atomic allocation in write path
        usb: renesas_usbhs: fix clearing the {BRDY,BEMP}STS condition
        USB: change bInterval default to 10 ms
        usb: gadget: fsl_qe_udc: signedness bug in qe_get_frame()
        USB: serial: cp210x: fix hardware flow-control disable
        usb: misc: legousbtower: Fix NULL pointer deference
        usb: gadget: function: u_ether: don't starve tx request queue
        USB: serial: cp210x: fix tiocmget error handling
        usb: gadget: u_ether: remove interrupt throttling
        usb: chipidea: move the lock initialization to core file
        Fix USB CB/CBI storage devices with CONFIG_VMAP_STACK=y
        ALSA: rawmidi: Fix possible deadlock with virmidi registration
        ALSA: timer: fix NULL pointer dereference in read()/ioctl() race
        ALSA: timer: fix division by zero after SNDRV_TIMER_IOCTL_CONTINUE
        ALSA: timer: fix NULL pointer dereference on memory allocation failure
        ALSA: ali5451: Fix out-of-bound position reporting
        ALSA: pcm : Call kill_fasync() in stream lock
        zfcp: fix fc_host port_type with NPIV
        zfcp: fix ELS/GS request&response length for hardware data router
        zfcp: close window with unblocked rport during rport gone
        zfcp: retain trace level for SCSI and HBA FSF response records
        zfcp: restore: Dont use 0 to indicate invalid LUN in rec trace
        zfcp: trace on request for open and close of WKA port
        zfcp: restore tracing of handle for port and LUN with HBA records
        zfcp: fix D_ID field with actual value on tracing SAN responses
        zfcp: fix payload trace length for SAN request&response
        zfcp: trace full payload of all SAN records (req,resp,iels)
        scsi: zfcp: spin_lock_irqsave() is not nestable
        scsi: mpt3sas: Fix secure erase premature termination
        scsi: mpt3sas: Unblock device after controller reset
        scsi: mpt3sas: fix hang on ata passthrough commands
        mpt2sas: Fix secure erase premature termination
        scsi: megaraid_sas: Fix data integrity failure for JBOD (passthrough) devices
        scsi: megaraid_sas: fix macro MEGASAS_IS_LOGICAL to avoid regression
        scsi: ibmvfc: Fix I/O hang when port is not mapped
        scsi: Fix use-after-free
        scsi: arcmsr: Buffer overflow in arcmsr_iop_message_xfer()
        scsi: scsi_debug: Fix memory leak if LBP enabled and module is unloaded
        scsi: arcmsr: Send SYNCHRONIZE_CACHE command to firmware
        ext4: validate that metadata blocks do not overlap superblock
        ext4: avoid modifying checksum fields directly during checksum verification
        ext4: use __GFP_NOFAIL in ext4_free_blocks()
        ext4: reinforce check of i_dtime when clearing high fields of uid and gid
        ext4: allow DAX writeback for hole punch
        ext4: sanity check the block and cluster size at mount time
        reiserfs: fix "new_insert_key may be used uninitialized ..."
        reiserfs: Unlock superblock before calling reiserfs_quota_on_mount()
        xfs: fix superblock inprogress check
        libxfs: clean up _calc_dquots_per_chunk
        btrfs: ensure that file descriptor used with subvol ioctls is a dir
        ocfs2/dlm: fix race between convert and migration
        ocfs2: fix start offset to ocfs2_zero_range_for_truncate()
        ubifs: Fix assertion in layout_in_gaps()
        ubifs: Fix xattr_names length in exit paths
        UBIFS: Fix possible memory leak in ubifs_readdir()
        ubifs: Abort readdir upon error
        ubifs: Fix regression in ubifs_readdir()
        UBI: fastmap: scrub PEB when bitflips are detected in a free PEB EC header
        NFSv4.x: Fix a refcount leak in nfs_callback_up_net
        NFSD: Using free_conn free connection
        NFS: Don't drop CB requests with invalid principals
        NFSv4: Open state recovery must account for file permission changes
        fs/seq_file: fix out-of-bounds read
        fs/super.c: fix race between freeze_super() and thaw_super()
        isofs: Do not return EACCES for unknown filesystems
        hostfs: Freeing an ERR_PTR in hostfs_fill_sb_common()
        driver core: Delete an unnecessary check before the function call "put_device"
        driver core: fix race between creating/querying glue dir and its cleanup
        drm/radeon: fix radeon_move_blit on 32bit systems
        drm: Reject page_flip for !DRIVER_MODESET
        drm/radeon: Ensure vblank interrupt is enabled on DPMS transition to on
        qxl: check for kmap failures
        Input: i8042 - break load dependency between atkbd/psmouse and i8042
        Input: i8042 - set up shared ps2_cmd_mutex for AUX ports
        Input: ili210x - fix permissions on "calibrate" attribute
        hwrng: exynos - Disable runtime PM on probe failure
        hwrng: omap - Fix assumption that runtime_get_sync will always succeed
        hwrng: omap - Only fail if pm_runtime_get_sync returns < 0
        i2c-eg20t: fix race between i2c init and interrupt enable
        em28xx-i2c: rt_mutex_trylock() returns zero on failure
        i2c: core: fix NULL pointer dereference under race condition
        i2c: at91: fix write transfers by clearing pending interrupt first
        iio: accel: kxsd9: Fix raw read return
        iio: accel: kxsd9: Fix scaling bug
        thermal: hwmon: Properly report critical temperature in sysfs
        cdc-acm: fix wrong pipe type on rx interrupt xfers
        timers: Use proper base migration in add_timer_on()
        EDAC: Increment correct counter in edac_inc_ue_error()
        IB/ipoib: Fix memory corruption in ipoib cm mode connect flow
        IB/core: Fix use after free in send_leave function
        IB/ipoib: Don't allow MC joins during light MC flush
        IB/mlx4: Fix incorrect MC join state bit-masking on SR-IOV
        IB/mlx4: Fix create CQ error flow
        IB/uverbs: Fix leak of XRC target QPs
        IB/cm: Mark stale CM id's whenever the mad agent was unregistered
        mtd: blkdevs: fix potential deadlock + lockdep warnings
        mtd: pmcmsp-flash: Allocating too much in init_msp_flash()
        mtd: nand: davinci: Reinitialize the HW ECC engine in 4bit hwctl
        perf symbols: Fixup symbol sizes before picking best ones
        perf: Tighten (and fix) the grouping condition
        tty: Prevent ldisc drivers from re-using stale tty fields
        tty: limit terminal size to 4M chars
        tty: vt, fix bogus division in csi_J
        vt: clear selection before resizing
        drivers/vfio: Rework offsetofend()
        include/stddef.h: Move offsetofend() from vfio.h to a generic kernel header
        stddef.h: move offsetofend inside #ifndef/#endif guard, neaten
        ipv6: don't call fib6_run_gc() until routing is ready
        ipv6: split duplicate address detection and router solicitation timer
        ipv6: move DAD and addrconf_verify processing to workqueue
        ipv6: addrconf: fix dev refcont leak when DAD failed
        ipv6: fix rtnl locking in setsockopt for anycast and multicast
        ip6_gre: fix flowi6_proto value in ip6gre_xmit_other()
        ipv6: correctly add local routes when lo goes up
        ipv6: dccp: fix out of bound access in dccp_v6_err()
        ipv6: dccp: add missing bind_conflict to dccp_ipv6_mapped
        ip6_tunnel: Clear IP6CB in ip6tunnel_xmit()
        ip6_tunnel: disable caching when the traffic class is inherited
        net/irda: handle iriap_register_lsap() allocation failure
        tcp: fix use after free in tcp_xmit_retransmit_queue()
        tcp: properly scale window in tcp_v[46]_reqsk_send_ack()
        tcp: fix overflow in __tcp_retransmit_skb()
        tcp: fix wrong checksum calculation on MTU probing
        tcp: take care of truncations done by sk_filter()
        bonding: Fix bonding crash
        net: ratelimit warnings about dst entry refcount underflow or overflow
        mISDN: Support DR6 indication in mISDNipac driver
        mISDN: Fixing missing validation in base_sock_bind()
        net: disable fragment reassembly if high_thresh is set to zero
        ipvs: count pre-established TCP states as active
        iwlwifi: pcie: fix access to scratch buffer
        svc: Avoid garbage replies when pc_func() returns rpc_drop_reply
        brcmsmac: Free packet if dma_mapping_error() fails in dma_rxfill
        brcmsmac: Initialize power in brcms_c_stf_ss_algo_channel_get()
        brcmfmac: avoid potential stack overflow in brcmf_cfg80211_start_ap()
        pstore: Fix buffer overflow while write offset equal to buffer size
        net/mlx4_core: Allow resetting VF admin mac to zero
        firewire: net: guard against rx buffer overflows
        firewire: net: fix fragmented datagram_size off-by-one
        netfilter: fix namespace handling in nf_log_proc_dostring
        can: bcm: fix warning in bcm_connect/proc_register
        net: fix sk_mem_reclaim_partial()
        net: avoid sk_forward_alloc overflows
        ipmr, ip6mr: fix scheduling while atomic and a deadlock with ipmr_get_route
        packet: call fanout_release, while UNREGISTERING a netdev
        net: sctp, forbid negative length
        sctp: validate chunk len before actually using it
        net: clear sk_err_soft in sk_clone_lock()
        net: mangle zero checksum in skb_checksum_help()
        dccp: do not send reset to already closed sockets
        dccp: fix out of bound access in dccp_v4_err()
        sctp: assign assoc_id earlier in __sctp_connect
        neigh: check error pointer instead of NULL for ipv4_neigh_lookup()
        ipv4: use new_gw for redirect neigh lookup
        mac80211: fix purging multicast PS buffer queue
        mac80211: discard multicast and 4-addr A-MSDUs
        cfg80211: limit scan results cache size
        mwifiex: printk() overflow with 32-byte SSIDs
        ipv4: Set skb->protocol properly for local output
        net: sky2: Fix shutdown crash
        kaweth: fix firmware download
        tracing: Move mutex to protect against resetting of seq data
        kernel/fork: fix CLONE_CHILD_CLEARTID regression in nscd
        Revert "ipc/sem.c: optimize sem_lock()"
        cfq: fix starvation of asynchronous writes
        drbd: Fix kernel_sendmsg() usage - potential NULL deref
        lib/genalloc.c: start search from start of chunk
        tools/vm/slabinfo: fix an unintentional printf
        rcu: Fix soft lockup for rcu_nocb_kthread
        ratelimit: fix bug in time interval by resetting right begin time
        mfd: core: Fix device reference leak in mfd_clone_cell
        PM / sleep: fix device reference leak in test_suspend
        mmc: mxs: Initialize the spinlock prior to using it
        mmc: block: don't use CMD23 with very old MMC cards
        pstore/core: drop cmpxchg based updates
        pstore/ram: Use memcpy_toio instead of memcpy
        pstore/ram: Use memcpy_fromio() to save old buffer
        mb86a20s: fix the locking logic
        mb86a20s: fix demod settings
        cx231xx: don't return error on success
        cx231xx: fix GPIOs for Pixelview SBTVD hybrid
        gpio: mpc8xxx: Correct irq handler function
        uio: fix dmem_region_start computation
        KEYS: Fix short sprintf buffer in /proc/keys show function
        hv: do not lose pending heartbeat vmbus packets
        staging: iio: ad5933: avoid uninitialized variable in error case
        mei: bus: fix received data size check in NFC fixup
        ACPI / APEI: Fix incorrect return value of ghes_proc()
        PCI: Handle read-only BARs on AMD CS553x devices
        tile: avoid using clocksource_cyc2ns with absolute cycle count
        dm flakey: fix reads to be issued if drop_writes configured
        mm,ksm: fix endless looping in allocating memory when ksm enable
        can: dev: fix deadlock reported after bus-off
        hwmon: (adt7411) set bit 3 in CFG1 register
        mpi: Fix NULL ptr dereference in mpi_powm() [ver #3]
        mfd: 88pm80x: Double shifting bug in suspend/resume
        ASoC: omap-mcpdm: Fix irq resource handling
        regulator: tps65910: Work around silicon erratum SWCZ010
        dm: mark request_queue dead before destroying the DM device
        fbdev/efifb: Fix 16 color palette entry calculation
        metag: Only define atomic_dec_if_positive conditionally
        Linux 3.10.105

Signed-off-by: Nathan Chancellor <natechancellor@gmail.com>

Conflicts:
	arch/arm/mach-sa1100/generic.c
	arch/arm64/kernel/traps.c
	crypto/blkcipher.c
	drivers/devfreq/devfreq.c
	drivers/usb/dwc3/gadget.c
	drivers/usb/gadget/u_ether.c
	fs/ubifs/dir.c
	include/net/if_inet6.h
	lib/genalloc.c
	net/ipv6/addrconf.c
	net/ipv6/tcp_ipv6.c
	net/wireless/scan.c
	sound/core/timer.c
Author:    Nathan Chancellor <natechancellor@gmail.com>
Date:      2018-01-25 17:45:32 -07:00
Committer: TARKZiM
Parent:    e8688a8a8a
Commit:    c02e8f7fd4
285 changed files with 2912 additions and 1144 deletions


@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 10
SUBLEVEL = 104
SUBLEVEL = 105
EXTRAVERSION =
NAME = TOSSUG Baby Fish


@ -371,14 +371,6 @@ __copy_tofrom_user_nocheck(void *to, const void *from, long len)
return __cu_len;
}
extern inline long
__copy_tofrom_user(void *to, const void *from, long len, const void __user *validate)
{
if (__access_ok((unsigned long)validate, len, get_fs()))
len = __copy_tofrom_user_nocheck(to, from, len);
return len;
}
#define __copy_to_user(to,from,n) \
({ \
__chk_user_ptr(to); \
@ -393,17 +385,22 @@ __copy_tofrom_user(void *to, const void *from, long len, const void __user *vali
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
extern inline long
copy_to_user(void __user *to, const void *from, long n)
{
return __copy_tofrom_user((__force void *)to, from, n, to);
if (likely(__access_ok((unsigned long)to, n, get_fs())))
n = __copy_tofrom_user_nocheck((__force void *)to, from, n);
return n;
}
extern inline long
copy_from_user(void *to, const void __user *from, long n)
{
return __copy_tofrom_user(to, (__force void *)from, n, from);
if (likely(__access_ok((unsigned long)from, n, get_fs())))
n = __copy_tofrom_user_nocheck(to, (__force void *)from, n);
else
memset(to, 0, n);
return n;
}
extern void __do_clear_user(void);
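
The alpha change above is one instance of a pattern this release rolls out across most of the uaccess hunks that follow (avr32, blackfin, cris, frv, ia64, m32r, metag, microblaze, mips, mn10300, openrisc, parisc, powerpc, s390, score, sh, sparc32): when access_ok() or the raw copy fails, copy_from_user() must zero the uncopied tail of the destination instead of returning it untouched, so a failed read cannot leak stale kernel stack into a user buffer. A minimal userspace sketch of that pattern follows; fake_access_ok() and fake_raw_copy() are invented stand-ins for the kernel primitives, not real APIs:

#include <stdio.h>
#include <string.h>

/* Invented stand-ins for access_ok() and the arch's raw copy loop, just so
 * the pattern compiles and runs in userspace. */
static int fake_access_ok(const void *p, size_t n) { (void)n; return p != NULL; }
static size_t fake_raw_copy(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);
	return 0;			/* number of bytes left uncopied */
}

/* The hardened shape of copy_from_user(): on a failed or short copy, zero
 * the part of the destination that was never written, and still report how
 * many bytes were not copied. */
static size_t copy_from_user_sketch(void *to, const void *from, size_t n)
{
	size_t res = n;

	if (fake_access_ok(from, n))
		res = fake_raw_copy(to, from, n);
	if (res)
		memset((char *)to + (n - res), 0, res);
	return res;
}

int main(void)
{
	char buf[16];

	memset(buf, 0xAA, sizeof(buf));	/* pretend this is stale stack data */
	/* A NULL source fails validation, so the buffer must come back zeroed. */
	size_t left = copy_from_user_sketch(buf, NULL, sizeof(buf));
	printf("uncopied=%zu, buf[0]=%d\n", left, buf[0]);
	return 0;
}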


@ -83,7 +83,10 @@
"2: ;nop\n" \
" .section .fixup, \"ax\"\n" \
" .align 4\n" \
"3: mov %0, %3\n" \
"3: # return -EFAULT\n" \
" mov %0, %3\n" \
" # zero out dst ptr\n" \
" mov %1, 0\n" \
" j 2b\n" \
" .previous\n" \
" .section __ex_table, \"a\"\n" \
@ -101,7 +104,11 @@
"2: ;nop\n" \
" .section .fixup, \"ax\"\n" \
" .align 4\n" \
"3: mov %0, %3\n" \
"3: # return -EFAULT\n" \
" mov %0, %3\n" \
" # zero out dst ptr\n" \
" mov %1, 0\n" \
" mov %R1, 0\n" \
" j 2b\n" \
" .previous\n" \
" .section __ex_table, \"a\"\n" \


@ -80,13 +80,14 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
int err;
err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
if (!err)
set_current_blocked(&set);
err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs),
err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch),
sizeof(sf->uc.uc_mcontext.regs.scratch));
if (err)
return err;
return err;
set_current_blocked(&set);
return 0;
}
static inline int is_do_ss_needed(unsigned int magic)


@ -715,7 +715,7 @@ __armv7_mmu_cache_on:
orrne r0, r0, #1 @ MMU enabled
movne r1, #0xfffffffd @ domain 0 = client
bic r6, r6, #1 << 31 @ 32-bit translation system
bic r6, r6, #3 << 0 @ use only ttbr0
bic r6, r6, #(7 << 0) | (1 << 4) @ use only ttbr0
mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer
mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
mcr p15, 0, r0, c7, c5, 4 @ ISB


@ -872,9 +872,9 @@ struct sa1111_save_data {
#ifdef CONFIG_PM
static int sa1111_suspend(struct platform_device *dev, pm_message_t state)
static int sa1111_suspend_noirq(struct device *dev)
{
struct sa1111 *sachip = platform_get_drvdata(dev);
struct sa1111 *sachip = dev_get_drvdata(dev);
struct sa1111_save_data *save;
unsigned long flags;
unsigned int val;
@ -937,9 +937,9 @@ static int sa1111_suspend(struct platform_device *dev, pm_message_t state)
* restored by their respective drivers, and must be called
* via LDM after this function.
*/
static int sa1111_resume(struct platform_device *dev)
static int sa1111_resume_noirq(struct device *dev)
{
struct sa1111 *sachip = platform_get_drvdata(dev);
struct sa1111 *sachip = dev_get_drvdata(dev);
struct sa1111_save_data *save;
unsigned long flags, id;
void __iomem *base;
@ -955,7 +955,7 @@ static int sa1111_resume(struct platform_device *dev)
id = sa1111_readl(sachip->base + SA1111_SKID);
if ((id & SKID_ID_MASK) != SKID_SA1111_ID) {
__sa1111_remove(sachip);
platform_set_drvdata(dev, NULL);
dev_set_drvdata(dev, NULL);
kfree(save);
return 0;
}
@ -1006,8 +1006,8 @@ static int sa1111_resume(struct platform_device *dev)
}
#else
#define sa1111_suspend NULL
#define sa1111_resume NULL
#define sa1111_suspend_noirq NULL
#define sa1111_resume_noirq NULL
#endif
static int sa1111_probe(struct platform_device *pdev)
@ -1041,6 +1041,11 @@ static int sa1111_remove(struct platform_device *pdev)
return 0;
}
static struct dev_pm_ops sa1111_pm_ops = {
.suspend_noirq = sa1111_suspend_noirq,
.resume_noirq = sa1111_resume_noirq,
};
/*
* Not sure if this should be on the system bus or not yet.
* We really want some way to register a system device at
@ -1053,11 +1058,10 @@ static int sa1111_remove(struct platform_device *pdev)
static struct platform_driver sa1111_device_driver = {
.probe = sa1111_probe,
.remove = sa1111_remove,
.suspend = sa1111_suspend,
.resume = sa1111_resume,
.driver = {
.name = "sa1111",
.owner = THIS_MODULE,
.pm = &sa1111_pm_ops,
},
};


@ -58,6 +58,8 @@ void __init arm_dt_init_cpu_maps(void)
return;
for_each_child_of_node(cpus, cpu) {
const __be32 *cell;
int prop_bytes;
u32 hwid;
if (of_node_cmp(cpu->type, "cpu"))
@ -69,17 +71,23 @@ void __init arm_dt_init_cpu_maps(void)
* properties is considered invalid to build the
* cpu_logical_map.
*/
if (of_property_read_u32(cpu, "reg", &hwid)) {
cell = of_get_property(cpu, "reg", &prop_bytes);
if (!cell || prop_bytes < sizeof(*cell)) {
pr_debug(" * %s missing reg property\n",
cpu->full_name);
return;
}
/*
* 8 MSBs must be set to 0 in the DT since the reg property
* Bits n:24 must be set to 0 in the DT since the reg property
* defines the MPIDR[23:0].
*/
if (hwid & ~MPIDR_HWID_BITMASK)
do {
hwid = be32_to_cpu(*cell++);
prop_bytes -= sizeof(*cell);
} while (!hwid && prop_bytes > 0);
if (prop_bytes || (hwid & ~MPIDR_HWID_BITMASK))
return;
/*


@ -31,6 +31,7 @@
#include <mach/hardware.h>
#include <mach/irqs.h>
#include <mach/reset.h>
#include "generic.h"
@ -134,6 +135,7 @@ static void sa1100_power_off(void)
void sa11x0_restart(enum reboot_mode mode, const char *cmd)
{
clear_reset_status(RESET_STATUS_ALL);
if (mode == REBOOT_SOFT) {
/* Jump into ROM at address 0 */
soft_restart(0);


@ -137,6 +137,7 @@ extern unsigned long randomize_et_dyn(unsigned long base);
#define SET_PERSONALITY(ex) clear_thread_flag(TIF_32BIT);
/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
#define ARCH_DLINFO \
do { \
NEW_AUX_ENT(AT_SYSINFO_EHDR, \


@ -244,4 +244,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
/*
* Accesses appearing in program order before a spin_lock() operation
* can be reordered with accesses inside the critical section, by virtue
* of arch_spin_lock being constructed using acquire semantics.
*
* In cases where this is problematic (e.g. try_to_wake_up), an
* smp_mb__before_spinlock() can restore the required ordering.
*/
#define smp_mb__before_spinlock() smp_mb()
#endif /* __ASM_SPINLOCK_H */


@ -19,4 +19,6 @@
/* vDSO location */
#define AT_SYSINFO_EHDR 33
#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */
#endif


@ -428,8 +428,10 @@ int kernel_active_single_step(void)
/* ptrace API */
void user_enable_single_step(struct task_struct *task)
{
set_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP);
set_regs_spsr_ss(task_pt_regs(task));
struct thread_info *ti = task_thread_info(task);
if (!test_and_set_ti_thread_flag(ti, TIF_SINGLESTEP))
set_regs_spsr_ss(task_pt_regs(task));
}
void user_disable_single_step(struct task_struct *task)


@ -507,7 +507,7 @@ el0_inv:
mov x0, sp
mov x1, #BAD_SYNC
mrs x2, esr_el1
b bad_mode
b bad_el0_sync
ENDPROC(el0_sync)
.align 6


@ -406,16 +406,33 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
}
/*
* bad_mode handles the impossible case in the exception vector.
* bad_mode handles the impossible case in the exception vector. This is always
* fatal.
*/
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
console_verbose();
pr_crit("Bad mode in %s handler detected, code 0x%08x\n",
handler[reason], esr);
die("Oops - bad mode", regs, 0);
local_irq_disable();
panic("bad mode");
}
/*
* bad_el0_sync handles unexpected, but potentially recoverable synchronous
* exceptions taken from EL0. Unlike bad_mode, this returns.
*/
asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{
siginfo_t info;
void __user *pc = (void __user *)instruction_pointer(regs);
console_verbose();
pr_crit("Bad mode in %s handler detected, code 0x%08x\n",
handler[reason], esr);
pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x\n",
smp_processor_id(), esr);
__show_regs(regs);
info.si_signo = SIGILL;
@ -429,7 +446,7 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
arm64_check_cache_ecc(NULL);
}
arm64_notify_die("Oops - bad mode", regs, &info, 0);
force_sig_info(info.si_signo, &info, current);
}
void __pte_error(const char *file, int line, unsigned long val)


@ -74,7 +74,7 @@ extern __kernel_size_t __copy_user(void *to, const void *from,
extern __kernel_size_t copy_to_user(void __user *to, const void *from,
__kernel_size_t n);
extern __kernel_size_t copy_from_user(void *to, const void __user *from,
extern __kernel_size_t ___copy_from_user(void *to, const void __user *from,
__kernel_size_t n);
static inline __kernel_size_t __copy_to_user(void __user *to, const void *from,
@ -88,6 +88,15 @@ static inline __kernel_size_t __copy_from_user(void *to,
{
return __copy_user(to, (const void __force *)from, n);
}
static inline __kernel_size_t copy_from_user(void *to,
const void __user *from,
__kernel_size_t n)
{
size_t res = ___copy_from_user(to, from, n);
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
}
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user


@ -36,7 +36,7 @@ EXPORT_SYMBOL(copy_page);
/*
* Userspace access stuff.
*/
EXPORT_SYMBOL(copy_from_user);
EXPORT_SYMBOL(___copy_from_user);
EXPORT_SYMBOL(copy_to_user);
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(strncpy_from_user);


@ -23,13 +23,13 @@
*/
.text
.align 1
.global copy_from_user
.type copy_from_user, @function
copy_from_user:
.global ___copy_from_user
.type ___copy_from_user, @function
___copy_from_user:
branch_if_kernel r8, __copy_user
ret_if_privileged r8, r11, r10, r10
rjmp __copy_user
.size copy_from_user, . - copy_from_user
.size ___copy_from_user, . - ___copy_from_user
.global copy_to_user
.type copy_to_user, @function


@ -435,7 +435,7 @@ void __init at32_init_pio(struct platform_device *pdev)
struct resource *regs;
struct pio_device *pio;
if (pdev->id > MAX_NR_PIO_DEVICES) {
if (pdev->id >= MAX_NR_PIO_DEVICES) {
dev_err(&pdev->dev, "only %d PIO devices supported\n",
MAX_NR_PIO_DEVICES);
return;


@ -177,11 +177,12 @@ static inline int bad_user_access_length(void)
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
if (likely(access_ok(VERIFY_READ, from, n))) {
memcpy(to, (const void __force *)from, n);
else
return n;
return 0;
return 0;
}
memset(to, 0, n);
return n;
}
static inline unsigned long __must_check


@ -176,30 +176,6 @@ extern unsigned long __copy_user(void __user *to, const void *from, unsigned lon
extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n);
extern unsigned long __do_clear_user(void __user *to, unsigned long n);
static inline unsigned long
__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
return __copy_user(to,from,n);
return n;
}
static inline unsigned long
__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
return __copy_user_zeroing(to,from,n);
return n;
}
static inline unsigned long
__generic_clear_user(void __user *to, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
return __do_clear_user(to,n);
return n;
}
static inline long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
@ -262,7 +238,7 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
else if (n == 24)
__asm_copy_from_user_24(to, from, ret);
else
ret = __generic_copy_from_user(to, from, n);
ret = __copy_user_zeroing(to, from, n);
return ret;
}
@ -312,7 +288,7 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
else if (n == 24)
__asm_copy_to_user_24(to, from, ret);
else
ret = __generic_copy_to_user(to, from, n);
ret = __copy_user(to, from, n);
return ret;
}
@ -344,26 +320,43 @@ __constant_clear_user(void __user *to, unsigned long n)
else if (n == 24)
__asm_clear_24(to, ret);
else
ret = __generic_clear_user(to, n);
ret = __do_clear_user(to, n);
return ret;
}
#define clear_user(to, n) \
(__builtin_constant_p(n) ? \
__constant_clear_user(to, n) : \
__generic_clear_user(to, n))
static inline size_t clear_user(void __user *to, size_t n)
{
if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
return n;
if (__builtin_constant_p(n))
return __constant_clear_user(to, n);
else
return __do_clear_user(to, n);
}
#define copy_from_user(to, from, n) \
(__builtin_constant_p(n) ? \
__constant_copy_from_user(to, from, n) : \
__generic_copy_from_user(to, from, n))
static inline size_t copy_from_user(void *to, const void __user *from, size_t n)
{
if (unlikely(!access_ok(VERIFY_READ, from, n))) {
memset(to, 0, n);
return n;
}
if (__builtin_constant_p(n))
return __constant_copy_from_user(to, from, n);
else
return __copy_user_zeroing(to, from, n);
}
#define copy_to_user(to, from, n) \
(__builtin_constant_p(n) ? \
__constant_copy_to_user(to, from, n) : \
__generic_copy_to_user(to, from, n))
static inline size_t copy_to_user(void __user *to, const void *from, size_t n)
{
if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
return n;
if (__builtin_constant_p(n))
return __constant_copy_to_user(to, from, n);
else
return __copy_user(to, from, n);
}
/* We let the __ versions of copy_from/to_user inline, because they're often
* used in fast paths and have only a small space overhead.


@ -263,19 +263,25 @@ do { \
extern long __memset_user(void *dst, unsigned long count);
extern long __memcpy_user(void *dst, const void *src, unsigned long count);
#define clear_user(dst,count) __memset_user(____force(dst), (count))
#define __clear_user(dst,count) __memset_user(____force(dst), (count))
#define __copy_from_user_inatomic(to, from, n) __memcpy_user((to), ____force(from), (n))
#define __copy_to_user_inatomic(to, from, n) __memcpy_user(____force(to), (from), (n))
#else
#define clear_user(dst,count) (memset(____force(dst), 0, (count)), 0)
#define __clear_user(dst,count) (memset(____force(dst), 0, (count)), 0)
#define __copy_from_user_inatomic(to, from, n) (memcpy((to), ____force(from), (n)), 0)
#define __copy_to_user_inatomic(to, from, n) (memcpy(____force(to), (from), (n)), 0)
#endif
#define __clear_user clear_user
static inline unsigned long __must_check
clear_user(void __user *to, unsigned long n)
{
if (likely(__access_ok(to, n)))
n = __clear_user(to, n);
return n;
}
static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)


@ -102,7 +102,8 @@ static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
{
long res = __strnlen_user(src, n);
/* return from strnlen can't be zero -- that would be rubbish. */
if (unlikely(!res))
return -EFAULT;
if (res > n) {
copy_from_user(dst, src, n);


@ -262,17 +262,15 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
__cu_len; \
})
#define copy_from_user(to, from, n) \
({ \
void *__cu_to = (to); \
const void __user *__cu_from = (from); \
long __cu_len = (n); \
\
__chk_user_ptr(__cu_from); \
if (__access_ok(__cu_from, __cu_len, get_fs())) \
__cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
__cu_len; \
})
static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (likely(__access_ok(from, n, get_fs())))
n = __copy_user((__force void __user *) to, from, n);
else
memset(to, 0, n);
return n;
}
#define __copy_in_user(to, from, size) __copy_user((to), (from), (size))


@ -215,7 +215,7 @@ extern int fixup_exception(struct pt_regs *regs);
#define __get_user_nocheck(x,ptr,size) \
({ \
long __gu_err = 0; \
unsigned long __gu_val; \
unsigned long __gu_val = 0; \
might_sleep(); \
__get_user_size(__gu_val,(ptr),(size),__gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \


@ -38,6 +38,7 @@
#define atomic_dec(v) atomic_sub(1, (v))
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
@ -46,8 +47,6 @@
#endif
#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
#include <asm-generic/atomic64.h>
#endif /* __ASM_METAG_ATOMIC_H */


@ -199,8 +199,9 @@ extern unsigned long __must_check __copy_user_zeroing(void *to,
static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
if (likely(access_ok(VERIFY_READ, from, n)))
return __copy_user_zeroing(to, from, n);
memset(to, 0, n);
return n;
}


@ -226,7 +226,7 @@ extern long __user_bad(void);
#define __get_user(x, ptr) \
({ \
unsigned long __gu_val; \
unsigned long __gu_val = 0; \
/*unsigned long __gu_ptr = (unsigned long)(ptr);*/ \
long __gu_err; \
switch (sizeof(*(ptr))) { \
@ -371,10 +371,13 @@ extern long __user_bad(void);
static inline long copy_from_user(void *to,
const void __user *from, unsigned long n)
{
unsigned long res = n;
might_sleep();
if (access_ok(VERIFY_READ, from, n))
return __copy_from_user(to, from, n);
return n;
if (likely(access_ok(VERIFY_READ, from, n)))
res = __copy_from_user(to, from, n);
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
}
#define __copy_to_user(to, from, n) \


@ -375,7 +375,10 @@ struct kvm_vcpu_arch {
/* Host KSEG0 address of the EI/DI offset */
void *kseg0_commpage;
u32 io_gpr; /* GPR used as IO source/target */
/* Resume PC after MMIO completion */
unsigned long io_pc;
/* GPR used as IO source/target */
u32 io_gpr;
/* Used to calibrate the virutal count register for the guest */
int32_t host_cp0_count;
@ -386,8 +389,6 @@ struct kvm_vcpu_arch {
/* Bitmask of pending exceptions to be cleared */
unsigned long pending_exceptions_clr;
unsigned long pending_load_cause;
/* Save/Restore the entryhi register when are are preempted/scheduled back in */
unsigned long preempt_entryhi;


@ -73,7 +73,7 @@ static inline int is_syscall_success(struct pt_regs *regs)
static inline long regs_return_value(struct pt_regs *regs)
{
if (is_syscall_success(regs))
if (is_syscall_success(regs) || !user_mode(regs))
return regs->regs[2];
else
return -regs->regs[2];


@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/string.h>
/*
* The fs value determines whether argument validity checking should be
@ -938,6 +939,8 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
might_fault(); \
__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
__cu_len); \
} else { \
memset(__cu_to, 0, __cu_len); \
} \
__cu_len; \
})


@ -254,15 +254,15 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result er = EMULATE_DONE;
if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
kvm_clear_c0_guest_status(cop0, ST0_ERL);
vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
} else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
kvm_read_c0_guest_epc(cop0));
kvm_clear_c0_guest_status(cop0, ST0_EXL);
vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
kvm_clear_c0_guest_status(cop0, ST0_ERL);
vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
} else {
printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
vcpu->arch.pc);
@ -310,6 +310,47 @@ enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
return er;
}
/**
* kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
* @vcpu: VCPU with changed mappings.
* @tlb: TLB entry being removed.
*
* This is called to indicate a single change in guest MMU mappings, so that we
* can arrange TLB flushes on this and other CPUs.
*/
static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
struct kvm_mips_tlb *tlb)
{
int cpu, i;
bool user;
/* No need to flush for entries which are already invalid */
if (!((tlb->tlb_lo0 | tlb->tlb_lo1) & MIPS3_PG_V))
return;
/* User address space doesn't need flushing for KSeg2/3 changes */
user = tlb->tlb_hi < KVM_GUEST_KSEG0;
preempt_disable();
/*
* Probe the shadow host TLB for the entry being overwritten, if one
* matches, invalidate it
*/
kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
/* Invalidate the whole ASID on other CPUs */
cpu = smp_processor_id();
for_each_possible_cpu(i) {
if (i == cpu)
continue;
if (user)
vcpu->arch.guest_user_asid[i] = 0;
vcpu->arch.guest_kernel_asid[i] = 0;
}
preempt_enable();
}
/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
{
@ -331,10 +372,8 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
}
tlb = &vcpu->arch.guest_tlb[index];
#if 1
/* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
#endif
kvm_mips_invalidate_guest_tlb(vcpu, tlb);
tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
@ -373,10 +412,7 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
tlb = &vcpu->arch.guest_tlb[index];
#if 1
/* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
#endif
kvm_mips_invalidate_guest_tlb(vcpu, tlb);
tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
@ -419,6 +455,7 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
int32_t rt, rd, copz, sel, co_bit, op;
uint32_t pc = vcpu->arch.pc;
unsigned long curr_pc;
int cpu, i;
/*
* Update PC and hold onto current PC in case there is
@ -538,8 +575,16 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
ASID_MASK,
vcpu->arch.gprs[rt] & ASID_MASK);
preempt_disable();
/* Blow away the shadow host TLBs */
kvm_mips_flush_host_tlb(1);
cpu = smp_processor_id();
for_each_possible_cpu(i)
if (i != cpu) {
vcpu->arch.guest_user_asid[i] = 0;
vcpu->arch.guest_kernel_asid[i] = 0;
}
preempt_enable();
}
kvm_write_c0_guest_entryhi(cop0,
vcpu->arch.gprs[rt]);
@ -773,6 +818,7 @@ kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
struct kvm_run *run, struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DO_MMIO;
unsigned long curr_pc;
int32_t op, base, rt, offset;
uint32_t bytes;
@ -781,7 +827,18 @@ kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
offset = inst & 0xffff;
op = (inst >> 26) & 0x3f;
vcpu->arch.pending_load_cause = cause;
/*
* Find the resume PC now while we have safe and easy access to the
* prior branch instruction, and save it for
* kvm_mips_complete_mmio_load() to restore later.
*/
curr_pc = vcpu->arch.pc;
er = update_pc(vcpu, cause);
if (er == EMULATE_FAIL)
return er;
vcpu->arch.io_pc = vcpu->arch.pc;
vcpu->arch.pc = curr_pc;
vcpu->arch.io_gpr = rt;
switch (op) {
@ -1610,7 +1667,6 @@ kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
enum emulation_result er = EMULATE_DONE;
unsigned long curr_pc;
if (run->mmio.len > sizeof(*gpr)) {
printk("Bad MMIO length: %d", run->mmio.len);
@ -1618,14 +1674,8 @@ kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
goto done;
}
/*
* Update PC and hold onto current PC in case there is
* an error and we want to rollback the PC
*/
curr_pc = vcpu->arch.pc;
er = update_pc(vcpu, vcpu->arch.pending_load_cause);
if (er == EMULATE_FAIL)
return er;
/* Restore saved resume PC */
vcpu->arch.pc = vcpu->arch.io_pc;
switch (run->mmio.len) {
case 4:
@ -1647,12 +1697,6 @@ kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
break;
}
if (vcpu->arch.pending_load_cause & CAUSEF_BD)
kvm_debug
("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
vcpu->mmio_needed);
done:
return er;
}


@ -36,6 +36,9 @@
#include <linux/console.h>
#endif
#define ROCIT_CONFIG_GEN0 0x1f403000
#define ROCIT_CONFIG_GEN0_PCI_IOCU BIT(7)
extern void malta_be_init(void);
extern int malta_be_handler(struct pt_regs *regs, int is_fixup);
@ -108,6 +111,8 @@ static void __init fd_activate(void)
static int __init plat_enable_iocoherency(void)
{
int supported = 0;
u32 cfg;
if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) {
if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
@ -130,7 +135,8 @@ static int __init plat_enable_iocoherency(void)
} else if (gcmp_niocu() != 0) {
/* Nothing special needs to be done to enable coherency */
pr_info("CMP IOCU detected\n");
if ((*(unsigned int *)0xbf403000 & 0x81) != 0x81) {
cfg = __raw_readl((u32 *)CKSEG1ADDR(ROCIT_CONFIG_GEN0));
if (!(cfg & ROCIT_CONFIG_GEN0_PCI_IOCU)) {
pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n");
return 0;
}
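
The Malta hunk above stops dereferencing a hard-coded 0xbf403000 and instead reads the RocIT register through __raw_readl(CKSEG1ADDR(ROCIT_CONFIG_GEN0)). As the upstream fix ("MIPS: Malta: Fix IOCU disable switch read for MIPS64") describes it, the unsuffixed 32-bit literal zero-extends on a 64-bit kernel, while a valid CKSEG1 address requires sign extension. A runnable sketch of just that arithmetic; the address is the one from the patch, everything else is plain C:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	/* Zero-extended: 0x00000000bf403000 -- not a CKSEG1 address on MIPS64. */
	uint64_t zext = (uint64_t)0xbf403000u;
	/* Sign-extended, as CKSEG1ADDR() yields: 0xffffffffbf403000. */
	uint64_t sext = (uint64_t)(int32_t)0xbf403000u;

	printf("zero-extended: %#018" PRIx64 "\n", zext);
	printf("sign-extended: %#018" PRIx64 "\n", sext);
	return 0;
}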


@ -181,6 +181,7 @@ struct __large_struct { unsigned long buf[100]; };
"2:\n" \
" .section .fixup,\"ax\"\n" \
"3:\n\t" \
" mov 0,%1\n" \
" mov %3,%0\n" \
" jmp 2b\n" \
" .previous\n" \


@ -9,7 +9,7 @@
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <asm/uaccess.h>
#include <linux/uaccess.h>
unsigned long
__generic_copy_to_user(void *to, const void *from, unsigned long n)
@ -24,6 +24,8 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
__copy_user_zeroing(to, from, n);
else
memset(to, 0, n);
return n;
}


@ -273,28 +273,20 @@ __copy_tofrom_user(void *to, const void *from, unsigned long size);
static inline unsigned long
copy_from_user(void *to, const void *from, unsigned long n)
{
unsigned long over;
unsigned long res = n;
if (access_ok(VERIFY_READ, from, n))
return __copy_tofrom_user(to, from, n);
if ((unsigned long)from < TASK_SIZE) {
over = (unsigned long)from + n - TASK_SIZE;
return __copy_tofrom_user(to, from, n - over) + over;
}
return n;
if (likely(access_ok(VERIFY_READ, from, n)))
res = __copy_tofrom_user(to, from, n);
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
}
static inline unsigned long
copy_to_user(void *to, const void *from, unsigned long n)
{
unsigned long over;
if (access_ok(VERIFY_WRITE, to, n))
return __copy_tofrom_user(to, from, n);
if ((unsigned long)to < TASK_SIZE) {
over = (unsigned long)to + n - TASK_SIZE;
return __copy_tofrom_user(to, from, n - over) + over;
}
if (likely(access_ok(VERIFY_WRITE, to, n)))
n = __copy_tofrom_user(to, from, n);
return n;
}
@ -303,13 +295,8 @@ extern unsigned long __clear_user(void *addr, unsigned long size);
static inline __must_check unsigned long
clear_user(void *addr, unsigned long size)
{
if (access_ok(VERIFY_WRITE, addr, size))
return __clear_user(addr, size);
if ((unsigned long)addr < TASK_SIZE) {
unsigned long over = (unsigned long)addr + size - TASK_SIZE;
return __clear_user(addr, size - over) + over;
}
if (likely(access_ok(VERIFY_WRITE, addr, size)))
size = __clear_user(addr, size);
return size;
}


@ -9,6 +9,8 @@
#include <asm/errno.h>
#include <asm-generic/uaccess-unaligned.h>
#include <linux/string.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
@ -246,13 +248,14 @@ static inline unsigned long __must_check copy_from_user(void *to,
unsigned long n)
{
int sz = __compiletime_object_size(to);
int ret = -EFAULT;
unsigned long ret = n;
if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
ret = __copy_from_user(to, from, n);
else
copy_from_user_overflow();
if (unlikely(ret))
memset(to + (n - ret), 0, ret);
return ret;
}


@ -106,8 +106,6 @@ linux_gateway_entry:
mtsp %r0,%sr4 /* get kernel space into sr4 */
mtsp %r0,%sr5 /* get kernel space into sr5 */
mtsp %r0,%sr6 /* get kernel space into sr6 */
mfsp %sr7,%r1 /* save user sr7 */
mtsp %r1,%sr3 /* and store it in sr3 */
#ifdef CONFIG_64BIT
/* for now we can *always* set the W bit on entry to the syscall
@ -133,6 +131,14 @@ linux_gateway_entry:
depdi 0, 31, 32, %r21
1:
#endif
/* We use a rsm/ssm pair to prevent sr3 from being clobbered
* by external interrupts.
*/
mfsp %sr7,%r1 /* save user sr7 */
rsm PSW_SM_I, %r0 /* disable interrupts */
mtsp %r1,%sr3 /* and store it in sr3 */
mfctl %cr30,%r1
xor %r1,%r30,%r30 /* ye olde xor trick */
xor %r1,%r30,%r1
@ -147,6 +153,7 @@ linux_gateway_entry:
*/
mtsp %r0,%sr7 /* get kernel space into sr7 */
ssm PSW_SM_I, %r0 /* enable interrupts */
STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */
mfctl %cr30,%r1 /* get task ptr in %r1 */
LDREG TI_TASK(%r1),%r1


@ -323,30 +323,17 @@ extern unsigned long __copy_tofrom_user(void __user *to,
static inline unsigned long copy_from_user(void *to,
const void __user *from, unsigned long n)
{
unsigned long over;
if (access_ok(VERIFY_READ, from, n))
if (likely(access_ok(VERIFY_READ, from, n)))
return __copy_tofrom_user((__force void __user *)to, from, n);
if ((unsigned long)from < TASK_SIZE) {
over = (unsigned long)from + n - TASK_SIZE;
return __copy_tofrom_user((__force void __user *)to, from,
n - over) + over;
}
memset(to, 0, n);
return n;
}
static inline unsigned long copy_to_user(void __user *to,
const void *from, unsigned long n)
{
unsigned long over;
if (access_ok(VERIFY_WRITE, to, n))
return __copy_tofrom_user(to, (__force void __user *)from, n);
if ((unsigned long)to < TASK_SIZE) {
over = (unsigned long)to + n - TASK_SIZE;
return __copy_tofrom_user(to, (__force void __user *)from,
n - over) + over;
}
return n;
}
@ -437,10 +424,6 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size)
might_sleep();
if (likely(access_ok(VERIFY_WRITE, addr, size)))
return __clear_user(addr, size);
if ((unsigned long)addr < TASK_SIZE) {
unsigned long over = (unsigned long)addr + size - TASK_SIZE;
return __clear_user(addr, size - over) + over;
}
return size;
}


@ -280,7 +280,7 @@ int __init nvram_remove_partition(const char *name, int sig,
/* Make partition a free partition */
part->header.signature = NVRAM_SIG_FREE;
strncpy(part->header.name, "wwwwwwwwwwww", 12);
memset(part->header.name, 'w', 12);
part->header.checksum = nvram_checksum(&part->header);
rc = nvram_write_header(part);
if (rc <= 0) {
@ -298,8 +298,8 @@ int __init nvram_remove_partition(const char *name, int sig,
}
if (prev) {
prev->header.length += part->header.length;
prev->header.checksum = nvram_checksum(&part->header);
rc = nvram_write_header(part);
prev->header.checksum = nvram_checksum(&prev->header);
rc = nvram_write_header(prev);
if (rc <= 0) {
printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc);
return rc;


@ -57,7 +57,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map)
bl V_LOCAL_FUNC(__get_datapage)
mtlr r12
addi r3,r3,CFG_SYSCALL_MAP64
cmpli cr0,r4,0
cmpldi cr0,r4,0
crclr cr0*4+so
beqlr
li r0,__NR_syscalls


@ -145,7 +145,7 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
bne cr0,99f
li r3,0
cmpli cr0,r4,0
cmpldi cr0,r4,0
crclr cr0*4+so
beqlr
lis r5,CLOCK_REALTIME_RES@h
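
Both vdso64 hunks swap cmpli for cmpldi: cmpli compares only a 32-bit word, so a 64-bit user pointer whose low word happens to be all zeroes is mistaken for NULL, while cmpldi compares the full doubleword. The same mistake written as a runnable C sketch; the pointer value is made up:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* A made-up 64-bit pointer whose low 32 bits are all zero. */
	uint64_t p = 0x00007fff00000000ULL;

	int word_says_null  = ((uint32_t)p == 0);	/* cmpli-style  */
	int dword_says_null = (p == 0);			/* cmpldi-style */

	printf("32-bit compare says NULL: %d\n", word_says_null);
	printf("64-bit compare says NULL: %d\n", dword_says_null);
	return 0;
}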


@ -336,6 +336,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
addi r3,r3,8
171:
177:
179:
addi r3,r3,8
370:
372:
@ -350,7 +351,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
173:
174:
175:
179:
181:
184:
186:


@ -110,7 +110,12 @@ BEGIN_FTR_SECTION
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
b slb_finish_load_1T
0:
0: /*
* For userspace addresses, make sure this is region 0.
*/
cmpdi r9, 0
bne 8f
/* when using slices, we extract the psize off the slice bitmaps
* and then we need to get the sllp encoding off the mmu_psize_defs
* array.
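
The added cmpdi/bne pair enforces what the hunk's own comment says: user effective addresses must sit in region 0. On powerpc64, bits 63:60 of an effective address select the region (0 for userspace, 0xc for the kernel linear map, 0xd for vmalloc on these kernels), and the SLB miss path here must not treat kernel-region addresses below PAGE_OFFSET as user addresses. Extracting that field in C, with made-up sample addresses:

#include <stdio.h>
#include <stdint.h>

/* Region = top 4 bits of a powerpc64 effective address; 0 means user. */
static unsigned int region_of(uint64_t ea) { return (unsigned int)(ea >> 60); }

int main(void)
{
	uint64_t user_ea    = 0x0000123456789000ULL;	/* region 0x0 */
	uint64_t vmalloc_ea = 0xd000000012345000ULL;	/* region 0xd */

	printf("user ea region:    %#x\n", region_of(user_ea));
	printf("vmalloc ea region: %#x\n", region_of(vmalloc_ea));
	return 0;
}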


@ -176,8 +176,8 @@ static void pnv_pci_dump_p7ioc_diag_data(struct pnv_phb *phb)
pr_info(" dma1ErrorLog1 = 0x%016llx\n", data->dma1ErrorLog1);
for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
if ((data->pestA[i] >> 63) == 0 &&
(data->pestB[i] >> 63) == 0)
if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
(be64_to_cpu(data->pestB[i]) >> 63) == 0)
continue;
pr_info(" PE[%3d] PESTA = 0x%016llx\n", i, data->pestA[i]);
pr_info(" PESTB = 0x%016llx\n", data->pestB[i]);


@ -164,28 +164,28 @@ extern int __put_user_bad(void) __attribute__((noreturn));
__chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \
case 1: { \
unsigned char __x; \
unsigned char __x = 0; \
__gu_err = __get_user_fn(sizeof (*(ptr)), \
ptr, &__x); \
(x) = *(__force __typeof__(*(ptr)) *) &__x; \
break; \
}; \
case 2: { \
unsigned short __x; \
unsigned short __x = 0; \
__gu_err = __get_user_fn(sizeof (*(ptr)), \
ptr, &__x); \
(x) = *(__force __typeof__(*(ptr)) *) &__x; \
break; \
}; \
case 4: { \
unsigned int __x; \
unsigned int __x = 0; \
__gu_err = __get_user_fn(sizeof (*(ptr)), \
ptr, &__x); \
(x) = *(__force __typeof__(*(ptr)) *) &__x; \
break; \
}; \
case 8: { \
unsigned long long __x; \
unsigned long long __x = 0; \
__gu_err = __get_user_fn(sizeof (*(ptr)), \
ptr, &__x); \
(x) = *(__force __typeof__(*(ptr)) *) &__x; \


@ -158,7 +158,7 @@ do { \
__get_user_asm(val, "lw", ptr); \
break; \
case 8: \
if ((copy_from_user((void *)&val, ptr, 8)) == 0) \
if (__copy_from_user((void *)&val, ptr, 8) == 0) \
__gu_err = 0; \
else \
__gu_err = -EFAULT; \
@ -183,6 +183,8 @@ do { \
\
if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
__get_user_common((x), size, __gu_ptr); \
else \
(x) = 0; \
\
__gu_err; \
})
@ -196,6 +198,7 @@ do { \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3:li %0, %4\n" \
"li %1, 0\n" \
"j 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
@ -293,35 +296,34 @@ extern int __copy_tofrom_user(void *to, const void *from, unsigned long len);
static inline unsigned long
copy_from_user(void *to, const void *from, unsigned long len)
{
unsigned long over;
unsigned long res = len;
if (access_ok(VERIFY_READ, from, len))
return __copy_tofrom_user(to, from, len);
if (likely(access_ok(VERIFY_READ, from, len)))
res = __copy_tofrom_user(to, from, len);
if ((unsigned long)from < TASK_SIZE) {
over = (unsigned long)from + len - TASK_SIZE;
return __copy_tofrom_user(to, from, len - over) + over;
}
return len;
if (unlikely(res))
memset(to + (len - res), 0, res);
return res;
}
static inline unsigned long
copy_to_user(void *to, const void *from, unsigned long len)
{
unsigned long over;
if (likely(access_ok(VERIFY_WRITE, to, len)))
len = __copy_tofrom_user(to, from, len);
if (access_ok(VERIFY_WRITE, to, len))
return __copy_tofrom_user(to, from, len);
if ((unsigned long)to < TASK_SIZE) {
over = (unsigned long)to + len - TASK_SIZE;
return __copy_tofrom_user(to, from, len - over) + over;
}
return len;
}
#define __copy_from_user(to, from, len) \
__copy_tofrom_user((to), (from), (len))
static inline unsigned long
__copy_from_user(void *to, const void *from, unsigned long len)
{
unsigned long left = __copy_tofrom_user(to, from, len);
if (unlikely(left))
memset(to + (len - left), 0, left);
return left;
}
#define __copy_to_user(to, from, len) \
__copy_tofrom_user((to), (from), (len))
@ -335,17 +337,17 @@ __copy_to_user_inatomic(void *to, const void *from, unsigned long len)
static inline unsigned long
__copy_from_user_inatomic(void *to, const void *from, unsigned long len)
{
return __copy_from_user(to, from, len);
return __copy_tofrom_user(to, from, len);
}
#define __copy_in_user(to, from, len) __copy_from_user(to, from, len)
#define __copy_in_user(to, from, len) __copy_tofrom_user(to, from, len)
static inline unsigned long
copy_in_user(void *to, const void *from, unsigned long len)
{
if (access_ok(VERIFY_READ, from, len) &&
access_ok(VERFITY_WRITE, to, len))
return copy_from_user(to, from, len);
return __copy_tofrom_user(to, from, len);
}
/*


@ -151,7 +151,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
__kernel_size_t __copy_size = (__kernel_size_t) n;
if (__copy_size && __access_ok(__copy_from, __copy_size))
return __copy_user(to, from, __copy_size);
__copy_size = __copy_user(to, from, __copy_size);
if (unlikely(__copy_size))
memset(to + (n - __copy_size), 0, __copy_size);
return __copy_size;
}


@ -24,6 +24,7 @@
#define __get_user_size(x,ptr,size,retval) \
do { \
retval = 0; \
x = 0; \
switch (size) { \
case 1: \
retval = __get_user_asm_b((void *)&x, \


@ -265,8 +265,10 @@ static inline unsigned long copy_from_user(void *to, const void __user *from, un
{
if (n && __access_ok((unsigned long) from, n))
return __copy_user((__force void __user *) to, from, n);
else
else {
memset(to, 0, n);
return n;
}
}
static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)

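A note on the uaccess hunks above: they all close the same class of infoleak. When a copy from user space faults part-way through, the untouched tail of the kernel destination buffer keeps whatever stale data was already there unless the helper zeroes it (or zeroes the get_user result) before returning. A minimal user-space sketch of the pattern, assuming a hypothetical raw_copy() primitive that returns the number of bytes it could not copy:

#include <stdio.h>
#include <string.h>

/* Stand-in for the arch copy primitive: copies what it can and returns
 * the number of bytes NOT copied. Here it "faults" halfway through so
 * the zeroing path is exercised. */
static unsigned long raw_copy(void *to, const void *from, unsigned long len)
{
    unsigned long done = len / 2;

    memcpy(to, from, done);
    return len - done;
}

static unsigned long copy_from_user_like(void *to, const void *from,
                                         unsigned long len)
{
    unsigned long res = raw_copy(to, from, len);

    if (res)                            /* partial copy: zero the tail */
        memset((char *)to + (len - res), 0, res);
    return res;                         /* bytes left uncopied */
}

int main(void)
{
    char src[8] = "ABCDEFG";
    char dst[8];
    int i;

    memset(dst, 'X', sizeof(dst));      /* simulate stale kernel data */
    copy_from_user_like(dst, src, sizeof(dst));
    for (i = 0; i < 8; i++)
        printf("%02x ", (unsigned char)dst[i]);
    printf("\n");                       /* tail prints 00, never 'X' */
    return 0;
}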

@ -215,8 +215,8 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
*/
unsigned long long sched_clock(void)
{
return clocksource_cyc2ns(get_cycles(),
sched_clock_mult, SCHED_CLOCK_SHIFT);
return mult_frac(get_cycles(),
sched_clock_mult, 1ULL << SCHED_CLOCK_SHIFT);
}
int setup_profiling_timer(unsigned int multiplier)

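The sched_clock() hunk above swaps clocksource_cyc2ns(), whose (cycles * mult) >> shift intermediate wraps once the free-running cycle counter grows large, for mult_frac(), which splits the scaling into quotient and remainder parts so neither intermediate product grows with the full cycle count. A compilable sketch of the same arithmetic; mult_frac64() here is a local stand-in, not the kernel macro:

#include <stdint.h>
#include <stdio.h>

/* Overflow-safer x * mult / div: split x by div so the remainder product
 * stays below div * mult instead of growing with x. */
static uint64_t mult_frac64(uint64_t x, uint64_t mult, uint64_t div)
{
    uint64_t q = x / div;
    uint64_t r = x % div;

    return q * mult + (r * mult) / div;
}

int main(void)
{
    uint64_t cycles = 1ULL << 62;   /* (cycles * 1000) >> 10 would wrap */
    uint64_t mult = 1000;
    uint64_t shift = 10;

    printf("%llu\n", (unsigned long long)
           mult_frac64(cycles, mult, 1ULL << shift));
    return 0;
}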

@ -7,7 +7,7 @@
targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo
KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
KBUILD_CFLAGS += -fno-strict-aliasing $(call cc-option, -fPIE, -fPIC)
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small
@ -20,6 +20,18 @@ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
LDFLAGS := -m elf_$(UTS_MACHINE)
ifeq ($(CONFIG_RELOCATABLE),y)
# If kernel is relocatable, build compressed kernel as PIE.
ifeq ($(CONFIG_X86_32),y)
LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker)
else
# To build 64-bit compressed kernel as PIE, we disable relocation
# overflow check to avoid relocation overflow error with a new linker
# command-line option, -z noreloc-overflow.
LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \
&& echo "-z noreloc-overflow -pie --no-dynamic-linker")
endif
endif
LDFLAGS_vmlinux := -T
hostprogs-y := mkpiggy


@ -30,6 +30,34 @@
#include <asm/boot.h>
#include <asm/asm-offsets.h>
/*
* The 32-bit x86 assembler in binutils 2.26 will generate R_386_GOT32X
* relocation to get the symbol address in PIC. When the compressed x86
* kernel isn't built as PIC, the linker optimizes R_386_GOT32X
* relocations to their fixed symbol addresses. However, when the
* compressed x86 kernel is loaded at a different address, it leads
* to the following load failure:
*
* Failed to allocate space for phdrs
*
* during the decompression stage.
*
* If the compressed x86 kernel is relocatable at run-time, it should be
* compiled with -fPIE, instead of -fPIC, if possible and should be built as
* Position Independent Executable (PIE) so that linker won't optimize
* R_386_GOT32X relocation to its fixed symbol address. Older
* linkers generate R_386_32 relocations against locally defined symbols,
* _bss, _ebss, _got and _egot, in PIE. It isn't wrong, just less
* optimal than R_386_RELATIVE. But the x86 kernel fails to properly handle
* R_386_32 relocations when relocating the kernel. To generate
* R_386_RELATIVE relocations, we mark _bss, _ebss, _got and _egot as
* hidden:
*/
.hidden _bss
.hidden _ebss
.hidden _got
.hidden _egot
__HEAD
ENTRY(startup_32)
#ifdef CONFIG_EFI_STUB


@ -34,6 +34,14 @@
#include <asm/processor-flags.h>
#include <asm/asm-offsets.h>
/*
* Locally defined symbols should be marked hidden:
*/
.hidden _bss
.hidden _ebss
.hidden _got
.hidden _egot
__HEAD
.code32
ENTRY(startup_32)


@ -4,6 +4,7 @@
#include <asm/page.h>
#include <asm-generic/hugetlb.h>
#define hugepages_supported() cpu_has_pse
static inline int is_hugepage_only_range(struct mm_struct *mm,
unsigned long addr,


@ -17,7 +17,14 @@
static inline void __native_flush_tlb(void)
{
/*
* If current->mm == NULL then we borrow a mm which may change during a
* task switch and therefore we must not be preempted while we write CR3
* back:
*/
preempt_disable();
native_write_cr3(native_read_cr3());
preempt_enable();
}
static inline void __native_flush_tlb_global_irq_disabled(void)


@ -381,7 +381,7 @@ do { \
asm volatile("1: mov"itype" %1,%"rtype"0\n" \
"2:\n" \
_ASM_EXTABLE_EX(1b, 2b) \
: ltype(x) : "m" (__m(addr)))
: ltype(x) : "m" (__m(addr)), "0" (0))
#define __put_user_nocheck(x, ptr, size) \
({ \


@ -1581,6 +1581,9 @@ void __init enable_IR_x2apic(void)
int ret, x2apic_enabled = 0;
int hardware_init_ret;
if (skip_ioapic_setup)
return;
/* Make sure irq_remap_ops are initialized */
setup_irq_remapping_ops();


@ -586,7 +586,7 @@ early_idt_handler_common:
movl %eax,%ds
movl %eax,%es
cmpl $(__KERNEL_CS),32(%esp)
cmpw $(__KERNEL_CS),32(%esp)
jne 10f
leal 28(%esp),%eax # Pointer to %eip


@ -46,12 +46,12 @@ void _paravirt_nop(void)
}
/* identity function, which can be inlined */
u32 _paravirt_ident_32(u32 x)
u32 notrace _paravirt_ident_32(u32 x)
{
return x;
}
u64 _paravirt_ident_64(u64 x)
u64 notrace _paravirt_ident_64(u64 x)
{
return x;
}

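The notrace annotations above matter because the paravirt identity ops can be executed on the function-tracing path itself; if ftrace instruments them, each traced call re-enters the tracer. In the kernel, notrace expands to the GCC attribute below; the rest of this snippet is only a sketch:

#include <stdio.h>

/* Keeps the compiler from inserting profiling/instrumentation calls in
 * the function, so it stays safe to call from tracing code. */
#define notrace __attribute__((no_instrument_function))

static notrace unsigned int ident_32(unsigned int x)
{
    return x;       /* trivial on purpose; must never recurse into tracing */
}

int main(void)
{
    printf("%u\n", ident_32(42));
    return 0;
}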

@ -366,6 +366,7 @@ struct nested_vmx {
struct list_head vmcs02_pool;
int vmcs02_num;
u64 vmcs01_tsc_offset;
bool change_vmcs01_virtual_x2apic_mode;
/* L2 must run next, and mustn't decide to exit to L1. */
bool nested_run_pending;
/*
@ -6702,6 +6703,12 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
{
u32 sec_exec_control;
/* Postpone execution until vmcs01 is the current VMCS. */
if (is_guest_mode(vcpu)) {
to_vmx(vcpu)->nested.change_vmcs01_virtual_x2apic_mode = true;
return;
}
/*
* There is not point to enable virtualize x2apic without enable
* apicv
@ -8085,6 +8092,12 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
/* Update TSC_OFFSET if TSC was changed while L2 ran */
vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
if (vmx->nested.change_vmcs01_virtual_x2apic_mode) {
vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
vmx_set_virtual_x2apic_mode(vcpu,
vcpu->arch.apic_base & X2APIC_ENABLE);
}
/* This is needed for same reason as it was needed in prepare_vmcs02 */
vmx->host_rsp = 0;


@ -178,7 +178,18 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
struct kvm_shared_msrs *locals
= container_of(urn, struct kvm_shared_msrs, urn);
struct kvm_shared_msr_values *values;
unsigned long flags;
/*
* Disabling irqs at this point since the following code could be
* interrupted and executed through kvm_arch_hardware_disable()
*/
local_irq_save(flags);
if (locals->registered) {
locals->registered = false;
user_return_notifier_unregister(urn);
}
local_irq_restore(flags);
for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
values = &locals->values[slot];
if (values->host != values->curr) {
@ -186,8 +197,6 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
values->curr = values->host;
}
}
locals->registered = false;
user_return_notifier_unregister(urn);
}
static void shared_msr_update(unsigned slot, u32 msr)
@ -3182,6 +3191,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
};
case KVM_SET_VAPIC_ADDR: {
struct kvm_vapic_addr va;
int idx;
r = -EINVAL;
if (!irqchip_in_kernel(vcpu->kvm))
@ -3189,7 +3199,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = -EFAULT;
if (copy_from_user(&va, argp, sizeof va))
goto out;
idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
break;
}
case KVM_X86_SETUP_MCE: {
@ -6509,11 +6521,13 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
kvmclock_reset(vcpu);
free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
fx_free(vcpu);
kvm_x86_ops->vcpu_free(vcpu);
free_cpumask_var(wbinvd_dirty_mask);
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,

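The kvm_arch_vcpu_free() hunk above is purely an ordering fix: kvm_x86_ops->vcpu_free() releases the structure that owns wbinvd_dirty_mask, so the pointer has to be saved in a local first and the cpumask freed only after the owner is gone. A minimal analogue, with hypothetical types standing in for the KVM ones:

#include <stdlib.h>

struct vcpu {
    void *wbinvd_dirty_mask;        /* allocation owned by the vcpu */
};

static void vcpu_free(struct vcpu *v)
{
    free(v);                        /* the container dies here */
}

static void arch_vcpu_free(struct vcpu *v)
{
    void *mask = v->wbinvd_dirty_mask;  /* snapshot before the free */

    vcpu_free(v);                   /* v must not be touched after this */
    free(mask);                     /* release the member last */
}

int main(void)
{
    struct vcpu *v = malloc(sizeof(*v));

    if (!v)
        return 1;
    v->wbinvd_dirty_mask = malloc(64);
    arch_vcpu_free(v);
    return 0;
}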

@ -505,11 +505,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
return 1;
while (cursor < to) {
if (!devmem_is_allowed(pfn)) {
printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
current->comm, from, to - 1);
if (!devmem_is_allowed(pfn))
return 0;
}
cursor += PAGE_SIZE;
pfn++;
}


@ -51,11 +51,7 @@
#else /* CONFIG_SMP */
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#include <asm-generic/barrier.h>
#endif /* CONFIG_SMP */


@ -1187,7 +1187,7 @@ static void __init xen_cleanhighmap(unsigned long vaddr,
/* NOTE: The loop is more greedy than the cleanup_highmap variant.
* We include the PMD passed in on _both_ boundaries. */
for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
pmd++, vaddr += PMD_SIZE) {
if (pmd_none(*pmd))
continue;


@ -2812,7 +2812,6 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
if (time_before(jiffies, rq_fifo_time(rq)))
rq = NULL;
cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
return rq;
}
@ -3189,6 +3188,9 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
unsigned int max_dispatch;
if (cfq_cfqq_must_dispatch(cfqq))
return true;
/*
* Drain async requests before we start sync IO
*/
@ -3280,15 +3282,20 @@ static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
rq = cfq_check_fifo(cfqq);
if (rq)
cfq_mark_cfqq_must_dispatch(cfqq);
if (!cfq_may_dispatch(cfqd, cfqq))
return false;
/*
* follow expired path, else get first next available
*/
rq = cfq_check_fifo(cfqq);
if (!rq)
rq = cfqq->next_rq;
else
cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
/*
* insert request into driver dispatch list
@ -3797,7 +3804,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
* if the new request is sync, but the currently running queue is
* not, let the sync request have priority.
*/
if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
return true;
if (new_cfqq->cfqg != cfqq->cfqg)


@ -379,6 +379,7 @@ static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
}
crt->base = __crypto_ablkcipher_cast(tfm);
crt->ivsize = alg->ivsize;
crt->has_setkey = alg->max_keysize;
return 0;
}
@ -460,6 +461,7 @@ static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt;
crt->base = __crypto_ablkcipher_cast(tfm);
crt->ivsize = alg->ivsize;
crt->has_setkey = alg->max_keysize;
return 0;
}


@ -76,6 +76,8 @@ int af_alg_register_type(const struct af_alg_type *type)
goto unlock;
type->ops->owner = THIS_MODULE;
if (type->ops_nokey)
type->ops_nokey->owner = THIS_MODULE;
node->type = type;
list_add(&node->list, &alg_types);
err = 0;
@ -125,6 +127,26 @@ int af_alg_release(struct socket *sock)
}
EXPORT_SYMBOL_GPL(af_alg_release);
void af_alg_release_parent(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
unsigned int nokey = ask->nokey_refcnt;
bool last = nokey && !ask->refcnt;
sk = ask->parent;
ask = alg_sk(sk);
lock_sock(sk);
ask->nokey_refcnt -= nokey;
if (!last)
last = !--ask->refcnt;
release_sock(sk);
if (last)
sock_put(sk);
}
EXPORT_SYMBOL_GPL(af_alg_release_parent);
static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sock *sk = sock->sk;
@ -132,6 +154,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
struct sockaddr_alg *sa = (void *)uaddr;
const struct af_alg_type *type;
void *private;
int err;
if (sock->state == SS_CONNECTED)
return -EINVAL;
@ -157,16 +180,22 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
return PTR_ERR(private);
}
err = -EBUSY;
lock_sock(sk);
if (ask->refcnt | ask->nokey_refcnt)
goto unlock;
swap(ask->type, type);
swap(ask->private, private);
err = 0;
unlock:
release_sock(sk);
alg_do_release(type, private);
return 0;
return err;
}
static int alg_setkey(struct sock *sk, char __user *ukey,
@ -199,11 +228,15 @@ static int alg_setsockopt(struct socket *sock, int level, int optname,
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
const struct af_alg_type *type;
int err = -ENOPROTOOPT;
int err = -EBUSY;
lock_sock(sk);
if (ask->refcnt)
goto unlock;
type = ask->type;
err = -ENOPROTOOPT;
if (level != SOL_ALG || !type)
goto unlock;
@ -228,6 +261,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
struct alg_sock *ask = alg_sk(sk);
const struct af_alg_type *type;
struct sock *sk2;
unsigned int nokey;
int err;
lock_sock(sk);
@ -247,18 +281,29 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
security_sk_clone(sk, sk2);
err = type->accept(ask->private, sk2);
nokey = err == -ENOKEY;
if (nokey && type->accept_nokey)
err = type->accept_nokey(ask->private, sk2);
if (err)
goto unlock;
sk2->sk_family = PF_ALG;
sock_hold(sk);
if (nokey || !ask->refcnt++)
sock_hold(sk);
ask->nokey_refcnt += nokey;
alg_sk(sk2)->parent = sk;
alg_sk(sk2)->type = type;
alg_sk(sk2)->nokey_refcnt = nokey;
newsock->ops = type->ops;
newsock->state = SS_CONNECTED;
if (nokey)
newsock->ops = type->ops_nokey;
err = 0;
unlock:


@ -410,6 +410,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
struct ahash_alg *alg = crypto_ahash_alg(hash);
hash->setkey = ahash_nosetkey;
hash->has_setkey = false;
hash->export = ahash_no_export;
hash->import = ahash_no_import;
@ -422,8 +423,10 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
hash->finup = alg->finup ?: ahash_def_finup;
hash->digest = alg->digest;
if (alg->setkey)
if (alg->setkey) {
hash->setkey = alg->setkey;
hash->has_setkey = true;
}
if (alg->export)
hash->export = alg->export;
if (alg->import)


@ -34,6 +34,11 @@ struct hash_ctx {
struct ahash_request req;
};
struct algif_hash_tfm {
struct crypto_ahash *hash;
bool has_key;
};
static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
struct msghdr *msg, size_t ignored)
{
@ -248,19 +253,151 @@ static struct proto_ops algif_hash_ops = {
.accept = hash_accept,
};
static int hash_check_key(struct socket *sock)
{
int err = 0;
struct sock *psk;
struct alg_sock *pask;
struct algif_hash_tfm *tfm;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
lock_sock(sk);
if (ask->refcnt)
goto unlock_child;
psk = ask->parent;
pask = alg_sk(ask->parent);
tfm = pask->private;
err = -ENOKEY;
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
if (!tfm->has_key)
goto unlock;
if (!pask->refcnt++)
sock_hold(psk);
ask->refcnt = 1;
sock_put(psk);
err = 0;
unlock:
release_sock(psk);
unlock_child:
release_sock(sk);
return err;
}
static int hash_sendmsg_nokey(struct kiocb *unused, struct socket *sock,
struct msghdr *msg, size_t size)
{
int err;
err = hash_check_key(sock);
if (err)
return err;
return hash_sendmsg(unused, sock, msg, size);
}
static ssize_t hash_sendpage_nokey(struct socket *sock, struct page *page,
int offset, size_t size, int flags)
{
int err;
err = hash_check_key(sock);
if (err)
return err;
return hash_sendpage(sock, page, offset, size, flags);
}
static int hash_recvmsg_nokey(struct kiocb *unused, struct socket *sock,
struct msghdr *msg, size_t ignored, int flags)
{
int err;
err = hash_check_key(sock);
if (err)
return err;
return hash_recvmsg(unused, sock, msg, ignored, flags);
}
static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
int flags)
{
int err;
err = hash_check_key(sock);
if (err)
return err;
return hash_accept(sock, newsock, flags);
}
static struct proto_ops algif_hash_ops_nokey = {
.family = PF_ALG,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.getname = sock_no_getname,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.getsockopt = sock_no_getsockopt,
.mmap = sock_no_mmap,
.bind = sock_no_bind,
.setsockopt = sock_no_setsockopt,
.poll = sock_no_poll,
.release = af_alg_release,
.sendmsg = hash_sendmsg_nokey,
.sendpage = hash_sendpage_nokey,
.recvmsg = hash_recvmsg_nokey,
.accept = hash_accept_nokey,
};
static void *hash_bind(const char *name, u32 type, u32 mask)
{
return crypto_alloc_ahash(name, type, mask);
struct algif_hash_tfm *tfm;
struct crypto_ahash *hash;
tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
if (!tfm)
return ERR_PTR(-ENOMEM);
hash = crypto_alloc_ahash(name, type, mask);
if (IS_ERR(hash)) {
kfree(tfm);
return ERR_CAST(hash);
}
tfm->hash = hash;
return tfm;
}
static void hash_release(void *private)
{
crypto_free_ahash(private);
struct algif_hash_tfm *tfm = private;
crypto_free_ahash(tfm->hash);
kfree(tfm);
}
static int hash_setkey(void *private, const u8 *key, unsigned int keylen)
{
return crypto_ahash_setkey(private, key, keylen);
struct algif_hash_tfm *tfm = private;
int err;
err = crypto_ahash_setkey(tfm->hash, key, keylen);
tfm->has_key = !err;
return err;
}
static void hash_sock_destruct(struct sock *sk)
@ -274,12 +411,14 @@ static void hash_sock_destruct(struct sock *sk)
af_alg_release_parent(sk);
}
static int hash_accept_parent(void *private, struct sock *sk)
static int hash_accept_parent_nokey(void *private, struct sock *sk)
{
struct hash_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(private);
unsigned ds = crypto_ahash_digestsize(private);
struct algif_hash_tfm *tfm = private;
struct crypto_ahash *hash = tfm->hash;
unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash);
unsigned ds = crypto_ahash_digestsize(hash);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
@ -299,7 +438,7 @@ static int hash_accept_parent(void *private, struct sock *sk)
ask->private = ctx;
ahash_request_set_tfm(&ctx->req, private);
ahash_request_set_tfm(&ctx->req, hash);
ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
af_alg_complete, &ctx->completion);
@ -308,12 +447,24 @@ static int hash_accept_parent(void *private, struct sock *sk)
return 0;
}
static int hash_accept_parent(void *private, struct sock *sk)
{
struct algif_hash_tfm *tfm = private;
if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash))
return -ENOKEY;
return hash_accept_parent_nokey(private, sk);
}
static const struct af_alg_type algif_type_hash = {
.bind = hash_bind,
.release = hash_release,
.setkey = hash_setkey,
.accept = hash_accept_parent,
.accept_nokey = hash_accept_parent_nokey,
.ops = &algif_hash_ops,
.ops_nokey = &algif_hash_ops_nokey,
.name = "hash",
.owner = THIS_MODULE
};

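The reshuffle above (mirrored for algif_skcipher below) follows one pattern: wrap the bare transform in a small private struct so a has_key flag can travel with it, record a successful setkey(), and let accept() refuse a transform that still needs a key. Reduced to its shape, with hypothetical names rather than the crypto API:

#include <stdbool.h>
#include <stdio.h>

#define ENOKEY 126                  /* matching the Linux errno value */

struct hash_wrap {
    int tfm;                        /* placeholder for the real transform */
    bool has_key;
};

static int wrap_setkey(struct hash_wrap *w, int backend_err)
{
    w->has_key = (backend_err == 0);    /* remember the key state */
    return backend_err;
}

static int wrap_accept(const struct hash_wrap *w, bool needs_key)
{
    if (needs_key && !w->has_key)
        return -ENOKEY;             /* refuse use-before-setkey */
    return 0;
}

int main(void)
{
    struct hash_wrap w = { 0, false };

    printf("%d\n", wrap_accept(&w, true));  /* -126: no key loaded yet */
    wrap_setkey(&w, 0);
    printf("%d\n", wrap_accept(&w, true));  /* 0: key loaded */
    return 0;
}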

@ -31,6 +31,11 @@ struct skcipher_sg_list {
struct scatterlist sg[0];
};
struct skcipher_tfm {
struct crypto_ablkcipher *skcipher;
bool has_key;
};
struct skcipher_ctx {
struct list_head tsgl;
struct af_alg_sgl rsgl;
@ -544,19 +549,139 @@ static struct proto_ops algif_skcipher_ops = {
.poll = skcipher_poll,
};
static int skcipher_check_key(struct socket *sock)
{
int err = 0;
struct sock *psk;
struct alg_sock *pask;
struct skcipher_tfm *tfm;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
lock_sock(sk);
if (ask->refcnt)
goto unlock_child;
psk = ask->parent;
pask = alg_sk(ask->parent);
tfm = pask->private;
err = -ENOKEY;
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
if (!tfm->has_key)
goto unlock;
if (!pask->refcnt++)
sock_hold(psk);
ask->refcnt = 1;
sock_put(psk);
err = 0;
unlock:
release_sock(psk);
unlock_child:
release_sock(sk);
return err;
}
static int skcipher_sendmsg_nokey(struct kiocb *unused, struct socket *sock,
struct msghdr *msg, size_t size)
{
int err;
err = skcipher_check_key(sock);
if (err)
return err;
return skcipher_sendmsg(unused, sock, msg, size);
}
static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
int offset, size_t size, int flags)
{
int err;
err = skcipher_check_key(sock);
if (err)
return err;
return skcipher_sendpage(sock, page, offset, size, flags);
}
static int skcipher_recvmsg_nokey(struct kiocb *unused, struct socket *sock,
struct msghdr *msg, size_t ignored, int flags)
{
int err;
err = skcipher_check_key(sock);
if (err)
return err;
return skcipher_recvmsg(unused, sock, msg, ignored, flags);
}
static struct proto_ops algif_skcipher_ops_nokey = {
.family = PF_ALG,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.getname = sock_no_getname,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.getsockopt = sock_no_getsockopt,
.mmap = sock_no_mmap,
.bind = sock_no_bind,
.accept = sock_no_accept,
.setsockopt = sock_no_setsockopt,
.release = af_alg_release,
.sendmsg = skcipher_sendmsg_nokey,
.sendpage = skcipher_sendpage_nokey,
.recvmsg = skcipher_recvmsg_nokey,
.poll = skcipher_poll,
};
static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
return crypto_alloc_ablkcipher(name, type, mask);
struct skcipher_tfm *tfm;
struct crypto_ablkcipher *skcipher;
tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
if (!tfm)
return ERR_PTR(-ENOMEM);
skcipher = crypto_alloc_ablkcipher(name, type, mask);
if (IS_ERR(skcipher)) {
kfree(tfm);
return ERR_CAST(skcipher);
}
tfm->skcipher = skcipher;
return tfm;
}
static void skcipher_release(void *private)
{
crypto_free_ablkcipher(private);
struct skcipher_tfm *tfm = private;
crypto_free_ablkcipher(tfm->skcipher);
kfree(tfm);
}
static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
return crypto_ablkcipher_setkey(private, key, keylen);
struct skcipher_tfm *tfm = private;
int err;
err = crypto_ablkcipher_setkey(tfm->skcipher, key, keylen);
tfm->has_key = !err;
return err;
}
static void skcipher_sock_destruct(struct sock *sk)
@ -571,24 +696,25 @@ static void skcipher_sock_destruct(struct sock *sk)
af_alg_release_parent(sk);
}
static int skcipher_accept_parent(void *private, struct sock *sk)
static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
{
struct skcipher_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(private);
struct skcipher_tfm *tfm = private;
struct crypto_ablkcipher *skcipher = tfm->skcipher;
unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(skcipher);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(private),
ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(skcipher),
GFP_KERNEL);
if (!ctx->iv) {
sock_kfree_s(sk, ctx, len);
return -ENOMEM;
}
memset(ctx->iv, 0, crypto_ablkcipher_ivsize(private));
memset(ctx->iv, 0, crypto_ablkcipher_ivsize(skcipher));
INIT_LIST_HEAD(&ctx->tsgl);
ctx->len = len;
@ -600,21 +726,33 @@ static int skcipher_accept_parent(void *private, struct sock *sk)
ask->private = ctx;
ablkcipher_request_set_tfm(&ctx->req, private);
ablkcipher_request_set_tfm(&ctx->req, skcipher);
ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
af_alg_complete, &ctx->completion);
af_alg_complete, &ctx->completion);
sk->sk_destruct = skcipher_sock_destruct;
return 0;
}
static int skcipher_accept_parent(void *private, struct sock *sk)
{
struct skcipher_tfm *tfm = private;
if (!tfm->has_key && crypto_ablkcipher_has_setkey(tfm->skcipher))
return -ENOKEY;
return skcipher_accept_parent_nokey(private, sk);
}
static const struct af_alg_type algif_type_skcipher = {
.bind = skcipher_bind,
.release = skcipher_release,
.setkey = skcipher_setkey,
.accept = skcipher_accept_parent,
.accept_nokey = skcipher_accept_parent_nokey,
.ops = &algif_skcipher_ops,
.ops_nokey = &algif_skcipher_ops_nokey,
.name = "skcipher",
.owner = THIS_MODULE
};


@ -233,6 +233,8 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
return blkcipher_walk_done(desc, walk, -EINVAL);
}
bsize = min(walk->walk_blocksize, n);
walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
BLKCIPHER_WALK_DIFF);
if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
@ -245,7 +247,6 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
}
}
bsize = min(walk->walk_blocksize, n);
n = scatterwalk_clamp(&walk->in, n);
n = scatterwalk_clamp(&walk->out, n);
@ -471,6 +472,7 @@ static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
}
crt->base = __crypto_ablkcipher_cast(tfm);
crt->ivsize = alg->ivsize;
crt->has_setkey = alg->max_keysize;
return 0;
}


@ -565,9 +565,14 @@ static int cryptd_hash_export(struct ahash_request *req, void *out)
static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct shash_desc *desc = cryptd_shash_desc(req);
return crypto_shash_import(&rctx->desc, in);
desc->tfm = ctx->child;
desc->flags = req->base.flags;
return crypto_shash_import(desc, in);
}
static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,


@ -109,7 +109,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
struct crypto_ablkcipher *ctr = ctx->ctr;
struct {
be128 hash;
u8 iv[8];
u8 iv[16];
struct crypto_gcm_setkey_result result;


@ -353,9 +353,10 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
crt->final = shash_async_final;
crt->finup = shash_async_finup;
crt->digest = shash_async_digest;
crt->setkey = shash_async_setkey;
crt->has_setkey = alg->setkey != shash_no_setkey;
if (alg->setkey)
crt->setkey = shash_async_setkey;
if (alg->export)
crt->export = shash_async_export;
if (alg->import)


@ -647,7 +647,7 @@ static int ghes_proc(struct ghes *ghes)
ghes_do_proc(ghes, ghes->estatus);
out:
ghes_clear_estatus(ghes);
return 0;
return rc;
}
static void ghes_add_timer(struct ghes *ghes)


@ -827,11 +827,29 @@ static struct kobject *get_device_parent(struct device *dev,
return NULL;
}
static inline bool live_in_glue_dir(struct kobject *kobj,
struct device *dev)
{
if (!kobj || !dev->class ||
kobj->kset != &dev->class->p->glue_dirs)
return false;
return true;
}
static inline struct kobject *get_glue_dir(struct device *dev)
{
return dev->kobj.parent;
}
/*
* make sure cleaning up dir as the last step, we need to make
* sure .release handler of kobject is run with holding the
* global lock
*/
static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
{
/* see if we live in a "glue" directory */
if (!glue_dir || !dev->class ||
glue_dir->kset != &dev->class->p->glue_dirs)
if (!live_in_glue_dir(glue_dir, dev))
return;
mutex_lock(&gdp_mutex);
@ -839,11 +857,6 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
mutex_unlock(&gdp_mutex);
}
static void cleanup_device_parent(struct device *dev)
{
cleanup_glue_dir(dev, dev->kobj.parent);
}
static int device_add_class_symlinks(struct device *dev)
{
int error;
@ -1007,6 +1020,7 @@ int device_add(struct device *dev)
struct kobject *kobj;
struct class_interface *class_intf;
int error = -EINVAL;
struct kobject *glue_dir = NULL;
dev = get_device(dev);
if (!dev)
@ -1051,8 +1065,10 @@ int device_add(struct device *dev)
/* first, register with generic layer. */
/* we require the name to be set before, and pass NULL */
error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
if (error)
if (error) {
glue_dir = get_glue_dir(dev);
goto Error;
}
/* notify platform of device entry */
if (platform_notify)
@ -1141,11 +1157,11 @@ done:
device_remove_file(dev, &uevent_attr);
attrError:
kobject_uevent(&dev->kobj, KOBJ_REMOVE);
glue_dir = get_glue_dir(dev);
kobject_del(&dev->kobj);
Error:
cleanup_device_parent(dev);
if (parent)
put_device(parent);
cleanup_glue_dir(dev, glue_dir);
put_device(parent);
name_error:
kfree(dev->p);
dev->p = NULL;
@ -1216,6 +1232,7 @@ void put_device(struct device *dev)
void device_del(struct device *dev)
{
struct device *parent = dev->parent;
struct kobject *glue_dir = NULL;
struct class_interface *class_intf;
/* Notify clients of device removal. This call must come
@ -1257,8 +1274,9 @@ void device_del(struct device *dev)
if (platform_notify_remove)
platform_notify_remove(dev);
kobject_uevent(&dev->kobj, KOBJ_REMOVE);
cleanup_device_parent(dev);
glue_dir = get_glue_dir(dev);
kobject_del(&dev->kobj);
cleanup_glue_dir(dev, glue_dir);
put_device(parent);
}


@ -1771,7 +1771,7 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
* do we need to block DRBD_SIG if sock == &meta.socket ??
* otherwise wake_asender() might interrupt some send_*Ack !
*/
rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
rv = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
if (rv == -EAGAIN) {
if (we_should_drop_the_connection(tconn, sock))
break;


@ -269,8 +269,8 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
struct blkif_x86_32_request *src)
{
int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
dst->operation = src->operation;
switch (src->operation) {
dst->operation = ACCESS_ONCE(src->operation);
switch (dst->operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER:
@ -305,8 +305,8 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
struct blkif_x86_64_request *src)
{
int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
dst->operation = src->operation;
switch (src->operation) {
dst->operation = ACCESS_ONCE(src->operation);
switch (dst->operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER:

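The xen-blkback hunks above are the classic double-fetch fix: the request lives in a ring shared with a potentially hostile guest, so the operation field is fetched exactly once via ACCESS_ONCE() and every later check uses the local copy; otherwise the guest could change the value between the fetch that is validated and the fetch that is used. A self-contained illustration of the single-fetch pattern (READ_ONCE_U8 is a local stand-in for the kernel's ACCESS_ONCE):

#include <stdio.h>

/* Fetch a shared, concurrently writable byte exactly once. */
#define READ_ONCE_U8(x) (*(volatile unsigned char *)&(x))

struct req {
    unsigned char operation;        /* writable by the other side */
};

static void handle(struct req *shared)
{
    unsigned char op = READ_ONCE_U8(shared->operation);

    switch (op) {                   /* validate and use the same value */
    case 0:
        printf("read\n");
        break;
    case 1:
        printf("write\n");
        break;
    default:
        printf("rejected op %u\n", op);
        break;
    }
}

int main(void)
{
    struct req r = { .operation = 1 };

    handle(&r);
    return 0;
}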

@ -105,6 +105,7 @@ static int exynos_rng_probe(struct platform_device *pdev)
{
struct exynos_rng *exynos_rng;
struct resource *res;
int ret;
exynos_rng = devm_kzalloc(&pdev->dev, sizeof(struct exynos_rng),
GFP_KERNEL);
@ -132,7 +133,13 @@ static int exynos_rng_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
return hwrng_register(&exynos_rng->rng);
ret = hwrng_register(&exynos_rng->rng);
if (ret) {
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
}
return ret;
}
static int exynos_rng_remove(struct platform_device *pdev)


@ -127,7 +127,12 @@ static int omap_rng_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, priv);
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret);
pm_runtime_put_noidle(&pdev->dev);
goto err_ioremap;
}
ret = hwrng_register(&omap_rng_ops);
if (ret)
@ -182,8 +187,15 @@ static int omap_rng_suspend(struct device *dev)
static int omap_rng_resume(struct device *dev)
{
struct omap_rng_private_data *priv = dev_get_drvdata(dev);
int ret;
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "Failed to runtime_get device: %d\n", ret);
pm_runtime_put_noidle(dev);
return ret;
}
pm_runtime_get_sync(dev);
omap_rng_write_reg(priv, RNG_MASK_REG, 0x1);
return 0;


@ -73,12 +73,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
u64 cursor = from;
while (cursor < to) {
if (!devmem_is_allowed(pfn)) {
printk(KERN_INFO
"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
current->comm, from, to);
if (!devmem_is_allowed(pfn))
return 0;
}
cursor += PAGE_SIZE;
pfn++;
}


@ -968,7 +968,7 @@ static void edac_inc_ue_error(struct mem_ctl_info *mci,
mci->ue_mc += count;
if (!enable_per_layer_report) {
mci->ce_noinfo_count += count;
mci->ue_noinfo_count += count;
return;
}


@ -73,13 +73,13 @@ struct rfc2734_header {
#define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30)
#define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff))
#define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16)
#define fwnet_get_hdr_dg_size(h) ((((h)->w0 & 0x0fff0000) >> 16) + 1)
#define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff))
#define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16)
#define fwnet_set_hdr_lf(lf) ((lf) << 30)
#define fwnet_set_hdr_lf(lf) ((lf) << 30)
#define fwnet_set_hdr_ether_type(et) (et)
#define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16)
#define fwnet_set_hdr_dg_size(dgs) (((dgs) - 1) << 16)
#define fwnet_set_hdr_fg_off(fgo) (fgo)
#define fwnet_set_hdr_dgl(dgl) ((dgl) << 16)
@ -591,6 +591,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
int retval;
u16 ether_type;
if (len <= RFC2374_UNFRAG_HDR_SIZE)
return 0;
hdr.w0 = be32_to_cpu(buf[0]);
lf = fwnet_get_hdr_lf(&hdr);
if (lf == RFC2374_HDR_UNFRAG) {
@ -615,7 +618,12 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
return fwnet_finish_incoming_packet(net, skb, source_node_id,
is_broadcast, ether_type);
}
/* A datagram fragment has been received, now the fun begins. */
if (len <= RFC2374_FRAG_HDR_SIZE)
return 0;
hdr.w1 = ntohl(buf[1]);
buf += 2;
len -= RFC2374_FRAG_HDR_SIZE;
@ -627,7 +635,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
fg_off = fwnet_get_hdr_fg_off(&hdr);
}
datagram_label = fwnet_get_hdr_dgl(&hdr);
dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */
dg_size = fwnet_get_hdr_dg_size(&hdr);
if (fg_off + len > dg_size)
return 0;
spin_lock_irqsave(&dev->lock, flags);
@ -735,6 +746,22 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
fw_send_response(card, r, rcode);
}
static int gasp_source_id(__be32 *p)
{
return be32_to_cpu(p[0]) >> 16;
}
static u32 gasp_specifier_id(__be32 *p)
{
return (be32_to_cpu(p[0]) & 0xffff) << 8 |
(be32_to_cpu(p[1]) & 0xff000000) >> 24;
}
static u32 gasp_version(__be32 *p)
{
return be32_to_cpu(p[1]) & 0xffffff;
}
static void fwnet_receive_broadcast(struct fw_iso_context *context,
u32 cycle, size_t header_length, void *header, void *data)
{
@ -744,9 +771,6 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
__be32 *buf_ptr;
int retval;
u32 length;
u16 source_node_id;
u32 specifier_id;
u32 ver;
unsigned long offset;
unsigned long flags;
@ -763,22 +787,17 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
spin_unlock_irqrestore(&dev->lock, flags);
specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8
| (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24;
ver = be32_to_cpu(buf_ptr[1]) & 0xffffff;
source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;
if (specifier_id == IANA_SPECIFIER_ID &&
(ver == RFC2734_SW_VERSION
if (length > IEEE1394_GASP_HDR_SIZE &&
gasp_specifier_id(buf_ptr) == IANA_SPECIFIER_ID &&
(gasp_version(buf_ptr) == RFC2734_SW_VERSION
#if IS_ENABLED(CONFIG_IPV6)
|| ver == RFC3146_SW_VERSION
|| gasp_version(buf_ptr) == RFC3146_SW_VERSION
#endif
)) {
buf_ptr += 2;
length -= IEEE1394_GASP_HDR_SIZE;
fwnet_incoming_packet(dev, buf_ptr, length, source_node_id,
))
fwnet_incoming_packet(dev, buf_ptr + 2,
length - IEEE1394_GASP_HDR_SIZE,
gasp_source_id(buf_ptr),
context->card->generation, true);
}
packet.payload_length = dev->rcv_buffer_size;
packet.interrupt = 1;

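The firewire-net hunks above add the length validation the RFC 2734 receive path was missing: the buffer must be long enough for each header before its words are read, and a fragment must land inside the datagram it claims to belong to. A compilable sketch of the same ordering of checks (host-endian for brevity; the driver uses be32_to_cpu):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define UNFRAG_HDR_SIZE 4
#define FRAG_HDR_SIZE   8

static int parse_fragment(const uint8_t *buf, size_t len)
{
    uint32_t w0;
    size_t dg_size, fg_off;

    if (len <= UNFRAG_HDR_SIZE)     /* not even one header word */
        return -1;
    memcpy(&w0, buf, sizeof(w0));

    if (len <= FRAG_HDR_SIZE)       /* too short for a fragment header */
        return -1;

    dg_size = ((w0 >> 16) & 0xfff) + 1; /* field stores size - 1 */
    fg_off = w0 & 0xfff;
    len -= FRAG_HDR_SIZE;
    if (fg_off + len > dg_size)     /* fragment past the datagram end */
        return -1;
    return 0;
}

int main(void)
{
    uint8_t runt[8] = { 0 };

    /* len == FRAG_HDR_SIZE: rejected instead of reading past the end */
    printf("%d\n", parse_fragment(runt, sizeof(runt)));
    return 0;
}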

@ -295,7 +295,7 @@ static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int virq,
mpc8xxx_irq_chip.irq_set_type = mpc8xxx_gc->of_dev_id_data;
irq_set_chip_data(virq, h->host_data);
irq_set_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq);
irq_set_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_edge_irq);
return 0;
}


@ -3422,6 +3422,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
int hdisplay, vdisplay;
int ret = -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
page_flip->reserved != 0)
return -EINVAL;


@ -114,6 +114,8 @@ static int qxl_palette_create_1bit(struct qxl_bo **palette_bo,
palette_bo);
ret = qxl_bo_kmap(*palette_bo, (void **)&pal);
if (ret)
return ret;
pal->num_ents = 2;
pal->unique = unique++;
if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {


@ -257,6 +257,8 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
atombios_blank_crtc(crtc, ATOM_DISABLE);
drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
/* Make sure vblank interrupt is still enabled if needed */
radeon_irq_set(rdev);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:


@ -331,6 +331,8 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
}
drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
/* Make sure vblank interrupt is still enabled if needed */
radeon_irq_set(rdev);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:


@ -228,8 +228,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
rdev = radeon_get_rdev(bo->bdev);
ridx = radeon_copy_ring_index(rdev);
old_start = old_mem->start << PAGE_SHIFT;
new_start = new_mem->start << PAGE_SHIFT;
old_start = (u64)old_mem->start << PAGE_SHIFT;
new_start = (u64)new_mem->start << PAGE_SHIFT;
switch (old_mem->mem_type) {
case TTM_PL_VRAM:


@ -244,10 +244,14 @@ static void heartbeat_onchannelcallback(void *context)
struct heartbeat_msg_data *heartbeat_msg;
u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
vmbus_recvpacket(channel, hbeat_txf_buf,
PAGE_SIZE, &recvlen, &requestid);
while (1) {
vmbus_recvpacket(channel, hbeat_txf_buf,
PAGE_SIZE, &recvlen, &requestid);
if (!recvlen)
break;
if (recvlen > 0) {
icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
sizeof(struct vmbuspipe_hdr)];


@ -30,6 +30,7 @@
#define ADT7411_REG_CFG1 0x18
#define ADT7411_CFG1_START_MONITOR (1 << 0)
#define ADT7411_CFG1_RESERVED_BIT3 (1 << 3)
#define ADT7411_REG_CFG2 0x19
#define ADT7411_CFG2_DISABLE_AVG (1 << 5)
@ -292,8 +293,10 @@ static int adt7411_probe(struct i2c_client *client,
mutex_init(&data->device_lock);
mutex_init(&data->update_lock);
/* According to the datasheet, we must only write 1 to bit 3 */
ret = adt7411_modify_bit(client, ADT7411_REG_CFG1,
ADT7411_CFG1_START_MONITOR, 1);
ADT7411_CFG1_RESERVED_BIT3
| ADT7411_CFG1_START_MONITOR, 1);
if (ret < 0)
return ret;


@ -371,19 +371,57 @@ static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
if (!irqstatus)
return IRQ_NONE;
else if (irqstatus & AT91_TWI_RXRDY)
at91_twi_read_next_byte(dev);
else if (irqstatus & AT91_TWI_TXRDY)
at91_twi_write_next_byte(dev);
/* catch error flags */
dev->transfer_status |= status;
/*
* When a NACK condition is detected, the I2C controller sets the NACK,
* TXCOMP and TXRDY bits all together in the Status Register (SR).
*
* 1 - Handling NACK errors with CPU write transfer.
*
* In such case, we should not write the next byte into the Transmit
* Holding Register (THR) otherwise the I2C controller would start a new
* transfer and the I2C slave is likely to reply by another NACK.
*
* 2 - Handling NACK errors with DMA write transfer.
*
* By setting the TXRDY bit in the SR, the I2C controller also triggers
* the DMA controller to write the next data into the THR. Then the
* result depends on the hardware version of the I2C controller.
*
* 2a - Without support of the Alternative Command mode.
*
* This is the worst case: the DMA controller is triggered to write the
* next data into the THR, hence starting a new transfer: the I2C slave
* is likely to reply by another NACK.
* Concurrently, this interrupt handler is likely to be called to manage
* the first NACK before the I2C controller detects the second NACK and
* sets once again the NACK bit into the SR.
* When handling the first NACK, this interrupt handler disables the I2C
* controller interruptions, especially the NACK interrupt.
* Hence, the NACK bit is pending into the SR. This is why we should
* read the SR to clear all pending interrupts at the beginning of
* at91_do_twi_transfer() before actually starting a new transfer.
*
* 2b - With support of the Alternative Command mode.
*
* When a NACK condition is detected, the I2C controller also locks the
* THR (and sets the LOCK bit in the SR): even though the DMA controller
* is triggered by the TXRDY bit to write the next data into the THR,
* this data actually won't go on the I2C bus hence a second NACK is not
* generated.
*/
if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) {
at91_disable_twi_interrupts(dev);
complete(&dev->cmd_complete);
} else if (irqstatus & AT91_TWI_RXRDY) {
at91_twi_read_next_byte(dev);
} else if (irqstatus & AT91_TWI_TXRDY) {
at91_twi_write_next_byte(dev);
}
/* catch error flags */
dev->transfer_status |= status;
return IRQ_HANDLED;
}
@ -391,6 +429,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
{
int ret;
bool has_unre_flag = dev->pdata->has_unre_flag;
unsigned sr;
/*
* WARNING: the TXCOMP bit in the Status Register is NOT a clear on
@ -426,13 +465,16 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
INIT_COMPLETION(dev->cmd_complete);
dev->transfer_status = 0;
/* Clear pending interrupts, such as NACK. */
sr = at91_twi_read(dev, AT91_TWI_SR);
if (!dev->buf_len) {
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
} else if (dev->msg->flags & I2C_M_RD) {
unsigned start_flags = AT91_TWI_START;
if (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY) {
if (sr & AT91_TWI_RXRDY) {
dev_err(dev->dev, "RXRDY still set!");
at91_twi_read(dev, AT91_TWI_RHR);
}


@ -798,13 +798,6 @@ static int pch_i2c_probe(struct pci_dev *pdev,
/* Set the number of I2C channel instance */
adap_info->ch_num = id->driver_data;
ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
KBUILD_MODNAME, adap_info);
if (ret) {
pch_pci_err(pdev, "request_irq FAILED\n");
goto err_request_irq;
}
for (i = 0; i < adap_info->ch_num; i++) {
pch_adap = &adap_info->pch_data[i].pch_adapter;
adap_info->pch_i2c_suspended = false;
@ -821,6 +814,17 @@ static int pch_i2c_probe(struct pci_dev *pdev,
adap_info->pch_data[i].pch_base_address = base_addr + 0x100 * i;
pch_adap->dev.parent = &pdev->dev;
}
ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
KBUILD_MODNAME, adap_info);
if (ret) {
pch_pci_err(pdev, "request_irq FAILED\n");
goto err_request_irq;
}
for (i = 0; i < adap_info->ch_num; i++) {
pch_adap = &adap_info->pch_data[i].pch_adapter;
pch_i2c_init(&adap_info->pch_data[i]);


@ -1323,6 +1323,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
/* add the driver to the list of i2c drivers in the driver core */
driver->driver.owner = owner;
driver->driver.bus = &i2c_bus_type;
INIT_LIST_HEAD(&driver->clients);
/* When registration returns, the driver core
* will have called probe() for all matching-but-unbound devices.
@ -1341,7 +1342,6 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name);
INIT_LIST_HEAD(&driver->clients);
/* Walk the adapters that are already present */
i2c_for_each_dev(driver, __process_new_driver);


@ -160,11 +160,13 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
if (ret < 0)
goto error_ret;
*val = ret;
ret = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SCALE:
ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
if (ret < 0)
goto error_ret;
*val = 0;
*val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
ret = IIO_VAL_INT_PLUS_MICRO;
break;


@ -79,6 +79,8 @@ static struct ib_cm {
__be32 random_id_operand;
struct list_head timewait_list;
struct workqueue_struct *wq;
/* Sync on cm change port state */
spinlock_t state_lock;
} cm;
/* Counter indexes ordered by attribute ID */
@ -160,6 +162,8 @@ struct cm_port {
struct ib_mad_agent *mad_agent;
struct kobject port_obj;
u8 port_num;
struct list_head cm_priv_prim_list;
struct list_head cm_priv_altr_list;
struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
};
@ -237,6 +241,12 @@ struct cm_id_private {
u8 service_timeout;
u8 target_ack_delay;
struct list_head prim_list;
struct list_head altr_list;
/* Indicates that the send port mad is registered and av is set */
int prim_send_port_not_ready;
int altr_send_port_not_ready;
struct list_head work_list;
atomic_t work_count;
};
@ -255,19 +265,46 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
struct ib_mad_agent *mad_agent;
struct ib_mad_send_buf *m;
struct ib_ah *ah;
struct cm_av *av;
unsigned long flags, flags2;
int ret = 0;
/* don't let the port to be released till the agent is down */
spin_lock_irqsave(&cm.state_lock, flags2);
spin_lock_irqsave(&cm.lock, flags);
if (!cm_id_priv->prim_send_port_not_ready)
av = &cm_id_priv->av;
else if (!cm_id_priv->altr_send_port_not_ready &&
(cm_id_priv->alt_av.port))
av = &cm_id_priv->alt_av;
else {
pr_info("%s: not valid CM id\n", __func__);
ret = -ENODEV;
spin_unlock_irqrestore(&cm.lock, flags);
goto out;
}
spin_unlock_irqrestore(&cm.lock, flags);
/* Make sure the port haven't released the mad yet */
mad_agent = cm_id_priv->av.port->mad_agent;
ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
if (IS_ERR(ah))
return PTR_ERR(ah);
if (!mad_agent) {
pr_info("%s: not a valid MAD agent\n", __func__);
ret = -ENODEV;
goto out;
}
ah = ib_create_ah(mad_agent->qp->pd, &av->ah_attr);
if (IS_ERR(ah)) {
ret = PTR_ERR(ah);
goto out;
}
m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
cm_id_priv->av.pkey_index,
av->pkey_index,
0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
GFP_ATOMIC);
if (IS_ERR(m)) {
ib_destroy_ah(ah);
return PTR_ERR(m);
ret = PTR_ERR(m);
goto out;
}
/* Timeout set by caller if response is expected. */
@ -277,7 +314,10 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
atomic_inc(&cm_id_priv->refcount);
m->context[0] = cm_id_priv;
*msg = m;
return 0;
out:
spin_unlock_irqrestore(&cm.state_lock, flags2);
return ret;
}
static int cm_alloc_response_msg(struct cm_port *port,
@ -346,7 +386,8 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
grh, &av->ah_attr);
}
static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av,
struct cm_id_private *cm_id_priv)
{
struct cm_device *cm_dev;
struct cm_port *port = NULL;
@ -376,7 +417,18 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
&av->ah_attr);
av->timeout = path->packet_life_time + 1;
return 0;
spin_lock_irqsave(&cm.lock, flags);
if (&cm_id_priv->av == av)
list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
else if (&cm_id_priv->alt_av == av)
list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
else
ret = -EINVAL;
spin_unlock_irqrestore(&cm.lock, flags);
return ret;
}
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
@ -716,6 +768,8 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
spin_lock_init(&cm_id_priv->lock);
init_completion(&cm_id_priv->comp);
INIT_LIST_HEAD(&cm_id_priv->work_list);
INIT_LIST_HEAD(&cm_id_priv->prim_list);
INIT_LIST_HEAD(&cm_id_priv->altr_list);
atomic_set(&cm_id_priv->work_count, -1);
atomic_set(&cm_id_priv->refcount, 1);
return &cm_id_priv->id;
@ -914,6 +968,15 @@ retest:
break;
}
spin_lock_irq(&cm.lock);
if (!list_empty(&cm_id_priv->altr_list) &&
(!cm_id_priv->altr_send_port_not_ready))
list_del(&cm_id_priv->altr_list);
if (!list_empty(&cm_id_priv->prim_list) &&
(!cm_id_priv->prim_send_port_not_ready))
list_del(&cm_id_priv->prim_list);
spin_unlock_irq(&cm.lock);
cm_free_id(cm_id->local_id);
cm_deref_id(cm_id_priv);
wait_for_completion(&cm_id_priv->comp);
@ -1137,12 +1200,13 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
goto out;
}
ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av,
cm_id_priv);
if (ret)
goto error1;
if (param->alternate_path) {
ret = cm_init_av_by_path(param->alternate_path,
&cm_id_priv->alt_av);
&cm_id_priv->alt_av, cm_id_priv);
if (ret)
goto error1;
}
@ -1562,7 +1626,8 @@ static int cm_req_handler(struct cm_work *work)
cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
cm_id_priv);
if (ret) {
ib_get_cached_gid(work->port->cm_dev->ib_device,
work->port->port_num, 0, &work->path[0].sgid);
@ -1572,7 +1637,8 @@ static int cm_req_handler(struct cm_work *work)
goto rejected;
}
if (req_msg->alt_local_lid) {
ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
cm_id_priv);
if (ret) {
ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
&work->path[0].sgid,
@ -2627,7 +2693,8 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
goto out;
}
ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av,
cm_id_priv);
if (ret)
goto out;
cm_id_priv->alt_av.timeout =
@ -2739,7 +2806,8 @@ static int cm_lap_handler(struct cm_work *work)
cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
work->mad_recv_wc->recv_buf.grh,
&cm_id_priv->av);
cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
cm_id_priv);
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
@ -2931,7 +2999,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
return -EINVAL;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv);
if (ret)
goto out;
@ -3352,7 +3420,9 @@ out:
static int cm_migrate(struct ib_cm_id *cm_id)
{
struct cm_id_private *cm_id_priv;
struct cm_av tmp_av;
unsigned long flags;
int tmp_send_port_not_ready;
int ret = 0;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
@ -3361,7 +3431,14 @@ static int cm_migrate(struct ib_cm_id *cm_id)
(cm_id->lap_state == IB_CM_LAP_UNINIT ||
cm_id->lap_state == IB_CM_LAP_IDLE)) {
cm_id->lap_state = IB_CM_LAP_IDLE;
/* Swap address vector */
tmp_av = cm_id_priv->av;
cm_id_priv->av = cm_id_priv->alt_av;
cm_id_priv->alt_av = tmp_av;
/* Swap port send ready state */
tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
} else
ret = -EINVAL;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@ -3767,6 +3844,9 @@ static void cm_add_one(struct ib_device *ib_device)
port->cm_dev = cm_dev;
port->port_num = i;
INIT_LIST_HEAD(&port->cm_priv_prim_list);
INIT_LIST_HEAD(&port->cm_priv_altr_list);
ret = cm_create_port_fs(port);
if (ret)
goto error1;
@ -3813,6 +3893,8 @@ static void cm_remove_one(struct ib_device *ib_device)
{
struct cm_device *cm_dev;
struct cm_port *port;
struct cm_id_private *cm_id_priv;
struct ib_mad_agent *cur_mad_agent;
struct ib_port_modify port_modify = {
.clr_port_cap_mask = IB_PORT_CM_SUP
};
@ -3830,10 +3912,22 @@ static void cm_remove_one(struct ib_device *ib_device)
for (i = 1; i <= ib_device->phys_port_cnt; i++) {
port = cm_dev->port[i-1];
ib_modify_port(ib_device, port->port_num, 0, &port_modify);
ib_unregister_mad_agent(port->mad_agent);
/* Mark all the cm_id's as not valid */
spin_lock_irq(&cm.lock);
list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
cm_id_priv->altr_send_port_not_ready = 1;
list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
cm_id_priv->prim_send_port_not_ready = 1;
spin_unlock_irq(&cm.lock);
flush_workqueue(cm.wq);
spin_lock_irq(&cm.state_lock);
cur_mad_agent = port->mad_agent;
port->mad_agent = NULL;
spin_unlock_irq(&cm.state_lock);
ib_unregister_mad_agent(cur_mad_agent);
cm_remove_port_fs(port);
}
device_unregister(cm_dev->device);
kfree(cm_dev);
}
@ -3846,6 +3940,7 @@ static int __init ib_cm_init(void)
INIT_LIST_HEAD(&cm.device_list);
rwlock_init(&cm.device_lock);
spin_lock_init(&cm.lock);
spin_lock_init(&cm.state_lock);
cm.listen_service_table = RB_ROOT;
cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
cm.remote_id_table = RB_ROOT;


@ -106,7 +106,6 @@ struct mcast_group {
atomic_t refcount;
enum mcast_group_state state;
struct ib_sa_query *query;
int query_id;
u16 pkey_index;
u8 leave_state;
int retries;
@ -339,11 +338,7 @@ static int send_join(struct mcast_group *group, struct mcast_member *member)
member->multicast.comp_mask,
3000, GFP_KERNEL, join_handler, group,
&group->query);
if (ret >= 0) {
group->query_id = ret;
ret = 0;
}
return ret;
return (ret > 0) ? 0 : ret;
}
static int send_leave(struct mcast_group *group, u8 leave_state)
@ -363,11 +358,7 @@ static int send_leave(struct mcast_group *group, u8 leave_state)
IB_SA_MCMEMBER_REC_JOIN_STATE,
3000, GFP_KERNEL, leave_handler,
group, &group->query);
if (ret >= 0) {
group->query_id = ret;
ret = 0;
}
return ret;
return (ret > 0) ? 0 : ret;
}
static void join_group(struct mcast_group *group, struct mcast_member *member,


@ -224,12 +224,9 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
container_of(uobj, struct ib_uqp_object, uevent.uobject);
idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
if (qp != qp->real_qp) {
ib_close_qp(qp);
} else {
if (qp == qp->real_qp)
ib_uverbs_detach_umcast(qp, uqp);
ib_destroy_qp(qp);
}
ib_destroy_qp(qp);
ib_uverbs_release_uevent(file, &uqp->uevent);
kfree(uqp);
}


@ -239,11 +239,14 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
if (context)
if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
err = -EFAULT;
goto err_dbmap;
goto err_cq_free;
}
return &cq->ibcq;
err_cq_free:
mlx4_cq_free(dev->dev, &cq->mcq);
err_dbmap:
if (context)
mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);


@ -483,7 +483,7 @@ static u8 get_leave_state(struct mcast_group *group)
if (!group->members[i])
leave_state |= (1 << i);
return leave_state & (group->rec.scope_join_state & 7);
return leave_state & (group->rec.scope_join_state & 0xf);
}
static int join_group(struct mcast_group *group, int slave, u8 join_mask)
@ -558,8 +558,8 @@ static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
} else
mcg_warn_group(group, "DRIVER BUG\n");
} else if (group->state == MCAST_LEAVE_SENT) {
if (group->rec.scope_join_state & 7)
group->rec.scope_join_state &= 0xf8;
if (group->rec.scope_join_state & 0xf)
group->rec.scope_join_state &= 0xf0;
group->state = MCAST_IDLE;
mutex_unlock(&group->lock);
if (release_group(group, 1))
@ -599,7 +599,7 @@ static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
static int handle_join_req(struct mcast_group *group, u8 join_mask,
struct mcast_req *req)
{
u8 group_join_state = group->rec.scope_join_state & 7;
u8 group_join_state = group->rec.scope_join_state & 0xf;
int ref = 0;
u16 status;
struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
@ -684,8 +684,8 @@ static void mlx4_ib_mcg_work_handler(struct work_struct *work)
u8 cur_join_state;
resp_join_state = ((struct ib_sa_mcmember_data *)
group->response_sa_mad.data)->scope_join_state & 7;
cur_join_state = group->rec.scope_join_state & 7;
group->response_sa_mad.data)->scope_join_state & 0xf;
cur_join_state = group->rec.scope_join_state & 0xf;
if (method == IB_MGMT_METHOD_GET_RESP) {
/* successfull join */
@ -704,7 +704,7 @@ process_requests:
req = list_first_entry(&group->pending_list, struct mcast_req,
group_list);
sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
req_join_state = sa_data->scope_join_state & 0x7;
req_join_state = sa_data->scope_join_state & 0xf;
/* For a leave request, we will immediately answer the VF, and
* update our internal counters. The actual leave will be sent
