Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull more s390 updates from Martin Schwidefsky:
 "The big thing in this second merge for s390 is the new eBPF JIT from
  Michael which replaces the old 32-bit backend.

  The remaining commits are bug fixes"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/pci: add locking for fmb access
  s390/pci: extract software counters from fmb
  s390/dasd: Fix unresumed device after suspend/resume having no paths
  s390/dasd: fix unresumed device after suspend/resume
  s390/dasd: fix inability to set a DASD device offline
  s390/mm: Fix memory hotplug for unaligned standby memory
  s390/bpf: Add s390x eBPF JIT compiler backend
  s390: Use bool function return values of true/false not 1/0
This commit is contained in:
Linus Torvalds 2015-04-20 10:15:33 -07:00
commit b19a42e3cb
12 changed files with 1375 additions and 868 deletions

View File

@ -115,7 +115,7 @@ config S390
select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_BPF_JIT if PACK_STACK select HAVE_BPF_JIT if PACK_STACK && HAVE_MARCH_Z9_109_FEATURES
select HAVE_CMPXCHG_DOUBLE select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL select HAVE_CMPXCHG_LOCAL
select HAVE_DEBUG_KMEMLEAK select HAVE_DEBUG_KMEMLEAK

View File

@ -42,7 +42,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{ {
if (!dev->dma_mask) if (!dev->dma_mask)
return 0; return false;
return addr + size - 1 <= *dev->dma_mask; return addr + size - 1 <= *dev->dma_mask;
} }

View File

@ -7,6 +7,7 @@
#define PCI_BAR_COUNT 6 #define PCI_BAR_COUNT 6
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/mutex.h>
#include <asm-generic/pci.h> #include <asm-generic/pci.h>
#include <asm-generic/pci-dma-compat.h> #include <asm-generic/pci-dma-compat.h>
#include <asm/pci_clp.h> #include <asm/pci_clp.h>
@ -44,10 +45,6 @@ struct zpci_fmb {
u64 rpcit_ops; u64 rpcit_ops;
u64 dma_rbytes; u64 dma_rbytes;
u64 dma_wbytes; u64 dma_wbytes;
/* software counters */
atomic64_t allocated_pages;
atomic64_t mapped_pages;
atomic64_t unmapped_pages;
} __packed __aligned(16); } __packed __aligned(16);
enum zpci_state { enum zpci_state {
@ -80,6 +77,7 @@ struct zpci_dev {
u8 pft; /* pci function type */ u8 pft; /* pci function type */
u16 domain; u16 domain;
struct mutex lock;
u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */ u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */
u32 uid; /* user defined id */ u32 uid; /* user defined id */
u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */ u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */
@ -111,6 +109,10 @@ struct zpci_dev {
/* Function measurement block */ /* Function measurement block */
struct zpci_fmb *fmb; struct zpci_fmb *fmb;
u16 fmb_update; /* update interval */ u16 fmb_update; /* update interval */
/* software counters */
atomic64_t allocated_pages;
atomic64_t mapped_pages;
atomic64_t unmapped_pages;
enum pci_bus_speed max_bus_speed; enum pci_bus_speed max_bus_speed;

View File

@ -1,134 +1,115 @@
/* /*
* BPF Jit compiler for s390, help functions. * BPF Jit compiler for s390, help functions.
* *
* Copyright IBM Corp. 2012 * Copyright IBM Corp. 2012,2015
* *
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
* Michael Holzheu <holzheu@linux.vnet.ibm.com>
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include "bpf_jit.h"
/* /*
* Calling convention: * Calling convention:
* registers %r2, %r6-%r8, %r10-%r11, %r13, %r15 are call saved * registers %r7-%r10, %r11,%r13, and %r15 are call saved
* %r2: skb pointer *
* %r3: offset parameter * Input (64 bit):
* %r5: BPF A accumulator * %r3 (%b2) = offset into skb data
* %r8: return address * %r6 (%b5) = return address
* %r9: save register for skb pointer * %r7 (%b6) = skb pointer
* %r10: skb->data * %r12 = skb data pointer
* %r11: skb->len - skb->data_len (headlen) *
* %r12: BPF X accumulator * Output:
* %r14= %b0 = return value (read skb value)
*
* Work registers: %r2,%r4,%r5,%r14
* *
* skb_copy_bits takes 4 parameters: * skb_copy_bits takes 4 parameters:
* %r2 = skb pointer * %r2 = skb pointer
* %r3 = offset into skb data * %r3 = offset into skb data
* %r4 = pointer to temp buffer * %r4 = pointer to temp buffer
* %r5 = length to copy * %r5 = length to copy
* Return value in %r2: 0 = ok
*
* bpf_internal_load_pointer_neg_helper takes 3 parameters:
* %r2 = skb pointer
* %r3 = offset into data
* %r4 = length to copy
* Return value in %r2: Pointer to data
*/ */
#define SKBDATA %r8
/* A = *(u32 *) (skb->data+K+X) */ #define SKF_MAX_NEG_OFF -0x200000 /* SKF_LL_OFF from filter.h */
ENTRY(sk_load_word_ind)
ar %r3,%r12 # offset += X
bmr %r8 # < 0 -> return with cc
/* A = *(u32 *) (skb->data+K) */ /*
ENTRY(sk_load_word) * Load SIZE bytes from SKB
llgfr %r1,%r3 # extend offset */
ahi %r3,4 # offset + 4 #define sk_load_common(NAME, SIZE, LOAD) \
clr %r11,%r3 # hlen <= offset + 4 ? ENTRY(sk_load_##NAME); \
jl sk_load_word_slow ltgr %r3,%r3; /* Is offset negative? */ \
l %r5,0(%r1,%r10) # get word from skb jl sk_load_##NAME##_slow_neg; \
xr %r1,%r1 # set cc to zero ENTRY(sk_load_##NAME##_pos); \
br %r8 aghi %r3,SIZE; /* Offset + SIZE */ \
clg %r3,STK_OFF_HLEN(%r15); /* Offset + SIZE > hlen? */ \
jh sk_load_##NAME##_slow; \
LOAD %r14,-SIZE(%r3,%r12); /* Get data from skb */ \
b OFF_OK(%r6); /* Return */ \
\
sk_load_##NAME##_slow:; \
lgr %r2,%r7; /* Arg1 = skb pointer */ \
aghi %r3,-SIZE; /* Arg2 = offset */ \
la %r4,STK_OFF_TMP(%r15); /* Arg3 = temp buffer */ \
lghi %r5,SIZE; /* Arg4 = size */ \
brasl %r14,skb_copy_bits; /* Get data from skb */ \
LOAD %r14,STK_OFF_TMP(%r15); /* Load from temp buffer */ \
ltgr %r2,%r2; /* Set cc to (%r2 != 0) */ \
br %r6; /* Return */
sk_load_word_slow: sk_load_common(word, 4, llgf) /* r14 = *(u32 *) (skb->data+offset) */
lgr %r9,%r2 # save %r2 sk_load_common(half, 2, llgh) /* r14 = *(u16 *) (skb->data+offset) */
lgr %r3,%r1 # offset
la %r4,160(%r15) # pointer to temp buffer
lghi %r5,4 # 4 bytes
brasl %r14,skb_copy_bits # get data from skb
l %r5,160(%r15) # load result from temp buffer
ltgr %r2,%r2 # set cc to (%r2 != 0)
lgr %r2,%r9 # restore %r2
br %r8
/* A = *(u16 *) (skb->data+K+X) */ /*
ENTRY(sk_load_half_ind) * Load 1 byte from SKB (optimized version)
ar %r3,%r12 # offset += X */
bmr %r8 # < 0 -> return with cc /* r14 = *(u8 *) (skb->data+offset) */
/* A = *(u16 *) (skb->data+K) */
ENTRY(sk_load_half)
llgfr %r1,%r3 # extend offset
ahi %r3,2 # offset + 2
clr %r11,%r3 # hlen <= offset + 2 ?
jl sk_load_half_slow
llgh %r5,0(%r1,%r10) # get half from skb
xr %r1,%r1 # set cc to zero
br %r8
sk_load_half_slow:
lgr %r9,%r2 # save %r2
lgr %r3,%r1 # offset
la %r4,162(%r15) # pointer to temp buffer
lghi %r5,2 # 2 bytes
brasl %r14,skb_copy_bits # get data from skb
xc 160(2,%r15),160(%r15)
l %r5,160(%r15) # load result from temp buffer
ltgr %r2,%r2 # set cc to (%r2 != 0)
lgr %r2,%r9 # restore %r2
br %r8
/* A = *(u8 *) (skb->data+K+X) */
ENTRY(sk_load_byte_ind)
ar %r3,%r12 # offset += X
bmr %r8 # < 0 -> return with cc
/* A = *(u8 *) (skb->data+K) */
ENTRY(sk_load_byte) ENTRY(sk_load_byte)
llgfr %r1,%r3 # extend offset ltgr %r3,%r3 # Is offset negative?
clr %r11,%r3 # hlen < offset ? jl sk_load_byte_slow_neg
jle sk_load_byte_slow ENTRY(sk_load_byte_pos)
lhi %r5,0 clg %r3,STK_OFF_HLEN(%r15) # Offset >= hlen?
ic %r5,0(%r1,%r10) # get byte from skb jnl sk_load_byte_slow
xr %r1,%r1 # set cc to zero llgc %r14,0(%r3,%r12) # Get byte from skb
br %r8 b OFF_OK(%r6) # Return OK
sk_load_byte_slow: sk_load_byte_slow:
lgr %r9,%r2 # save %r2 lgr %r2,%r7 # Arg1 = skb pointer
lgr %r3,%r1 # offset # Arg2 = offset
la %r4,163(%r15) # pointer to temp buffer la %r4,STK_OFF_TMP(%r15) # Arg3 = pointer to temp buffer
lghi %r5,1 # 1 byte lghi %r5,1 # Arg4 = size (1 byte)
brasl %r14,skb_copy_bits # get data from skb brasl %r14,skb_copy_bits # Get data from skb
xc 160(3,%r15),160(%r15) llgc %r14,STK_OFF_TMP(%r15) # Load result from temp buffer
l %r5,160(%r15) # load result from temp buffer ltgr %r2,%r2 # Set cc to (%r2 != 0)
ltgr %r2,%r2 # set cc to (%r2 != 0) br %r6 # Return cc
lgr %r2,%r9 # restore %r2
br %r8
/* X = (*(u8 *)(skb->data+K) & 0xf) << 2 */ #define sk_negative_common(NAME, SIZE, LOAD) \
ENTRY(sk_load_byte_msh) sk_load_##NAME##_slow_neg:; \
llgfr %r1,%r3 # extend offset cgfi %r3,SKF_MAX_NEG_OFF; \
clr %r11,%r3 # hlen < offset ? jl bpf_error; \
jle sk_load_byte_msh_slow lgr %r2,%r7; /* Arg1 = skb pointer */ \
lhi %r12,0 /* Arg2 = offset */ \
ic %r12,0(%r1,%r10) # get byte from skb lghi %r4,SIZE; /* Arg3 = size */ \
nill %r12,0x0f brasl %r14,bpf_internal_load_pointer_neg_helper; \
sll %r12,2 ltgr %r2,%r2; \
xr %r1,%r1 # set cc to zero jz bpf_error; \
br %r8 LOAD %r14,0(%r2); /* Get data from pointer */ \
xr %r3,%r3; /* Set cc to zero */ \
br %r6; /* Return cc */
sk_load_byte_msh_slow: sk_negative_common(word, 4, llgf)
lgr %r9,%r2 # save %r2 sk_negative_common(half, 2, llgh)
lgr %r3,%r1 # offset sk_negative_common(byte, 1, llgc)
la %r4,163(%r15) # pointer to temp buffer
lghi %r5,1 # 1 byte bpf_error:
brasl %r14,skb_copy_bits # get data from skb # force a return 0 from jit handler
xc 160(3,%r15),160(%r15) ltgr %r15,%r15 # Set condition code
l %r12,160(%r15) # load result from temp buffer br %r6
nill %r12,0x0f
sll %r12,2
ltgr %r2,%r2 # set cc to (%r2 != 0)
lgr %r2,%r9 # restore %r2
br %r8

58
arch/s390/net/bpf_jit.h Normal file
View File

@ -0,0 +1,58 @@
/*
* BPF Jit compiler defines
*
* Copyright IBM Corp. 2012,2015
*
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
* Michael Holzheu <holzheu@linux.vnet.ibm.com>
*/
#ifndef __ARCH_S390_NET_BPF_JIT_H
#define __ARCH_S390_NET_BPF_JIT_H
#ifndef __ASSEMBLY__
#include <linux/filter.h>
#include <linux/types.h>
extern u8 sk_load_word_pos[], sk_load_half_pos[], sk_load_byte_pos[];
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
#endif /* __ASSEMBLY__ */
/*
* Stackframe layout (packed stack):
*
* ^ high
* +---------------+ |
* | old backchain | |
* +---------------+ |
* | r15 - r6 | |
* BFP -> +===============+ |
* | | |
* | BPF stack | |
* | | |
* +---------------+ |
* | 8 byte hlen | |
* R15+168 -> +---------------+ |
* | 4 byte align | |
* +---------------+ |
* | 4 byte temp | |
* | for bpf_jit.S | |
* R15+160 -> +---------------+ |
* | new backchain | |
* R15+152 -> +---------------+ |
* | + 152 byte SA | |
* R15 -> +---------------+ + low
*
* We get 160 bytes stack space from calling function, but only use
* 11 * 8 byte (old backchain + r15 - r6) for storing registers.
*/
#define STK_OFF (MAX_BPF_STACK + 8 + 4 + 4 + (160 - 11 * 8))
#define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */
#define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */
/* Offset to skip condition code check */
#define OFF_OK 4
#endif /* __ARCH_S390_NET_BPF_JIT_H */

File diff suppressed because it is too large Load Diff

View File

@ -190,6 +190,11 @@ int zpci_fmb_enable_device(struct zpci_dev *zdev)
return -ENOMEM; return -ENOMEM;
WARN_ON((u64) zdev->fmb & 0xf); WARN_ON((u64) zdev->fmb & 0xf);
/* reset software counters */
atomic64_set(&zdev->allocated_pages, 0);
atomic64_set(&zdev->mapped_pages, 0);
atomic64_set(&zdev->unmapped_pages, 0);
args.fmb_addr = virt_to_phys(zdev->fmb); args.fmb_addr = virt_to_phys(zdev->fmb);
return mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args); return mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);
} }
@ -822,6 +827,7 @@ int zpci_create_device(struct zpci_dev *zdev)
if (rc) if (rc)
goto out; goto out;
mutex_init(&zdev->lock);
if (zdev->state == ZPCI_FN_STATE_CONFIGURED) { if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
rc = zpci_enable_device(zdev); rc = zpci_enable_device(zdev);
if (rc) if (rc)

View File

@ -31,12 +31,25 @@ static char *pci_perf_names[] = {
"Refresh operations", "Refresh operations",
"DMA read bytes", "DMA read bytes",
"DMA write bytes", "DMA write bytes",
/* software counters */ };
static char *pci_sw_names[] = {
"Allocated pages", "Allocated pages",
"Mapped pages", "Mapped pages",
"Unmapped pages", "Unmapped pages",
}; };
static void pci_sw_counter_show(struct seq_file *m)
{
struct zpci_dev *zdev = m->private;
atomic64_t *counter = &zdev->allocated_pages;
int i;
for (i = 0; i < ARRAY_SIZE(pci_sw_names); i++, counter++)
seq_printf(m, "%26s:\t%llu\n", pci_sw_names[i],
atomic64_read(counter));
}
static int pci_perf_show(struct seq_file *m, void *v) static int pci_perf_show(struct seq_file *m, void *v)
{ {
struct zpci_dev *zdev = m->private; struct zpci_dev *zdev = m->private;
@ -45,7 +58,10 @@ static int pci_perf_show(struct seq_file *m, void *v)
if (!zdev) if (!zdev)
return 0; return 0;
mutex_lock(&zdev->lock);
if (!zdev->fmb) { if (!zdev->fmb) {
mutex_unlock(&zdev->lock);
seq_puts(m, "FMB statistics disabled\n"); seq_puts(m, "FMB statistics disabled\n");
return 0; return 0;
} }
@ -65,12 +81,9 @@ static int pci_perf_show(struct seq_file *m, void *v)
for (i = 4; i < 6; i++) for (i = 4; i < 6; i++)
seq_printf(m, "%26s:\t%llu\n", seq_printf(m, "%26s:\t%llu\n",
pci_perf_names[i], *(stat + i)); pci_perf_names[i], *(stat + i));
/* software counters */
for (i = 6; i < ARRAY_SIZE(pci_perf_names); i++)
seq_printf(m, "%26s:\t%llu\n",
pci_perf_names[i],
atomic64_read((atomic64_t *) (stat + i)));
pci_sw_counter_show(m);
mutex_unlock(&zdev->lock);
return 0; return 0;
} }
@ -88,19 +101,17 @@ static ssize_t pci_perf_seq_write(struct file *file, const char __user *ubuf,
if (rc) if (rc)
return rc; return rc;
mutex_lock(&zdev->lock);
switch (val) { switch (val) {
case 0: case 0:
rc = zpci_fmb_disable_device(zdev); rc = zpci_fmb_disable_device(zdev);
if (rc)
return rc;
break; break;
case 1: case 1:
rc = zpci_fmb_enable_device(zdev); rc = zpci_fmb_enable_device(zdev);
if (rc)
return rc;
break; break;
} }
return count; mutex_unlock(&zdev->lock);
return rc ? rc : count;
} }
static int pci_perf_seq_open(struct inode *inode, struct file *filp) static int pci_perf_seq_open(struct inode *inode, struct file *filp)

View File

@ -300,7 +300,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
flags |= ZPCI_TABLE_PROTECTED; flags |= ZPCI_TABLE_PROTECTED;
if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) { if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
atomic64_add(nr_pages, &zdev->fmb->mapped_pages); atomic64_add(nr_pages, &zdev->mapped_pages);
return dma_addr + (offset & ~PAGE_MASK); return dma_addr + (offset & ~PAGE_MASK);
} }
@ -328,7 +328,7 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
zpci_err_hex(&dma_addr, sizeof(dma_addr)); zpci_err_hex(&dma_addr, sizeof(dma_addr));
} }
atomic64_add(npages, &zdev->fmb->unmapped_pages); atomic64_add(npages, &zdev->unmapped_pages);
iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT; iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
dma_free_iommu(zdev, iommu_page_index, npages); dma_free_iommu(zdev, iommu_page_index, npages);
} }
@ -357,7 +357,7 @@ static void *s390_dma_alloc(struct device *dev, size_t size,
return NULL; return NULL;
} }
atomic64_add(size / PAGE_SIZE, &zdev->fmb->allocated_pages); atomic64_add(size / PAGE_SIZE, &zdev->allocated_pages);
if (dma_handle) if (dma_handle)
*dma_handle = map; *dma_handle = map;
return (void *) pa; return (void *) pa;
@ -370,7 +370,7 @@ static void s390_dma_free(struct device *dev, size_t size,
struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
size = PAGE_ALIGN(size); size = PAGE_ALIGN(size);
atomic64_sub(size / PAGE_SIZE, &zdev->fmb->allocated_pages); atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL); s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
free_pages((unsigned long) pa, get_order(size)); free_pages((unsigned long) pa, get_order(size));
} }

View File

@ -579,7 +579,8 @@ void dasd_kick_device(struct dasd_device *device)
{ {
dasd_get_device(device); dasd_get_device(device);
/* queue call to dasd_kick_device to the kernel event daemon. */ /* queue call to dasd_kick_device to the kernel event daemon. */
schedule_work(&device->kick_work); if (!schedule_work(&device->kick_work))
dasd_put_device(device);
} }
EXPORT_SYMBOL(dasd_kick_device); EXPORT_SYMBOL(dasd_kick_device);
@ -599,7 +600,8 @@ void dasd_reload_device(struct dasd_device *device)
{ {
dasd_get_device(device); dasd_get_device(device);
/* queue call to dasd_reload_device to the kernel event daemon. */ /* queue call to dasd_reload_device to the kernel event daemon. */
schedule_work(&device->reload_device); if (!schedule_work(&device->reload_device))
dasd_put_device(device);
} }
EXPORT_SYMBOL(dasd_reload_device); EXPORT_SYMBOL(dasd_reload_device);
@ -619,7 +621,8 @@ void dasd_restore_device(struct dasd_device *device)
{ {
dasd_get_device(device); dasd_get_device(device);
/* queue call to dasd_restore_device to the kernel event daemon. */ /* queue call to dasd_restore_device to the kernel event daemon. */
schedule_work(&device->restore_device); if (!schedule_work(&device->restore_device))
dasd_put_device(device);
} }
/* /*
@ -2163,18 +2166,22 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
cqr->intrc = -ENOLINK; cqr->intrc = -ENOLINK;
continue; continue;
} }
/* Don't try to start requests if device is stopped */ /*
if (interruptible) { * Don't try to start requests if device is stopped
rc = wait_event_interruptible( * except path verification requests
generic_waitq, !(device->stopped)); */
if (rc == -ERESTARTSYS) { if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
cqr->status = DASD_CQR_FAILED; if (interruptible) {
maincqr->intrc = rc; rc = wait_event_interruptible(
continue; generic_waitq, !(device->stopped));
} if (rc == -ERESTARTSYS) {
} else cqr->status = DASD_CQR_FAILED;
wait_event(generic_waitq, !(device->stopped)); maincqr->intrc = rc;
continue;
}
} else
wait_event(generic_waitq, !(device->stopped));
}
if (!cqr->callback) if (!cqr->callback)
cqr->callback = dasd_wakeup_cb; cqr->callback = dasd_wakeup_cb;
@ -2524,6 +2531,11 @@ static void __dasd_process_request_queue(struct dasd_block *block)
__blk_end_request_all(req, -EIO); __blk_end_request_all(req, -EIO);
return; return;
} }
/* if device is stopped do not fetch new requests */
if (basedev->stopped)
return;
/* Now we try to fetch requests from the request queue */ /* Now we try to fetch requests from the request queue */
while ((req = blk_peek_request(queue))) { while ((req = blk_peek_request(queue))) {
if (basedev->features & DASD_FEATURE_READONLY && if (basedev->features & DASD_FEATURE_READONLY &&

View File

@ -1628,7 +1628,8 @@ static void dasd_eckd_kick_validate_server(struct dasd_device *device)
return; return;
} }
/* queue call to do_validate_server to the kernel event daemon. */ /* queue call to do_validate_server to the kernel event daemon. */
schedule_work(&device->kick_validate); if (!schedule_work(&device->kick_validate))
dasd_put_device(device);
} }
static u32 get_fcx_max_data(struct dasd_device *device) static u32 get_fcx_max_data(struct dasd_device *device)

View File

@ -315,10 +315,29 @@ static int sclp_mem_change_state(unsigned long start, unsigned long size,
rc |= sclp_assign_storage(incr->rn); rc |= sclp_assign_storage(incr->rn);
else else
sclp_unassign_storage(incr->rn); sclp_unassign_storage(incr->rn);
if (rc == 0)
incr->standby = online ? 0 : 1;
} }
return rc ? -EIO : 0; return rc ? -EIO : 0;
} }
static bool contains_standby_increment(unsigned long start, unsigned long end)
{
struct memory_increment *incr;
unsigned long istart;
list_for_each_entry(incr, &sclp_mem_list, list) {
istart = rn2addr(incr->rn);
if (end - 1 < istart)
continue;
if (start > istart + sclp_rzm - 1)
continue;
if (incr->standby)
return true;
}
return false;
}
static int sclp_mem_notifier(struct notifier_block *nb, static int sclp_mem_notifier(struct notifier_block *nb,
unsigned long action, void *data) unsigned long action, void *data)
{ {
@ -334,8 +353,16 @@ static int sclp_mem_notifier(struct notifier_block *nb,
for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1) for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
sclp_attach_storage(id); sclp_attach_storage(id);
switch (action) { switch (action) {
case MEM_ONLINE:
case MEM_GOING_OFFLINE: case MEM_GOING_OFFLINE:
/*
* We do not allow to set memory blocks offline that contain
* standby memory. This is done to simplify the "memory online"
* case.
*/
if (contains_standby_increment(start, start + size))
rc = -EPERM;
break;
case MEM_ONLINE:
case MEM_CANCEL_OFFLINE: case MEM_CANCEL_OFFLINE:
break; break;
case MEM_GOING_ONLINE: case MEM_GOING_ONLINE:
@ -361,6 +388,21 @@ static struct notifier_block sclp_mem_nb = {
.notifier_call = sclp_mem_notifier, .notifier_call = sclp_mem_notifier,
}; };
static void __init align_to_block_size(unsigned long long *start,
unsigned long long *size)
{
unsigned long long start_align, size_align, alignment;
alignment = memory_block_size_bytes();
start_align = roundup(*start, alignment);
size_align = rounddown(*start + *size, alignment) - start_align;
pr_info("Standby memory at 0x%llx (%lluM of %lluM usable)\n",
*start, size_align >> 20, *size >> 20);
*start = start_align;
*size = size_align;
}
static void __init add_memory_merged(u16 rn) static void __init add_memory_merged(u16 rn)
{ {
static u16 first_rn, num; static u16 first_rn, num;
@ -382,7 +424,9 @@ static void __init add_memory_merged(u16 rn)
goto skip_add; goto skip_add;
if (memory_end_set && (start + size > memory_end)) if (memory_end_set && (start + size > memory_end))
size = memory_end - start; size = memory_end - start;
add_memory(0, start, size); align_to_block_size(&start, &size);
if (size)
add_memory(0, start, size);
skip_add: skip_add:
first_rn = rn; first_rn = rn;
num = 1; num = 1;