Merge branch 'upstream-fixes' into upstream
Conflicts: drivers/scsi/libata-core.c
commit 3d71b3b0b6

CREDITS (9 changed lines)
@@ -3241,14 +3241,9 @@ S: 12725 SW Millikan Way, Suite 400
S: Beaverton, Oregon 97005
S: USA

N: Marcelo W. Tosatti
E: marcelo.tosatti@cyclades.com
D: Miscellaneous kernel hacker
N: Marcelo Tosatti
E: marcelo@kvack.org
D: v2.4 kernel maintainer
D: Current pc300/cyclades maintainer
S: Cyclades Corporation
S: Av Cristovao Colombo, 462. Floresta.
S: Porto Alegre
S: Brazil

N: Stefan Traby
@@ -1721,11 +1721,6 @@ Your cooperation is appreciated.
		These devices support the same API as the generic SCSI
		devices.

 97 block	Packet writing for CD/DVD devices
		  0 = /dev/pktcdvd0	First packet-writing module
		  1 = /dev/pktcdvd1	Second packet-writing module
		    ...

 98 char	Control and Measurement Device (comedi)
		  0 = /dev/comedi0	First comedi device
		  1 = /dev/comedi1	Second comedi device
@@ -57,6 +57,15 @@ Who: Jody McIntyre <scjody@steamballoon.com>

---------------------------

What:	sbp2: module parameter "force_inquiry_hack"
When:	July 2006
Why:	Superseded by parameter "workarounds". Both parameters are meant to be
	used ad-hoc and for single devices only, i.e. not in modprobe.conf,
	therefore the impact of this feature replacement should be low.
Who:	Stefan Richter <stefanr@s5r6.in-berlin.de>

---------------------------

What:	Video4Linux API 1 ioctls and video_decoder.h from Video devices.
When:	July 2006
Why:	The V4L1 API was replaced by the V4L2 API during the migration from 2.4 to 2.6
@@ -1031,7 +1031,7 @@ conflict on any particular lock.
LOCKS VS MEMORY ACCESSES
------------------------

Consider the following: the system has a pair of spinlocks (N) and (Q), and
Consider the following: the system has a pair of spinlocks (M) and (Q), and
three CPUs; then should the following sequence of events occur:

	CPU 1				CPU 2

@@ -1678,7 +1678,7 @@ CPU's caches by some other cache event:
	smp_wmb();
	<A:modify v=2>		<C:busy>
				<C:queue v=2>
	p = &b;			q = p;
	p = &v;			q = p;
				<D:request p>
	<B:modify p=&v>		<D:commit p=&v>
				<D:read p>
Documentation/spi/pxa2xx (new file, 234 lines)
@@ -0,0 +1,234 @@
PXA2xx SPI on SSP driver HOWTO
===================================================
This is a mini howto on the pxa2xx_spi driver. The driver turns a PXA2xx
synchronous serial port into a SPI master controller
(see Documentation/spi/spi_summary). The driver has the following features

- Support for any PXA2xx SSP
- SSP PIO and SSP DMA data transfers.
- External and Internal (SSPFRM) chip selects.
- Per slave device (chip) configuration.
- Full suspend, freeze, resume support.

The driver is built around a "spi_message" fifo serviced by a workqueue and a
tasklet. The workqueue, "pump_messages", drives the message fifo and the tasklet
(pump_transfer) is responsible for queuing SPI transactions and setting up and
launching the dma/interrupt driven transfers.

Declaring PXA2xx Master Controllers
-----------------------------------
Typically a SPI master is defined in the arch/.../mach-*/board-*.c as a
"platform device". The master configuration is passed to the driver via a table
found in include/asm-arm/arch-pxa/pxa2xx_spi.h:

struct pxa2xx_spi_master {
	enum pxa_ssp_type ssp_type;
	u32 clock_enable;
	u16 num_chipselect;
	u8 enable_dma;
};

The "pxa2xx_spi_master.ssp_type" field must have a value between 1 and 3 and
informs the driver which features a particular SSP supports.

The "pxa2xx_spi_master.clock_enable" field is used to enable/disable the
corresponding SSP peripheral block in the "Clock Enable Register (CKEN)". See
the "PXA2xx Developer Manual" section "Clocks and Power Management".

The "pxa2xx_spi_master.num_chipselect" field is used to determine the number of
slave devices (chips) attached to this SPI master.

The "pxa2xx_spi_master.enable_dma" field informs the driver that SSP DMA should
be used. This causes the driver to acquire two DMA channels: rx_channel and
tx_channel. The rx_channel has a higher DMA service priority than the tx_channel.
See the "PXA2xx Developer Manual" section "DMA Controller".

NSSP MASTER SAMPLE
------------------
Below is a sample configuration using the PXA255 NSSP.

static struct resource pxa_spi_nssp_resources[] = {
	[0] = {
		.start = __PREG(SSCR0_P(2)), /* Start address of NSSP */
		.end = __PREG(SSCR0_P(2)) + 0x2c, /* Range of registers */
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_NSSP, /* NSSP IRQ */
		.end = IRQ_NSSP,
		.flags = IORESOURCE_IRQ,
	},
};

static struct pxa2xx_spi_master pxa_nssp_master_info = {
	.ssp_type = PXA25x_NSSP, /* Type of SSP */
	.clock_enable = CKEN9_NSSP, /* NSSP Peripheral clock */
	.num_chipselect = 1, /* Matches the number of chips attached to NSSP */
	.enable_dma = 1, /* Enables NSSP DMA */
};

static struct platform_device pxa_spi_nssp = {
	.name = "pxa2xx-spi", /* MUST BE THIS VALUE, so device match driver */
	.id = 2, /* Bus number, MUST MATCH SSP number 1..n */
	.resource = pxa_spi_nssp_resources,
	.num_resources = ARRAY_SIZE(pxa_spi_nssp_resources),
	.dev = {
		.platform_data = &pxa_nssp_master_info, /* Passed to driver */
	},
};

static struct platform_device *devices[] __initdata = {
	&pxa_spi_nssp,
};

static void __init board_init(void)
{
	(void)platform_add_devices(devices, ARRAY_SIZE(devices));
}

Declaring Slave Devices
-----------------------
Typically each SPI slave (chip) is defined in the arch/.../mach-*/board-*.c
using the "spi_board_info" structure found in "linux/spi/spi.h". See
"Documentation/spi/spi_summary" for additional information.

Each slave device attached to the PXA must provide slave specific configuration
information via the structure "pxa2xx_spi_chip" found in
"include/asm-arm/arch-pxa/pxa2xx_spi.h". The pxa2xx_spi master controller driver
uses this configuration whenever the driver communicates with the slave
device.

struct pxa2xx_spi_chip {
	u8 tx_threshold;
	u8 rx_threshold;
	u8 dma_burst_size;
	u32 timeout_microsecs;
	u8 enable_loopback;
	void (*cs_control)(u32 command);
};

The "pxa2xx_spi_chip.tx_threshold" and "pxa2xx_spi_chip.rx_threshold" fields are
used to configure the SSP hardware fifo. These fields are critical to the
performance of the pxa2xx_spi driver and misconfiguration will result in rx
fifo overruns (especially in PIO mode transfers). Good default values are

	.tx_threshold = 12,
	.rx_threshold = 4,

The "pxa2xx_spi_chip.dma_burst_size" field is used to configure the PXA2xx DMA
engine and is related to the "spi_device.bits_per_word" field. Read and understand
the PXA2xx "Developer Manual" sections on the DMA controller and SSP Controllers
to determine the correct value. An SSP configured for byte-wide transfers would
use a value of 8.

The "pxa2xx_spi_chip.timeout_microsecs" field is used to efficiently handle
trailing bytes in the SSP receiver fifo. The correct value for this field is
dependent on the SPI bus speed ("spi_board_info.max_speed_hz") and the specific
slave device. Please note that the PXA2xx SSP 1 does not support trailing byte
timeouts and must busy-wait any trailing bytes.

The "pxa2xx_spi_chip.enable_loopback" field is used to place the SSP port
into internal loopback mode. In this mode the SSP controller internally
connects the SSPTX pin to the SSPRX pin. This is useful for initial setup
testing.

The "pxa2xx_spi_chip.cs_control" field is used to point to a board specific
function for asserting/deasserting a slave device chip select. If the field is
NULL, the pxa2xx_spi master controller driver assumes that the SSP port is
configured to use SSPFRM instead.

NSSP SLAVE SAMPLE
-----------------
The pxa2xx_spi_chip structure is passed to the pxa2xx_spi driver in the
"spi_board_info.controller_data" field. Below is a sample configuration using
the PXA255 NSSP.

/* Chip Select control for the CS8415A SPI slave device */
static void cs8415a_cs_control(u32 command)
{
	if (command & PXA2XX_CS_ASSERT)
		GPCR(2) = GPIO_bit(2);
	else
		GPSR(2) = GPIO_bit(2);
}

/* Chip Select control for the CS8405A SPI slave device */
static void cs8405a_cs_control(u32 command)
{
	if (command & PXA2XX_CS_ASSERT)
		GPCR(3) = GPIO_bit(3);
	else
		GPSR(3) = GPIO_bit(3);
}

static struct pxa2xx_spi_chip cs8415a_chip_info = {
	.tx_threshold = 12, /* SSP hardware FIFO threshold */
	.rx_threshold = 4, /* SSP hardware FIFO threshold */
	.dma_burst_size = 8, /* Byte wide transfers used so 8 byte bursts */
	.timeout_microsecs = 64, /* Wait at least 64usec to handle trailing */
	.cs_control = cs8415a_cs_control, /* Use external chip select */
};

static struct pxa2xx_spi_chip cs8405a_chip_info = {
	.tx_threshold = 12, /* SSP hardware FIFO threshold */
	.rx_threshold = 4, /* SSP hardware FIFO threshold */
	.dma_burst_size = 8, /* Byte wide transfers used so 8 byte bursts */
	.timeout_microsecs = 64, /* Wait at least 64usec to handle trailing */
	.cs_control = cs8405a_cs_control, /* Use external chip select */
};

static struct spi_board_info streetracer_spi_board_info[] __initdata = {
	{
		.modalias = "cs8415a", /* Name of spi_driver for this device */
		.max_speed_hz = 3686400, /* Run SSP as fast as possible */
		.bus_num = 2, /* Framework bus number */
		.chip_select = 0, /* Framework chip select */
		.platform_data = NULL, /* No spi_driver specific config */
		.controller_data = &cs8415a_chip_info, /* Master chip config */
		.irq = STREETRACER_APCI_IRQ, /* Slave device interrupt */
	},
	{
		.modalias = "cs8405a", /* Name of spi_driver for this device */
		.max_speed_hz = 3686400, /* Run SSP as fast as possible */
		.bus_num = 2, /* Framework bus number */
		.chip_select = 1, /* Framework chip select */
		.controller_data = &cs8405a_chip_info, /* Master chip config */
		.irq = STREETRACER_APCI_IRQ, /* Slave device interrupt */
	},
};

static void __init streetracer_init(void)
{
	spi_register_board_info(streetracer_spi_board_info,
				ARRAY_SIZE(streetracer_spi_board_info));
}

DMA and PIO I/O Support
-----------------------
The pxa2xx_spi driver supports both DMA and interrupt driven PIO message
transfers. The driver defaults to PIO mode and DMA transfers must be enabled
by setting the "enable_dma" flag in the "pxa2xx_spi_master" structure and
ensuring that the "pxa2xx_spi_chip.dma_burst_size" field is non-zero. The DMA
mode supports both coherent and streaming DMA mappings.

The following logic is used to determine the type of I/O to be used on
a per "spi_transfer" basis:

	if !enable_dma or dma_burst_size == 0 then
		always use PIO transfers

	if spi_message.is_dma_mapped and rx_dma_buf != 0 and tx_dma_buf != 0 then
		use coherent DMA mode

	if rx_buf and tx_buf are aligned on 8 byte boundary then
		use streaming DMA mode

	otherwise
		use PIO transfer

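Written out as C, the same decision chain might look like the following
sketch (illustrative only; the pick_io_mode helper and the io_mode enum are
invented for this example and are not part of the driver):

	#include <linux/spi/spi.h>

	/* Hypothetical helper mirroring the pseudocode above; not the
	 * driver's actual internals. */
	enum io_mode { IO_PIO, IO_DMA_COHERENT, IO_DMA_STREAMING };

	static enum io_mode pick_io_mode(int enable_dma, u8 dma_burst_size,
					 struct spi_message *msg,
					 struct spi_transfer *xfer)
	{
		if (!enable_dma || dma_burst_size == 0)
			return IO_PIO;		/* DMA not configured */

		if (msg->is_dma_mapped && xfer->rx_dma && xfer->tx_dma)
			return IO_DMA_COHERENT;	/* caller mapped the buffers */

		if (!((unsigned long)xfer->rx_buf & 7) &&
		    !((unsigned long)xfer->tx_buf & 7))
			return IO_DMA_STREAMING; /* 8-byte aligned buffers */

		return IO_PIO;			/* fall back to PIO */
	}
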
THANKS TO
---------

David Brownell and others for mentoring the development of this driver.
@@ -414,7 +414,33 @@ to get the driver-private data allocated for that device.
The driver will initialize the fields of that spi_master, including the
bus number (maybe the same as the platform device ID) and three methods
used to interact with the SPI core and SPI protocol drivers. It will
also initialize its own internal state.
also initialize its own internal state. (See below about bus numbering
and those methods.)

After you initialize the spi_master, then use spi_register_master() to
publish it to the rest of the system. At that time, device nodes for
the controller and any predeclared spi devices will be made available,
and the driver model core will take care of binding them to drivers.

If you need to remove your SPI controller driver, spi_unregister_master()
will reverse the effect of spi_register_master().


BUS NUMBERING

Bus numbering is important, since that's how Linux identifies a given
SPI bus (shared SCK, MOSI, MISO). Valid bus numbers start at zero. On
SOC systems, the bus numbers should match the numbers defined by the chip
manufacturer. For example, hardware controller SPI2 would be bus number 2,
and spi_board_info for devices connected to it would use that number.

If you don't have such a hardware-assigned bus number, and for some reason
you can't just assign them, then provide a negative bus number. That will
then be replaced by a dynamically assigned number. You'd then need to treat
this as a non-static configuration (see above).
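
As a concrete sketch of both cases (names and the trimmed error handling
are assumptions for illustration, not code from this document):

	#include <linux/errno.h>
	#include <linux/platform_device.h>
	#include <linux/spi/spi.h>

	static int example_spi_probe(struct platform_device *pdev)
	{
		struct spi_master *master;

		master = spi_alloc_master(&pdev->dev, sizeof(int));
		if (!master)
			return -ENOMEM;

		/* Hardware-assigned numbering: the platform device ID
		 * doubles as the bus number, e.g. hardware SPI2 => bus 2.
		 * A negative bus_num would instead request a dynamically
		 * assigned number, as described above. */
		master->bus_num = pdev->id;
		master->num_chipselect = 1;

		return spi_register_master(master);
	}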

SPI MASTER METHODS

master->setup(struct spi_device *spi)
	This sets up the device clock rate, SPI mode, and word sizes.

@@ -431,6 +457,9 @@ also initialize its own internal state.
	state it dynamically associates with that device. If you do that,
	be sure to provide the cleanup() method to free that state.


SPI MESSAGE QUEUE

The bulk of the driver will be managing the I/O queue fed by transfer().

That queue could be purely conceptual. For example, a driver used only

@@ -440,6 +469,9 @@ But the queue will probably be very real, using message->queue, PIO,
often DMA (especially if the root filesystem is in SPI flash), and
execution contexts like IRQ handlers, tasklets, or workqueues (such
as keventd). Your driver can be as fancy, or as simple, as you need.
Such a transfer() method would normally just add the message to a
queue, and then start some asynchronous transfer engine (unless it's
already running).

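A minimal transfer() of that shape might look like the sketch below
(illustrative only; the my_ctlr state and its field names are assumptions,
not something this document defines):

	#include <linux/errno.h>
	#include <linux/spi/spi.h>
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	/* Hypothetical per-controller state. */
	struct my_ctlr {
		spinlock_t		lock;
		struct list_head	queue;
		struct work_struct	pump;	/* drains the queue */
		int			busy;
	};

	/* Enqueue the message, then kick the asynchronous engine
	 * unless it is already running. */
	static int my_transfer(struct spi_device *spi, struct spi_message *msg)
	{
		struct my_ctlr *ctlr = spi_master_get_devdata(spi->master);
		unsigned long flags;

		msg->status = -EINPROGRESS;
		msg->actual_length = 0;

		spin_lock_irqsave(&ctlr->lock, flags);
		list_add_tail(&msg->queue, &ctlr->queue);
		if (!ctlr->busy)
			schedule_work(&ctlr->pump);
		spin_unlock_irqrestore(&ctlr->lock, flags);

		return 0;
	}
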
THANKS TO
@@ -36,6 +36,9 @@ timeout or margin. The simplest way to ping the watchdog is to write
some data to the device. So a very simple watchdog daemon would look
like this:

#include <stdlib.h>
#include <fcntl.h>

int main(int argc, const char *argv[]) {
	int fd=open("/dev/watchdog",O_WRONLY);
	if (fd==-1) {
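The hunk above is cut off by the diff context; for reference, a complete
version of such a minimal daemon might look like the sketch below (untested;
the one-second ping interval is an arbitrary choice and only needs to stay
below the configured watchdog timeout):

	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>
	#include <fcntl.h>

	/* Keep the watchdog timer from expiring by writing to
	 * /dev/watchdog at a fixed interval. */
	int main(int argc, const char *argv[])
	{
		int fd = open("/dev/watchdog", O_WRONLY);

		if (fd == -1) {
			perror("watchdog");
			exit(EXIT_FAILURE);
		}
		while (1) {
			write(fd, "\0", 1);	/* any write pings it */
			sleep(1);		/* assumed interval */
		}
	}
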
MAINTAINERS (13 changed lines)

@@ -1603,6 +1603,11 @@ M: James.Bottomley@HansenPartnership.com
L: linux-scsi@vger.kernel.org
S: Maintained

LED SUBSYSTEM
P: Richard Purdie
M: rpurdie@rpsys.net
S: Maintained

LEGO USB Tower driver
P: Juergen Stuber
M: starblue@users.sourceforge.net

@@ -1662,7 +1667,7 @@ S: Maintained

LINUX FOR POWERPC EMBEDDED PPC8XX
P: Marcelo Tosatti
M: marcelo.tosatti@cyclades.com
M: marcelo@kvack.org
W: http://www.penguinppc.org/
L: linuxppc-embedded@ozlabs.org
S: Maintained

@@ -2513,6 +2518,12 @@ M: perex@suse.cz
L: alsa-devel@alsa-project.org
S: Maintained

SPI SUBSYSTEM
P: David Brownell
M: dbrownell@users.sourceforge.net
L: spi-devel-general@lists.sourceforge.net
S: Maintained

TPM DEVICE DRIVER
P: Kylene Hall
M: kjhall@us.ibm.com
@@ -99,6 +99,8 @@ int main(void)
	DEFINE(MACHINFO_NAME, offsetof(struct machine_desc, name));
	DEFINE(MACHINFO_PHYSIO, offsetof(struct machine_desc, phys_io));
	DEFINE(MACHINFO_PGOFFIO, offsetof(struct machine_desc, io_pg_offst));
	BLANK();
	DEFINE(PROC_INFO_SZ, sizeof(struct proc_info_list));
	DEFINE(PROCINFO_INITFUNC, offsetof(struct proc_info_list, __cpu_flush));
	DEFINE(PROCINFO_MMUFLAGS, offsetof(struct proc_info_list, __cpu_mmu_flags));
	return 0;
@@ -143,12 +143,23 @@ static struct dma_ops isa_dma_ops = {
	.residue = isa_get_dma_residue,
};

static struct resource dma_resources[] = {
	{ "dma1", 0x0000, 0x000f },
	{ "dma low page", 0x0080, 0x008f },
	{ "dma2", 0x00c0, 0x00df },
	{ "dma high page", 0x0480, 0x048f }
};
static struct resource dma_resources[] = { {
	.name = "dma1",
	.start = 0x0000,
	.end = 0x000f
}, {
	.name = "dma low page",
	.start = 0x0080,
	.end = 0x008f
}, {
	.name = "dma2",
	.start = 0x00c0,
	.end = 0x00df
}, {
	.name = "dma high page",
	.start = 0x0480,
	.end = 0x048f
} };

void __init isa_init_dma(dma_t *dma)
{
@@ -311,7 +311,7 @@ void free_thread_info(struct thread_info *thread)
	struct thread_info_list *th = &get_cpu_var(thread_info_list);
	if (th->nr < EXTRA_TASK_STRUCT) {
		unsigned long *p = (unsigned long *)thread;
		p[0] = th->head;
		p[0] = (unsigned long)th->head;
		th->head = p;
		th->nr += 1;
		put_cpu_var(thread_info_list);
@@ -122,7 +122,7 @@ ENTRY(c_backtrace)
#define reg r5
#define stack r6

.Ldumpstm:	stmfd	sp!, {instr, reg, stack, r7, lr}
.Ldumpstm:	stmfd	sp!, {instr, reg, stack, r7, r8, lr}
	mov	stack, r0
	mov	instr, r1
	mov	reg, #9

@@ -145,7 +145,7 @@ ENTRY(c_backtrace)
	adrne	r0, .Lcr
	blne	printk
	mov	r0, stack
	LOADREGS(fd, sp!, {instr, reg, stack, r7, pc})
	LOADREGS(fd, sp!, {instr, reg, stack, r7, r8, pc})

.Lfp:	.asciz	" r%d = %08X%c"
.Lcr:	.asciz	"\n"
@@ -189,12 +189,12 @@ ENTRY(__do_div64)
	moveq	pc, lr

	@ Division by 0:
	str	lr, [sp, #-4]!
	str	lr, [sp, #-8]!
	bl	__div0

	@ as wrong as it could be...
	mov	yl, #0
	mov	yh, #0
	mov	xh, #0
	ldr	pc, [sp], #4
	ldr	pc, [sp], #8
@@ -95,7 +95,10 @@ static void __init mainstone_init_irq(void)
	for(irq = MAINSTONE_IRQ(0); irq <= MAINSTONE_IRQ(15); irq++) {
		set_irq_chip(irq, &mainstone_irq_chip);
		set_irq_handler(irq, do_level_IRQ);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
		if (irq == MAINSTONE_IRQ(10) || irq == MAINSTONE_IRQ(14))
			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE | IRQF_NOAUTOEN);
		else
			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	}
	set_irq_flags(MAINSTONE_IRQ(8), 0);
	set_irq_flags(MAINSTONE_IRQ(12), 0);
@@ -137,8 +137,11 @@ static struct amba_device *amba_devs[] __initdata = {
static void __init gic_init_irq(void)
{
#ifdef CONFIG_REALVIEW_MPCORE
	unsigned int pldctrl;
	writel(0x0000a05f, __io_address(REALVIEW_SYS_LOCK));
	writel(0x008003c0, __io_address(REALVIEW_SYS_BASE) + 0xd8);
	pldctrl = readl(__io_address(REALVIEW_SYS_BASE) + 0xd8);
	pldctrl |= 0x00800000;	/* New irq mode */
	writel(pldctrl, __io_address(REALVIEW_SYS_BASE) + 0xd8);
	writel(0x00000000, __io_address(REALVIEW_SYS_LOCK));
#endif
	gic_dist_init(__io_address(REALVIEW_GIC_DIST_BASE));
@@ -59,8 +59,7 @@ ENTRY(s3c2410_cpu_suspend)
	mrc	p15, 0, r5, c13, c0, 0	@ PID
	mrc	p15, 0, r6, c3, c0, 0	@ Domain ID
	mrc	p15, 0, r7, c2, c0, 0	@ translation table base address
	mrc	p15, 0, r8, c2, c0, 0	@ auxiliary control register
	mrc	p15, 0, r9, c1, c0, 0	@ control register
	mrc	p15, 0, r8, c1, c0, 0	@ control register

	stmia	r0, { r4 - r13 }

@@ -165,7 +164,6 @@ ENTRY(s3c2410_cpu_resume)
	mcr	p15, 0, r5, c13, c0, 0	@ PID
	mcr	p15, 0, r6, c3, c0, 0	@ Domain ID
	mcr	p15, 0, r7, c2, c0, 0	@ translation table base
	mcr	p15, 0, r8, c1, c1, 0	@ auxilliary control

#ifdef CONFIG_DEBUG_RESUME
	mov	r3, #'R'

@@ -173,7 +171,7 @@ ENTRY(s3c2410_cpu_resume)
#endif

	ldr	r2, =resume_with_mmu
	mcr	p15, 0, r9, c1, c0, 0	@ turn on MMU, etc
	mcr	p15, 0, r8, c1, c0, 0	@ turn on MMU, etc
	nop	@ second-to-last before mmu
	mov	pc, r2	@ go back to virtual address
@@ -141,7 +141,7 @@ __ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		return NULL;
	addr = (unsigned long)area->addr;
	if (remap_area_pages(addr, pfn, size, flags)) {
		vfree((void *)addr);
		vunmap((void *)addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char *)addr);

@@ -173,7 +173,7 @@ EXPORT_SYMBOL(__ioremap);

void __iounmap(void __iomem *addr)
{
	vfree((void *) (PAGE_MASK & (unsigned long) addr));
	vunmap((void *)(PAGE_MASK & (unsigned long)addr));
}
EXPORT_SYMBOL(__iounmap);
@@ -758,10 +758,10 @@ config HOTPLUG_CPU
	bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
	depends on SMP && HOTPLUG && EXPERIMENTAL && !X86_VOYAGER
	---help---
	  Say Y here to experiment with turning CPUs off and on. CPUs
	  can be controlled through /sys/devices/system/cpu.
	  Say Y here to experiment with turning CPUs off and on, and to
	  enable suspend on SMP systems. CPUs can be controlled through
	  /sys/devices/system/cpu.

	  Say N.

endmenu
@@ -1066,6 +1066,14 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
		DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
		},
	},
	{
		.callback = disable_acpi_pci,
		.ident = "HP xw9300",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
			DMI_MATCH(DMI_PRODUCT_NAME, "HP xw9300 Workstation"),
		},
	},
	{}
};
@@ -332,10 +332,11 @@ static int __init ppro_init(char ** cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;

	if (cpu_model > 0xd)
	if (cpu_model == 14)
		*cpu_type = "i386/core";
	else if (cpu_model > 0xd)
		return 0;

	if (cpu_model == 9) {
	else if (cpu_model == 9) {
		*cpu_type = "i386/p6_mobile";
	} else if (cpu_model > 5) {
		*cpu_type = "i386/piii";
@@ -134,7 +134,7 @@ CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
CONFIG_NUMA=y
CONFIG_NODES_SHIFT=8
CONFIG_NODES_SHIFT=10
CONFIG_VIRTUAL_MEM_MAP=y
CONFIG_HOLES_IN_ZONE=y
CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y

@@ -1159,7 +1159,7 @@ CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_DEBUG_SLAB is not set
CONFIG_DEBUG_PREEMPT=y
CONFIG_DEBUG_MUTEXES=y
# CONFIG_DEBUG_MUTEXES is not set
# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_KOBJECT is not set
@@ -416,7 +416,7 @@ iosapic_end_level_irq (unsigned int irq)
	ia64_vector vec = irq_to_vector(irq);
	struct iosapic_rte_info *rte;

	move_irq(irq);
	move_native_irq(irq);
	list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list)
		iosapic_eoi(rte->addr, vec);
}

@@ -458,7 +458,7 @@ iosapic_ack_edge_irq (unsigned int irq)
{
	irq_desc_t *idesc = irq_descp(irq);

	move_irq(irq);
	move_native_irq(irq);
	/*
	 * Once we have recorded IRQ_PENDING already, we can mask the
	 * interrupt for real. This prevents IRQ storms from unhandled
@@ -101,7 +101,6 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir)

	if (irq < NR_IRQS) {
		irq_affinity[irq] = mask;
		set_irq_info(irq, mask);
		irq_redir[irq] = (char) (redir & 0xff);
	}
}
@@ -1636,7 +1636,7 @@ static int __init prom_find_machine_type(void)
			compat, sizeof(compat)-1);
	if (len <= 0)
		return PLATFORM_GENERIC;
	if (strncmp(compat, RELOC("chrp"), 4))
	if (strcmp(compat, RELOC("chrp")))
		return PLATFORM_GENERIC;

	/* Default to pSeries. We need to know if we are running LPAR */
@@ -255,7 +255,7 @@ static int __init pSeries_init_panel(void)
{
	/* Manually leave the kernel version on the panel. */
	ppc_md.progress("Linux ppc64\n", 0);
	ppc_md.progress(system_utsname.version, 0);
	ppc_md.progress(system_utsname.release, 0);

	return 0;
}
@@ -1650,3 +1650,11 @@ sys_tee_wrapper:
	llgfr	%r4,%r4			# size_t
	llgfr	%r5,%r5			# unsigned int
	jg	sys_tee

	.globl	compat_sys_vmsplice_wrapper
compat_sys_vmsplice_wrapper:
	lgfr	%r2,%r2			# int
	llgtr	%r3,%r3			# compat_iovec *
	llgfr	%r4,%r4			# unsigned int
	llgfr	%r5,%r5			# unsigned int
	jg	compat_sys_vmsplice
@@ -317,3 +317,4 @@ SYSCALL(sys_get_robust_list,sys_get_robust_list,compat_sys_get_robust_list_wrapp
SYSCALL(sys_splice,sys_splice,sys_splice_wrapper)
SYSCALL(sys_sync_file_range,sys_sync_file_range,sys_sync_file_range_wrapper)
SYSCALL(sys_tee,sys_tee,sys_tee_wrapper)
SYSCALL(sys_vmsplice,sys_vmsplice,compat_sys_vmsplice_wrapper)
@@ -249,18 +249,19 @@ static inline void stop_hz_timer(void)
	unsigned long flags;
	unsigned long seq, next;
	__u64 timer, todval;
	int cpu = smp_processor_id();

	if (sysctl_hz_timer != 0)
		return;

	cpu_set(smp_processor_id(), nohz_cpu_mask);
	cpu_set(cpu, nohz_cpu_mask);

	/*
	 * Leave the clock comparator set up for the next timer
	 * tick if either rcu or a softirq is pending.
	 */
	if (rcu_pending(smp_processor_id()) || local_softirq_pending()) {
		cpu_clear(smp_processor_id(), nohz_cpu_mask);
	if (rcu_needs_cpu(cpu) || local_softirq_pending()) {
		cpu_clear(cpu, nohz_cpu_mask);
		return;
	}
@@ -12,9 +12,10 @@ static int
check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
	if (hwdev && bus + size > *hwdev->dma_mask) {
		printk(KERN_ERR
		    "nommu_%s: overflow %Lx+%lu of device mask %Lx\n",
			name, (long long)bus, size, (long long)*hwdev->dma_mask);
		if (*hwdev->dma_mask >= 0xffffffffULL)
			printk(KERN_ERR
			    "nommu_%s: overflow %Lx+%lu of device mask %Lx\n",
				name, (long long)bus, size, (long long)*hwdev->dma_mask);
		return 0;
	}
	return 1;
@@ -102,6 +102,8 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->eflags & X86_EFLAGS_IF)
		local_irq_disable();
	/* Make sure to not schedule here because we could be running
	   on an exception stack. */
	preempt_enable_no_resched();
}

@@ -483,8 +485,6 @@ static void __kprobes do_trap(int trapnr, int signr, char *str,
{
	struct task_struct *tsk = current;

	conditional_sti(regs);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

@@ -521,6 +521,7 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
			== NOTIFY_STOP) \
		return; \
	conditional_sti(regs); \
	do_trap(trapnr, signr, str, regs, error_code, NULL); \
}

@@ -535,6 +536,7 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
			== NOTIFY_STOP) \
		return; \
	conditional_sti(regs); \
	do_trap(trapnr, signr, str, regs, error_code, &info); \
}

@@ -548,7 +550,17 @@ DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR(18, SIGSEGV, "reserved", reserved)
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)

/* Runs on IST stack */
asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
			12, SIGBUS) == NOTIFY_STOP)
		return;
	preempt_conditional_sti(regs);
	do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}

asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
{

@@ -682,8 +694,9 @@ asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
		return;
	}
	preempt_conditional_sti(regs);
	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
	return;
	preempt_conditional_cli(regs);
}

/* Help handler running on IST stack to switch back to user stack
@@ -34,7 +34,10 @@ static nodemask_t nodes_found __initdata;
static struct bootnode nodes[MAX_NUMNODES] __initdata;
static struct bootnode nodes_add[MAX_NUMNODES] __initdata;
static int found_add_area __initdata;
int hotadd_percent __initdata = 10;
int hotadd_percent __initdata = 0;
#ifndef RESERVE_HOTADD
#define hotadd_percent 0	/* Ignore all settings */
#endif
static u8 pxm2node[256] = { [0 ... 255] = 0xff };

/* Too small nodes confuse the VM badly. Usually they result

@@ -103,6 +106,7 @@ static __init void bad_srat(void)
	int i;
	printk(KERN_ERR "SRAT: SRAT not used.\n");
	acpi_numa = -1;
	found_add_area = 0;
	for (i = 0; i < MAX_LOCAL_APIC; i++)
		apicid_to_node[i] = NUMA_NO_NODE;
	for (i = 0; i < MAX_NUMNODES; i++)

@@ -154,7 +158,8 @@ acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
	int pxm, node;
	if (srat_disabled())
		return;
	if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) { bad_srat();
	if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {
		bad_srat();
		return;
	}
	if (pa->flags.enabled == 0)

@@ -191,15 +196,17 @@ static int hotadd_enough_memory(struct bootnode *nd)
	allowed = (end_pfn - e820_hole_size(0, end_pfn)) * PAGE_SIZE;
	allowed = (allowed / 100) * hotadd_percent;
	if (allocated + mem > allowed) {
		unsigned long range;
		/* Give them at least part of their hotadd memory upto hotadd_percent
		   It would be better to spread the limit out
		   over multiple hotplug areas, but that is too complicated
		   right now */
		if (allocated >= allowed)
			return 0;
		pages = (allowed - allocated + mem) / sizeof(struct page);
		range = allowed - allocated;
		pages = (range / PAGE_SIZE);
		mem = pages * sizeof(struct page);
		nd->end = nd->start + pages*PAGE_SIZE;
		nd->end = nd->start + range;
	}
	/* Not completely fool proof, but a good sanity check */
	addr = find_e820_area(last_area_end, end_pfn<<PAGE_SHIFT, mem);
@@ -291,7 +291,7 @@ config SX

config RIO
	tristate "Specialix RIO system support"
	depends on SERIAL_NONSTANDARD && !64BIT
	depends on SERIAL_NONSTANDARD
	help
	  This is a driver for the Specialix RIO, a smart serial card which
	  drives an outboard box that can support up to 128 ports. Product
@@ -33,12 +33,6 @@
#ifndef __rio_host_h__
#define __rio_host_h__

#ifdef SCCS_LABELS
#ifndef lint
static char *_host_h_sccs_ = "@(#)host.h 1.2";
#endif
#endif

/*
** the host structure - one per host card in the system.
*/

@@ -77,9 +71,6 @@ struct Host {
#define RC_STARTUP 1
#define RC_RUNNING 2
#define RC_STUFFED 3
#define RC_SOMETHING 4
#define RC_SOMETHING_NEW 5
#define RC_SOMETHING_ELSE 6
#define RC_READY 7
#define RUN_STATE 7
/*
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <linux/termios.h>
#include <linux/serial.h>
#include <linux/vmalloc.h>
#include <asm/semaphore.h>
#include <linux/generic_serial.h>
#include <linux/errno.h>
@@ -1394,14 +1394,17 @@ int RIOPreemptiveCmd(struct rio_info *p, struct Port *PortP, u8 Cmd)
		return RIO_FAIL;
	}

	if (((int) ((char) PortP->InUse) == -1) || !(CmdBlkP = RIOGetCmdBlk())) {
		rio_dprintk(RIO_DEBUG_CTRL, "Cannot allocate command block for command %d on port %d\n", Cmd, PortP->PortNum);
	if ((PortP->InUse == (typeof(PortP->InUse))-1) ||
			!(CmdBlkP = RIOGetCmdBlk())) {
		rio_dprintk(RIO_DEBUG_CTRL, "Cannot allocate command block "
				"for command %d on port %d\n", Cmd, PortP->PortNum);
		return RIO_FAIL;
	}

	rio_dprintk(RIO_DEBUG_CTRL, "Command blk %p - InUse now %d\n", CmdBlkP, PortP->InUse);
	rio_dprintk(RIO_DEBUG_CTRL, "Command blk %p - InUse now %d\n",
			CmdBlkP, PortP->InUse);

	PktCmdP = (struct PktCmd_M *) &CmdBlkP->Packet.data[0];
	PktCmdP = (struct PktCmd_M *)&CmdBlkP->Packet.data[0];

	CmdBlkP->Packet.src_unit = 0;
	if (PortP->SecondBlock)

@@ -1425,38 +1428,46 @@ int RIOPreemptiveCmd(struct rio_info *p, struct Port *PortP, u8 Cmd)

	switch (Cmd) {
	case MEMDUMP:
		rio_dprintk(RIO_DEBUG_CTRL, "Queue MEMDUMP command blk %p (addr 0x%x)\n", CmdBlkP, (int) SubCmd.Addr);
		rio_dprintk(RIO_DEBUG_CTRL, "Queue MEMDUMP command blk %p "
				"(addr 0x%x)\n", CmdBlkP, (int) SubCmd.Addr);
		PktCmdP->SubCommand = MEMDUMP;
		PktCmdP->SubAddr = SubCmd.Addr;
		break;
	case FCLOSE:
		rio_dprintk(RIO_DEBUG_CTRL, "Queue FCLOSE command blk %p\n", CmdBlkP);
		rio_dprintk(RIO_DEBUG_CTRL, "Queue FCLOSE command blk %p\n",
				CmdBlkP);
		break;
	case READ_REGISTER:
		rio_dprintk(RIO_DEBUG_CTRL, "Queue READ_REGISTER (0x%x) command blk %p\n", (int) SubCmd.Addr, CmdBlkP);
		rio_dprintk(RIO_DEBUG_CTRL, "Queue READ_REGISTER (0x%x) "
				"command blk %p\n", (int) SubCmd.Addr, CmdBlkP);
		PktCmdP->SubCommand = READ_REGISTER;
		PktCmdP->SubAddr = SubCmd.Addr;
		break;
	case RESUME:
		rio_dprintk(RIO_DEBUG_CTRL, "Queue RESUME command blk %p\n", CmdBlkP);
		rio_dprintk(RIO_DEBUG_CTRL, "Queue RESUME command blk %p\n",
				CmdBlkP);
		break;
	case RFLUSH:
		rio_dprintk(RIO_DEBUG_CTRL, "Queue RFLUSH command blk %p\n", CmdBlkP);
		rio_dprintk(RIO_DEBUG_CTRL, "Queue RFLUSH command blk %p\n",
				CmdBlkP);
		CmdBlkP->PostFuncP = RIORFlushEnable;
		break;
	case SUSPEND:
		rio_dprintk(RIO_DEBUG_CTRL, "Queue SUSPEND command blk %p\n", CmdBlkP);
		rio_dprintk(RIO_DEBUG_CTRL, "Queue SUSPEND command blk %p\n",
				CmdBlkP);
		break;

	case MGET:
		rio_dprintk(RIO_DEBUG_CTRL, "Queue MGET command blk %p\n", CmdBlkP);
		rio_dprintk(RIO_DEBUG_CTRL, "Queue MGET command blk %p\n",
				CmdBlkP);
		break;

	case MSET:
	case MBIC:
	case MBIS:
		CmdBlkP->Packet.data[4] = (char) PortP->ModemLines;
		rio_dprintk(RIO_DEBUG_CTRL, "Queue MSET/MBIC/MBIS command blk %p\n", CmdBlkP);
		rio_dprintk(RIO_DEBUG_CTRL, "Queue MSET/MBIC/MBIS command "
				"blk %p\n", CmdBlkP);
		break;

	case WFLUSH:

@@ -1465,12 +1476,14 @@ int RIOPreemptiveCmd(struct rio_info *p, struct Port *PortP, u8 Cmd)
		** allowed then we should not bother sending any more to the
		** RTA.
		*/
		if ((int) ((char) PortP->WflushFlag) == (int) -1) {
			rio_dprintk(RIO_DEBUG_CTRL, "Trashed WFLUSH, WflushFlag about to wrap!");
		if (PortP->WflushFlag == (typeof(PortP->WflushFlag))-1) {
			rio_dprintk(RIO_DEBUG_CTRL, "Trashed WFLUSH, "
					"WflushFlag about to wrap!");
			RIOFreeCmdBlk(CmdBlkP);
			return (RIO_FAIL);
		} else {
			rio_dprintk(RIO_DEBUG_CTRL, "Queue WFLUSH command blk %p\n", CmdBlkP);
			rio_dprintk(RIO_DEBUG_CTRL, "Queue WFLUSH command "
					"blk %p\n", CmdBlkP);
			CmdBlkP->PostFuncP = RIOWFlushMark;
		}
		break;
@@ -33,10 +33,6 @@
#ifndef __rioioctl_h__
#define __rioioctl_h__

#ifdef SCCS_LABELS
static char *_rioioctl_h_sccs_ = "@(#)rioioctl.h 1.2";
#endif

/*
** RIO device driver - user ioctls and associated structures.
*/

@@ -44,55 +40,13 @@ static char *_rioioctl_h_sccs_ = "@(#)rioioctl.h 1.2";
struct portStats {
	int port;
	int gather;
	ulong txchars;
	ulong rxchars;
	ulong opens;
	ulong closes;
	ulong ioctls;
	unsigned long txchars;
	unsigned long rxchars;
	unsigned long opens;
	unsigned long closes;
	unsigned long ioctls;
};


#define rIOC	('r'<<8)
#define TCRIOSTATE	(rIOC | 1)
#define TCRIOXPON	(rIOC | 2)
#define TCRIOXPOFF	(rIOC | 3)
#define TCRIOXPCPS	(rIOC | 4)
#define TCRIOXPRINT	(rIOC | 5)
#define TCRIOIXANYON	(rIOC | 6)
#define TCRIOIXANYOFF	(rIOC | 7)
#define TCRIOIXONON	(rIOC | 8)
#define TCRIOIXONOFF	(rIOC | 9)
#define TCRIOMBIS	(rIOC | 10)
#define TCRIOMBIC	(rIOC | 11)
#define TCRIOTRIAD	(rIOC | 12)
#define TCRIOTSTATE	(rIOC | 13)

/*
** 15.10.1998 ARG - ESIL 0761 part fix
** Add RIO ioctls for manipulating RTS and CTS flow control, (as LynxOS
** appears to not support hardware flow control).
*/
#define TCRIOCTSFLOWEN	(rIOC | 14)	/* enable CTS flow control */
#define TCRIOCTSFLOWDIS	(rIOC | 15)	/* disable CTS flow control */
#define TCRIORTSFLOWEN	(rIOC | 16)	/* enable RTS flow control */
#define TCRIORTSFLOWDIS	(rIOC | 17)	/* disable RTS flow control */

/*
** 09.12.1998 ARG - ESIL 0776 part fix
** Definition for 'RIOC' also appears in daemon.h, so we'd better do a
** #ifndef here first.
** 'RIO_QUICK_CHECK' also #define'd here as this ioctl is now
** allowed to be used by customers.
**
** 05.02.1999 ARG -
** This is what I've decied to do with ioctls etc., which are intended to be
** invoked from users applications :
** Anything that needs to be defined here will be removed from daemon.h, that
** way it won't end up having to be defined/maintained in two places. The only
** consequence of this is that this file should now be #include'd by daemon.h
**
** 'stats' ioctls now #define'd here as they are to be used by customers.
*/
#define RIOC	('R'<<8)|('i'<<16)|('o'<<24)

#define RIO_QUICK_CHECK	(RIOC | 105)
@@ -22,7 +22,7 @@ config TCG_TPM

config TCG_TIS
	tristate "TPM Interface Specification 1.2 Interface"
	depends on TCG_TPM
	depends on TCG_TPM && PNPACPI
	---help---
	  If you have a TPM security chip that is compliant with the
	  TCG TIS 1.2 TPM specification say Yes and it will be accessible
@@ -140,7 +140,7 @@ extern int tpm_pm_resume(struct device *);
extern struct dentry ** tpm_bios_log_setup(char *);
extern void tpm_bios_log_teardown(struct dentry **);
#else
static inline struct dentry* tpm_bios_log_setup(char *name)
static inline struct dentry ** tpm_bios_log_setup(char *name)
{
	return NULL;
}
@@ -55,7 +55,7 @@ enum tis_int_flags {
};

enum tis_defaults {
	TIS_MEM_BASE = 0xFED4000,
	TIS_MEM_BASE = 0xFED40000,
	TIS_MEM_LEN = 0x5000,
	TIS_SHORT_TIMEOUT = 750,	/* ms */
	TIS_LONG_TIMEOUT = 2000,	/* 2 sec */
@@ -33,11 +33,6 @@
 *	82801E  (C-ICH)     : document number 273599-001, 273645-002,
 *	82801EB (ICH5)      : document number 252516-001, 252517-003,
 *	82801ER (ICH5R)     : document number 252516-001, 252517-003,
 *	82801FB (ICH6)      : document number 301473-002, 301474-007,
 *	82801FR (ICH6R)     : document number 301473-002, 301474-007,
 *	82801FBM (ICH6-M)   : document number 301473-002, 301474-007,
 *	82801FW (ICH6W)     : document number 301473-001, 301474-007,
 *	82801FRW (ICH6RW)   : document number 301473-001, 301474-007
 *
 *  20000710 Nils Faerber
 *	Initial Version 0.01

@@ -66,6 +61,10 @@
 *  20050807 Wim Van Sebroeck <wim@iguana.be>
 *	0.08 Make sure that the watchdog is only "armed" when started.
 *	     (Kernel Bug 4251)
 *  20060416 Wim Van Sebroeck <wim@iguana.be>
 *	0.09 Remove support for the ICH6, ICH6R, ICH6-M, ICH6W and ICH6RW and
 *	     ICH7 chipsets. (See Kernel Bug 6031 - other code will support these
 *	     chipsets)
 */

/*

@@ -90,7 +89,7 @@
#include "i8xx_tco.h"

/* Module and version information */
#define TCO_VERSION "0.08"
#define TCO_VERSION "0.09"
#define TCO_MODULE_NAME "i8xx TCO timer"
#define TCO_DRIVER_NAME   TCO_MODULE_NAME ", v" TCO_VERSION
#define PFX TCO_MODULE_NAME ": "

@@ -391,11 +390,6 @@ static struct pci_device_id i8xx_tco_pci_tbl[] = {
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801E_0, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_2, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0, },			/* End of list */
};
@@ -423,6 +423,12 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
	if (tmr_atboot && started == 0) {
		printk(KERN_INFO PFX "Starting Watchdog Timer\n");
		s3c2410wdt_start();
	} else if (!tmr_atboot) {
		/* if we're not enabling the watchdog, then ensure it is
		 * disabled if it has been left running from the bootloader
		 * or other source */

		s3c2410wdt_stop();
	}

	return 0;
@@ -377,7 +377,7 @@ static int __init sc1200wdt_init(void)
{
	int ret;

	printk(banner);
	printk("%s\n", banner);

	spin_lock_init(&sc1200wdt_lock);
	sema_init(&open_sem, 1);
@@ -392,6 +392,7 @@ static struct pcmcia_device_id ide_ids[] = {
	PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 0x48e0ab8e),
	PCMCIA_DEVICE_PROD_ID12("HITACHI", "FLASH", 0xf4f43949, 0x9eb86aae),
	PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178),
	PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
	PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
	PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2      ", 0x547e66dc, 0x8671043b),
	PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
@@ -553,7 +553,7 @@ static void ohci_initialize(struct ti_ohci *ohci)
	 * register content.
	 * To actually enable physical responses is the job of our interrupt
	 * handler which programs the physical request filter. */
	reg_write(ohci, OHCI1394_PhyUpperBound, 0xffff0000);
	reg_write(ohci, OHCI1394_PhyUpperBound, 0x01000000);

	DBGMSG("physUpperBoundOffset=%08x",
	       reg_read(ohci, OHCI1394_PhyUpperBound));
@@ -42,6 +42,7 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/stringify.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/fs.h>

@@ -117,7 +118,8 @@ MODULE_PARM_DESC(serialize_io, "Serialize I/O coming from scsi drivers (default
 */
static int max_sectors = SBP2_MAX_SECTORS;
module_param(max_sectors, int, 0444);
MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = 255)");
MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = "
		 __stringify(SBP2_MAX_SECTORS) ")");

/*
 * Exclusive login to sbp2 device? In most cases, the sbp2 driver should
@@ -135,18 +137,45 @@ module_param(exclusive_login, int, 0644);
MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device (default = 1)");

/*
 * SCSI inquiry hack for really badly behaved sbp2 devices. Turn this on
 * if your sbp2 device is not properly handling the SCSI inquiry command.
 * This hack makes the inquiry look more like a typical MS Windows inquiry
 * by enforcing 36 byte inquiry and avoiding access to mode_sense page 8.
 * If any of the following workarounds is required for your device to work,
 * please submit the kernel messages logged by sbp2 to the linux1394-devel
 * mailing list.
 *
 * If force_inquiry_hack=1 is required for your device to work,
 * please submit the logged sbp2_firmware_revision value of this device to
 * the linux1394-devel mailing list.
 * - 128kB max transfer
 *   Limit transfer size. Necessary for some old bridges.
 *
 * - 36 byte inquiry
 *   When scsi_mod probes the device, let the inquiry command look like that
 *   from MS Windows.
 *
 * - skip mode page 8
 *   Suppress sending of mode_sense for mode page 8 if the device pretends to
 *   support the SCSI Primary Block commands instead of Reduced Block Commands.
 *
 * - fix capacity
 *   Tell sd_mod to correct the last sector number reported by read_capacity.
 *   Avoids access beyond actual disk limits on devices with an off-by-one bug.
 *   Don't use this with devices which don't have this bug.
 *
 * - override internal blacklist
 *   Instead of adding to the built-in blacklist, use only the workarounds
 *   specified in the module load parameter.
 *   Useful if a blacklist entry interfered with a non-broken device.
 */
static int sbp2_default_workarounds;
module_param_named(workarounds, sbp2_default_workarounds, int, 0644);
MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
	", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS)
	", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36)
	", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
	", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
	", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
	", or a combination)");

/* legacy parameter */
static int force_inquiry_hack;
module_param(force_inquiry_hack, int, 0644);
MODULE_PARM_DESC(force_inquiry_hack, "Force SCSI inquiry hack (default = 0)");
MODULE_PARM_DESC(force_inquiry_hack, "Deprecated, use 'workarounds'");

/*
 * Export information about protocols/devices supported by this driver.

@@ -266,14 +295,55 @@ static struct hpsb_protocol_driver sbp2_driver = {
};

/*
 * List of device firmwares that require the inquiry hack.
 * Yields a few false positives but did not break other devices so far.
 * List of devices with known bugs.
 *
 * The firmware_revision field, masked with 0xffff00, is the best indicator
 * for the type of bridge chip of a device. It yields a few false positives
 * but this did not break correctly behaving devices so far.
 */
static u32 sbp2_broken_inquiry_list[] = {
	0x00002800,	/* Stefan Richter <stefanr@s5r6.in-berlin.de> */
			/* DViCO Momobay CX-1 */
	0x00000200	/* Andreas Plesch <plesch@fas.harvard.edu> */
			/* QPS Fire DVDBurner */
static const struct {
	u32 firmware_revision;
	u32 model_id;
	unsigned workarounds;
} sbp2_workarounds_table[] = {
	/* TSB42AA9 */ {
		.firmware_revision	= 0x002800,
		.workarounds		= SBP2_WORKAROUND_INQUIRY_36 |
					  SBP2_WORKAROUND_MODE_SENSE_8,
	},
	/* Initio bridges, actually only needed for some older ones */ {
		.firmware_revision	= 0x000200,
		.workarounds		= SBP2_WORKAROUND_INQUIRY_36,
	},
	/* Symbios bridge */ {
		.firmware_revision	= 0xa0b800,
		.workarounds		= SBP2_WORKAROUND_128K_MAX_TRANS,
	},
	/*
	 * Note about the following Apple iPod blacklist entries:
	 *
	 * There are iPods (2nd gen, 3rd gen) with model_id==0. Since our
	 * matching logic treats 0 as a wildcard, we cannot match this ID
	 * without rewriting the matching routine. Fortunately these iPods
	 * do not feature the read_capacity bug according to one report.
	 * Read_capacity behaviour as well as model_id could change due to
	 * Apple-supplied firmware updates though.
	 */
	/* iPod 4th generation */ {
		.firmware_revision	= 0x0a2700,
		.model_id		= 0x000021,
		.workarounds		= SBP2_WORKAROUND_FIX_CAPACITY,
	},
	/* iPod mini */ {
		.firmware_revision	= 0x0a2700,
		.model_id		= 0x000023,
		.workarounds		= SBP2_WORKAROUND_FIX_CAPACITY,
	},
	/* iPod Photo */ {
		.firmware_revision	= 0x0a2700,
		.model_id		= 0x00007e,
		.workarounds		= SBP2_WORKAROUND_FIX_CAPACITY,
	}
};

/**************************************
@@ -765,11 +835,16 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud

	/* Register the status FIFO address range. We could use the same FIFO
	 * for targets at different nodes. However we need different FIFOs per
	 * target in order to support multi-unit devices. */
	 * target in order to support multi-unit devices.
	 * The FIFO is located out of the local host controller's physical range
	 * but, if possible, within the posted write area. Status writes will
	 * then be performed as unified transactions. This slightly reduces
	 * bandwidth usage, and some Prolific based devices seem to require it.
	 */
	scsi_id->status_fifo_addr = hpsb_allocate_and_register_addrspace(
			&sbp2_highlevel, ud->ne->host, &sbp2_ops,
			sizeof(struct sbp2_status_block), sizeof(quadlet_t),
			~0ULL, ~0ULL);
			0x010000000000ULL, CSR1212_ALL_SPACE_END);
	if (!scsi_id->status_fifo_addr) {
		SBP2_ERR("failed to allocate status FIFO address range");
		goto failed_alloc;

@@ -1450,7 +1525,8 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
	struct csr1212_dentry *dentry;
	u64 management_agent_addr;
	u32 command_set_spec_id, command_set, unit_characteristics,
	    firmware_revision, workarounds;
	    firmware_revision;
	unsigned workarounds;
	int i;

	SBP2_DEBUG_ENTER();

@@ -1506,12 +1582,8 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
		case SBP2_FIRMWARE_REVISION_KEY:
			/* Firmware revision */
			firmware_revision = kv->value.immediate;
			if (force_inquiry_hack)
				SBP2_INFO("sbp2_firmware_revision = %x",
					  (unsigned int)firmware_revision);
			else
				SBP2_DEBUG("sbp2_firmware_revision = %x",
					   (unsigned int)firmware_revision);
			SBP2_DEBUG("sbp2_firmware_revision = %x",
				   (unsigned int)firmware_revision);
			break;

		default:

@@ -1519,41 +1591,44 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
		}
	}

	/* This is the start of our broken device checking. We try to hack
	 * around oddities and known defects. */
	workarounds = 0x0;

	/* If the vendor id is 0xa0b8 (Symbios vendor id), then we have a
	 * bridge with 128KB max transfer size limitation. For sanity, we
	 * only voice this when the current max_sectors setting
	 * exceeds the 128k limit. By default, that is not the case.
	 *
	 * It would be really nice if we could detect this before the scsi
	 * host gets initialized. That way we can down-force the
	 * max_sectors to account for it. That is not currently
	 * possible. */
	if ((firmware_revision & 0xffff00) ==
			SBP2_128KB_BROKEN_FIRMWARE &&
			(max_sectors * 512) > (128*1024)) {
		SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB max transfer size.",
				NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid));
		SBP2_WARN("WARNING: Current max_sectors setting is larger than 128KB (%d sectors)!",
				max_sectors);
		workarounds |= SBP2_BREAKAGE_128K_MAX_TRANSFER;
	workarounds = sbp2_default_workarounds;
	if (force_inquiry_hack) {
		SBP2_WARN("force_inquiry_hack is deprecated. "
			  "Use parameter 'workarounds' instead.");
		workarounds |= SBP2_WORKAROUND_INQUIRY_36;
	}

	/* Check for a blacklisted set of devices that require us to force
	 * a 36 byte host inquiry. This can be overriden as a module param
	 * (to force all hosts). */
	for (i = 0; i < ARRAY_SIZE(sbp2_broken_inquiry_list); i++) {
		if ((firmware_revision & 0xffff00) ==
				sbp2_broken_inquiry_list[i]) {
			SBP2_WARN("Node " NODE_BUS_FMT ": Using 36byte inquiry workaround",
					NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid));
			workarounds |= SBP2_BREAKAGE_INQUIRY_HACK;
			break;	/* No need to continue. */
	if (!(workarounds & SBP2_WORKAROUND_OVERRIDE))
		for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
			if (sbp2_workarounds_table[i].firmware_revision &&
			    sbp2_workarounds_table[i].firmware_revision !=
			    (firmware_revision & 0xffff00))
				continue;
			if (sbp2_workarounds_table[i].model_id &&
			    sbp2_workarounds_table[i].model_id != ud->model_id)
				continue;
			workarounds |= sbp2_workarounds_table[i].workarounds;
			break;
		}
	}

	if (workarounds)
		SBP2_INFO("Workarounds for node " NODE_BUS_FMT ": 0x%x "
			  "(firmware_revision 0x%06x, vendor_id 0x%06x,"
			  " model_id 0x%06x)",
			  NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
			  workarounds, firmware_revision,
			  ud->vendor_id ? ud->vendor_id : ud->ne->vendor_id,
			  ud->model_id);

	/* We would need one SCSI host template for each target to adjust
	 * max_sectors on the fly, therefore warn only. */
	if (workarounds & SBP2_WORKAROUND_128K_MAX_TRANS &&
	    (max_sectors * 512) > (128 * 1024))
		SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB "
			  "max transfer size. WARNING: Current max_sectors "
			  "setting is larger than 128KB (%d sectors)",
			  NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
			  max_sectors);

	/* If this is a logical unit directory entry, process the parent
	 * to get the values. */
@ -2447,19 +2522,25 @@ static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
|
||||
|
||||
scsi_id->sdev = sdev;
|
||||
|
||||
if (force_inquiry_hack ||
|
||||
scsi_id->workarounds & SBP2_BREAKAGE_INQUIRY_HACK) {
|
||||
if (scsi_id->workarounds & SBP2_WORKAROUND_INQUIRY_36)
|
||||
sdev->inquiry_len = 36;
|
||||
sdev->skip_ms_page_8 = 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sbp2scsi_slave_configure(struct scsi_device *sdev)
|
||||
{
|
||||
struct scsi_id_instance_data *scsi_id =
|
||||
(struct scsi_id_instance_data *)sdev->host->hostdata[0];
|
||||
|
||||
blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
|
||||
sdev->use_10_for_rw = 1;
|
||||
sdev->use_10_for_ms = 1;
|
||||
|
||||
if (sdev->type == TYPE_DISK &&
|
||||
scsi_id->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
|
||||
sdev->skip_ms_page_8 = 1;
|
||||
if (scsi_id->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
|
||||
sdev->fix_capacity = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -2603,7 +2684,9 @@ static int sbp2_module_init(void)
|
||||
scsi_driver_template.cmd_per_lun = 1;
|
||||
}
|
||||
|
||||
/* Set max sectors (module load option). Default is 255 sectors. */
|
||||
if (sbp2_default_workarounds & SBP2_WORKAROUND_128K_MAX_TRANS &&
|
||||
(max_sectors * 512) > (128 * 1024))
|
||||
max_sectors = 128 * 1024 / 512;
|
||||
scsi_driver_template.max_sectors = max_sectors;
|
||||
|
||||
/* Register our high level driver with 1394 stack */
|
||||
|
@ -226,11 +226,6 @@ struct sbp2_status_block {
#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e
#define SBP2_SW_VERSION_ENTRY 0x00010483

/*
* Other misc defines
*/
#define SBP2_128KB_BROKEN_FIRMWARE 0xa0b800

/*
* SCSI specific stuff
*/
@ -239,6 +234,13 @@ struct sbp2_status_block {
#define SBP2_MAX_SECTORS 255 /* Max sectors supported */
#define SBP2_MAX_CMDS 8 /* This should be safe */

/* Flags for detected oddities and brokenness */
#define SBP2_WORKAROUND_128K_MAX_TRANS 0x1
#define SBP2_WORKAROUND_INQUIRY_36 0x2
#define SBP2_WORKAROUND_MODE_SENSE_8 0x4
#define SBP2_WORKAROUND_FIX_CAPACITY 0x8
#define SBP2_WORKAROUND_OVERRIDE 0x100

/* This is the two dma types we use for cmd_dma below */
enum cmd_dma_types {
CMD_DMA_NONE,
@ -268,10 +270,6 @@ struct sbp2_command_info {

};

/* A list of flags for detected oddities and brokenness. */
#define SBP2_BREAKAGE_128K_MAX_TRANSFER 0x1
#define SBP2_BREAKAGE_INQUIRY_HACK 0x2

struct sbp2scsi_host_info;

/*
@ -345,7 +343,7 @@ struct scsi_id_instance_data {
struct Scsi_Host *scsi_host;

/* Device specific workarounds/brokenness */
u32 workarounds;
unsigned workarounds;
};

/* Sbp2 host data structure (one per IEEE1394 host) */

@ -211,8 +211,10 @@ void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem)
*/

work = kmalloc(sizeof *work, GFP_KERNEL);
if (!work)
if (!work) {
mmput(mm);
return;
}

INIT_WORK(&work->work, ib_umem_account, work);
work->mm = mm;

@ -182,7 +182,7 @@ struct mthca_cmd_context {
u8 status;
};

static int fw_cmd_doorbell = 1;
static int fw_cmd_doorbell = 0;
module_param(fw_cmd_doorbell, int, 0644);
MODULE_PARM_DESC(fw_cmd_doorbell, "post FW commands through doorbell page if nonzero "
"(and supported by FW)");

@ -1727,23 +1727,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,

ind = qp->rq.next_ind;

for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
nreq = 0;

doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
doorbell[1] = cpu_to_be32(qp->qpn << 8);

wmb();

mthca_write64(doorbell,
dev->kar + MTHCA_RECEIVE_DOORBELL,
MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
size0 = 0;
}

for (nreq = 0; wr; wr = wr->next) {
if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
mthca_err(dev, "RQ %06x full (%u head, %u tail,"
" %d max, %d nreq)\n", qp->qpn,
@ -1797,6 +1781,23 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
++ind;
if (unlikely(ind >= qp->rq.max))
ind -= qp->rq.max;

++nreq;
if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
nreq = 0;

doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
doorbell[1] = cpu_to_be32(qp->qpn << 8);

wmb();

mthca_write64(doorbell,
dev->kar + MTHCA_RECEIVE_DOORBELL,
MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
size0 = 0;
}
}

out:

@ -340,7 +340,10 @@ static void srp_disconnect_target(struct srp_target_port *target)
/* XXX should send SRP_I_LOGOUT request */

init_completion(&target->done);
ib_send_cm_dreq(target->cm_id, NULL, 0);
if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
printk(KERN_DEBUG PFX "Sending CM DREQ failed\n");
return;
}
wait_for_completion(&target->done);
}

@ -351,7 +354,6 @@ static void srp_remove_work(void *target_ptr)
spin_lock_irq(target->scsi_host->host_lock);
if (target->state != SRP_TARGET_DEAD) {
spin_unlock_irq(target->scsi_host->host_lock);
scsi_host_put(target->scsi_host);
return;
}
target->state = SRP_TARGET_REMOVED;
@ -365,8 +367,6 @@ static void srp_remove_work(void *target_ptr)
ib_destroy_cm_id(target->cm_id);
srp_free_target_ib(target);
scsi_host_put(target->scsi_host);
/* And another put to really free the target port... */
scsi_host_put(target->scsi_host);
}

static int srp_connect_target(struct srp_target_port *target)
@ -1241,7 +1241,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
list_for_each_entry_safe(req, tmp, &target->req_queue, list)
if (req->scmnd->device == scmnd->device) {
req->scmnd->result = DID_RESET << 16;
scmnd->scsi_done(scmnd);
req->scmnd->scsi_done(req->scmnd);
srp_remove_req(target, req);
}

@ -1499,7 +1499,6 @@ static int __init capi_init(void)
printk(KERN_ERR "capi20: unable to get major %d\n", capi_major);
return major_ret;
}
capi_major = major_ret;
capi_class = class_create(THIS_MODULE, "capi");
if (IS_ERR(capi_class)) {
unregister_chrdev(capi_major, "capi20");

@ -710,8 +710,8 @@ static int gigaset_probe(struct usb_interface *interface,
retval = -ENODEV; //FIXME

/* See if the device offered us matches what we can accept */
if ((le16_to_cpu(udev->descriptor.idVendor != USB_M105_VENDOR_ID)) ||
(le16_to_cpu(udev->descriptor.idProduct != USB_M105_PRODUCT_ID)))
if ((le16_to_cpu(udev->descriptor.idVendor) != USB_M105_VENDOR_ID) ||
(le16_to_cpu(udev->descriptor.idProduct) != USB_M105_PRODUCT_ID))
return -ENODEV;

/* this starts to become ascii art... */

@ -4,8 +4,11 @@ menu "LED devices"
config NEW_LEDS
bool "LED Support"
help
Say Y to enable Linux LED support. This is not related to standard
keyboard LEDs which are controlled via the input system.
Say Y to enable Linux LED support. This allows control of supported
LEDs from both userspace and optionally, by kernel events (triggers).

This is not related to standard keyboard LEDs which are controlled
via the input system.

config LEDS_CLASS
tristate "LED Class Support"

@ -19,6 +19,7 @@
#include <linux/sysdev.h>
#include <linux/timer.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/leds.h>
#include "leds.h"

@ -43,9 +44,13 @@ static ssize_t led_brightness_store(struct class_device *dev,
ssize_t ret = -EINVAL;
char *after;
unsigned long state = simple_strtoul(buf, &after, 10);
size_t count = after - buf;

if (after - buf > 0) {
ret = after - buf;
if (*after && isspace(*after))
count++;

if (count == size) {
ret = count;
led_set_brightness(led_cdev, state);
}

@ -20,6 +20,7 @@
#include <linux/device.h>
#include <linux/sysdev.h>
#include <linux/timer.h>
#include <linux/ctype.h>
#include <linux/leds.h>
#include "leds.h"

@ -69,11 +70,15 @@ static ssize_t led_delay_on_store(struct class_device *dev, const char *buf,
int ret = -EINVAL;
char *after;
unsigned long state = simple_strtoul(buf, &after, 10);
size_t count = after - buf;

if (after - buf > 0) {
if (*after && isspace(*after))
count++;

if (count == size) {
timer_data->delay_on = state;
mod_timer(&timer_data->timer, jiffies + 1);
ret = after - buf;
ret = count;
}

return ret;
@ -97,11 +102,15 @@ static ssize_t led_delay_off_store(struct class_device *dev, const char *buf,
int ret = -EINVAL;
char *after;
unsigned long state = simple_strtoul(buf, &after, 10);
size_t count = after - buf;

if (after - buf > 0) {
if (*after && isspace(*after))
count++;

if (count == size) {
timer_data->delay_off = state;
mod_timer(&timer_data->timer, jiffies + 1);
ret = after - buf;
ret = count;
}

return ret;

@ -310,7 +310,7 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
}
else
data->bytes_xfered =
(data->blocks * (1 << data->blksz_bits)) -
(data->blocks * data->blksz) -
host->pio.len;
}

@ -575,7 +575,7 @@ static int
au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data)
{

int datalen = data->blocks * (1 << data->blksz_bits);
int datalen = data->blocks * data->blksz;

if (dma != 0)
host->flags |= HOST_F_DMA;
@ -596,7 +596,7 @@ au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data)
if (host->dma.len == 0)
return MMC_ERR_TIMEOUT;

au_writel((1 << data->blksz_bits) - 1, HOST_BLKSIZE(host));
au_writel(data->blksz - 1, HOST_BLKSIZE(host));

if (host->flags & HOST_F_DMA) {
int i;

@ -218,8 +218,10 @@ static int imxmci_busy_wait_for_status(struct imxmci_host *host,
if(!loops)
return 0;

dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n",
loops, where, *pstat, stat_mask);
/* The busy-wait is expected there for clock <8MHz due to SDHC hardware flaws */
if(!(stat_mask & STATUS_END_CMD_RESP) || (host->mmc->ios.clock>=8000000))
dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n",
loops, where, *pstat, stat_mask);
return loops;
}

@ -333,6 +335,9 @@ static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd,
WARN_ON(host->cmd != NULL);
host->cmd = cmd;

/* Ensure that the clock is stopped, else command programming and start fails */
imxmci_stop_clock(host);

if (cmd->flags & MMC_RSP_BUSY)
cmdat |= CMD_DAT_CONT_BUSY;

@ -553,7 +558,7 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
int trans_done = 0;
unsigned int stat = *pstat;

if(host->actual_bus_width == MMC_BUS_WIDTH_4)
if(host->actual_bus_width != MMC_BUS_WIDTH_4)
burst_len = 16;
else
burst_len = 64;
@ -591,8 +596,7 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
stat = MMC_STATUS;

/* Flush extra bytes from FIFO */
while(flush_len >= 2){
flush_len -= 2;
while(flush_len && !(stat & STATUS_DATA_TRANS_DONE)){
i = MMC_BUFFER_ACCESS;
stat = MMC_STATUS;
stat &= ~STATUS_CRC_READ_ERR; /* Stupid but required there */
@ -746,10 +750,6 @@ static void imxmci_tasklet_fnc(unsigned long data)
data_dir_mask = STATUS_DATA_TRANS_DONE;
}

imxmci_busy_wait_for_status(host, &stat,
data_dir_mask,
50, "imxmci_tasklet_fnc data");

if(stat & data_dir_mask) {
clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
imxmci_data_done(host, stat);
@ -865,7 +865,11 @@ static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)

imxmci_stop_clock(host);
MMC_CLK_RATE = (prescaler<<3) | clk;
imxmci_start_clock(host);
/*
* To my understanding, the clock should not be started here, because it would
* initiate the SDHC sequencer and send a last or random command into the card
*/
/*imxmci_start_clock(host);*/

dev_dbg(mmc_dev(host->mmc), "MMC_CLK_RATE: 0x%08x\n", MMC_CLK_RATE);
} else {

@ -951,6 +951,7 @@ static void mmc_read_scrs(struct mmc_host *host)
data.timeout_ns = card->csd.tacc_ns * 10;
data.timeout_clks = card->csd.tacc_clks * 10;
data.blksz_bits = 3;
data.blksz = 1 << 3;
data.blocks = 1;
data.flags = MMC_DATA_READ;
data.sg = &sg;

@ -175,6 +175,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
brq.data.timeout_ns = card->csd.tacc_ns * 10;
brq.data.timeout_clks = card->csd.tacc_clks * 10;
brq.data.blksz_bits = md->block_bits;
brq.data.blksz = 1 << md->block_bits;
brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
brq.stop.opcode = MMC_STOP_TRANSMISSION;
brq.stop.arg = 0;

@ -119,7 +119,7 @@ static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
nob = 0xffff;

writel(nob, host->base + MMC_NOB);
writel(1 << data->blksz_bits, host->base + MMC_BLKLEN);
writel(data->blksz, host->base + MMC_BLKLEN);

clks = (unsigned long long)data->timeout_ns * CLOCKRATE;
do_div(clks, 1000000000UL);
@ -283,7 +283,7 @@ static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
* data blocks as being in error.
*/
if (data->error == MMC_ERR_NONE)
data->bytes_xfered = data->blocks << data->blksz_bits;
data->bytes_xfered = data->blocks * data->blksz;
else
data->bytes_xfered = 0;

@ -662,14 +662,14 @@ static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data)
unsigned long dmaflags;

DBGF("blksz %04x blks %04x flags %08x\n",
1 << data->blksz_bits, data->blocks, data->flags);
data->blksz, data->blocks, data->flags);
DBGF("tsac %d ms nsac %d clk\n",
data->timeout_ns / 1000000, data->timeout_clks);

/*
* Calculate size.
*/
host->size = data->blocks << data->blksz_bits;
host->size = data->blocks * data->blksz;

/*
* Check timeout values for overflow.
@ -696,12 +696,12 @@ static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data)
* Two bytes are needed for each data line.
*/
if (host->bus_width == MMC_BUS_WIDTH_1) {
blksize = (1 << data->blksz_bits) + 2;
blksize = data->blksz + 2;

wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0);
wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
} else if (host->bus_width == MMC_BUS_WIDTH_4) {
blksize = (1 << data->blksz_bits) + 2 * 4;
blksize = data->blksz + 2 * 4;

wbsd_write_index(host, WBSD_IDX_PBSMSB,
((blksize >> 4) & 0xF0) | WBSD_DATA_WIDTH);

@ -650,9 +650,11 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)

/* Hardware bug work-around, the chip is unable to do PCI DMA
to/from anything above 1GB :-( */
if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
if (dma_mapping_error(mapping) ||
mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
/* Sigh... */
pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
if (!dma_mapping_error(mapping))
pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(skb);
skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA);
if (skb == NULL)
@ -660,8 +662,10 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
mapping = pci_map_single(bp->pdev, skb->data,
RX_PKT_BUF_SZ,
PCI_DMA_FROMDEVICE);
if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
if (dma_mapping_error(mapping) ||
mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
if (!dma_mapping_error(mapping))
pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(skb);
return -ENOMEM;
}
@ -967,9 +971,10 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
}

mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
if (mapping + len > B44_DMA_MASK) {
if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
/* Chip can't handle DMA to/from >1GB, use bounce buffer */
pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
if (!dma_mapping_error(mapping))
pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);

bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
GFP_ATOMIC|GFP_DMA);
@ -978,8 +983,9 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)

mapping = pci_map_single(bp->pdev, bounce_skb->data,
len, PCI_DMA_TODEVICE);
if (mapping + len > B44_DMA_MASK) {
pci_unmap_single(bp->pdev, mapping,
if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
if (!dma_mapping_error(mapping))
pci_unmap_single(bp->pdev, mapping,
len, PCI_DMA_TODEVICE);
dev_kfree_skb_any(bounce_skb);
goto err_out;
@ -1203,7 +1209,8 @@ static int b44_alloc_consistent(struct b44 *bp)
DMA_TABLE_BYTES,
DMA_BIDIRECTIONAL);

if (rx_ring_dma + size > B44_DMA_MASK) {
if (dma_mapping_error(rx_ring_dma) ||
rx_ring_dma + size > B44_DMA_MASK) {
kfree(rx_ring);
goto out_err;
}
@ -1229,7 +1236,8 @@ static int b44_alloc_consistent(struct b44 *bp)
DMA_TABLE_BYTES,
DMA_TO_DEVICE);

if (tx_ring_dma + size > B44_DMA_MASK) {
if (dma_mapping_error(tx_ring_dma) ||
tx_ring_dma + size > B44_DMA_MASK) {
kfree(tx_ring);
goto out_err;
}

@ -53,6 +53,7 @@
#define DRV_VERSION "v1.17b"
#define DRV_RELDATE "2006/03/10"
#include "dl2k.h"
#include <linux/dma-mapping.h>

static char version[] __devinitdata =
KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";

@ -149,6 +149,8 @@ static void enp2611_check_link_status(unsigned long __dummy)
int status;

dev = nds[i];
if (dev == NULL)
continue;

status = pm3386_is_link_up(i);
if (status && !netif_carrier_ok(dev)) {
@ -191,6 +193,7 @@ static void enp2611_set_port_admin_status(int port, int up)

static int __init enp2611_init_module(void)
{
int ports;
int i;

if (!machine_is_enp2611())
@ -199,7 +202,8 @@ static int __init enp2611_init_module(void)
caleb_reset();
pm3386_reset();

for (i = 0; i < 3; i++) {
ports = pm3386_port_count();
for (i = 0; i < ports; i++) {
nds[i] = ixpdev_alloc(i, sizeof(struct enp2611_ixpdev_priv));
if (nds[i] == NULL) {
while (--i >= 0)
@ -215,9 +219,10 @@ static int __init enp2611_init_module(void)

ixp2400_msf_init(&enp2611_msf_parameters);

if (ixpdev_init(3, nds, enp2611_set_port_admin_status)) {
for (i = 0; i < 3; i++)
free_netdev(nds[i]);
if (ixpdev_init(ports, nds, enp2611_set_port_admin_status)) {
for (i = 0; i < ports; i++)
if (nds[i])
free_netdev(nds[i]);
return -EINVAL;
}

@ -86,40 +86,53 @@ static void pm3386_port_reg_write(int port, int _reg, int spacing, u16 value)
pm3386_reg_write(port >> 1, reg, value);
}

int pm3386_secondary_present(void)
{
return pm3386_reg_read(1, 0) == 0x3386;
}

void pm3386_reset(void)
{
u8 mac[3][6];
int secondary;

secondary = pm3386_secondary_present();

/* Save programmed MAC addresses. */
pm3386_get_mac(0, mac[0]);
pm3386_get_mac(1, mac[1]);
pm3386_get_mac(2, mac[2]);
if (secondary)
pm3386_get_mac(2, mac[2]);

/* Assert analog and digital reset. */
pm3386_reg_write(0, 0x002, 0x0060);
pm3386_reg_write(1, 0x002, 0x0060);
if (secondary)
pm3386_reg_write(1, 0x002, 0x0060);
mdelay(1);

/* Deassert analog reset. */
pm3386_reg_write(0, 0x002, 0x0062);
pm3386_reg_write(1, 0x002, 0x0062);
if (secondary)
pm3386_reg_write(1, 0x002, 0x0062);
mdelay(10);

/* Deassert digital reset. */
pm3386_reg_write(0, 0x002, 0x0063);
pm3386_reg_write(1, 0x002, 0x0063);
if (secondary)
pm3386_reg_write(1, 0x002, 0x0063);
mdelay(10);

/* Restore programmed MAC addresses. */
pm3386_set_mac(0, mac[0]);
pm3386_set_mac(1, mac[1]);
pm3386_set_mac(2, mac[2]);
if (secondary)
pm3386_set_mac(2, mac[2]);

/* Disable carrier on all ports. */
pm3386_set_carrier(0, 0);
pm3386_set_carrier(1, 0);
pm3386_set_carrier(2, 0);
if (secondary)
pm3386_set_carrier(2, 0);
}

static u16 swaph(u16 x)
@ -127,6 +140,11 @@ static u16 swaph(u16 x)
return ((x << 8) | (x >> 8)) & 0xffff;
}

int pm3386_port_count(void)
{
return 2 + pm3386_secondary_present();
}

void pm3386_init_port(int port)
{
int pm = port >> 1;

@ -13,6 +13,7 @@
#define __PM3386_H

void pm3386_reset(void);
int pm3386_port_count(void);
void pm3386_init_port(int port);
void pm3386_get_mac(int port, u8 *mac);
void pm3386_set_mac(int port, u8 *mac);

@ -1020,8 +1020,19 @@ static int sky2_up(struct net_device *dev)
struct sky2_hw *hw = sky2->hw;
unsigned port = sky2->port;
u32 ramsize, rxspace, imask;
int err = -ENOMEM;
int err;
struct net_device *otherdev = hw->dev[sky2->port^1];

/* Block bringing up both ports at the same time on a dual port card.
* There is an unfixed bug where receiver gets confused and picks up
* packets out of order. Until this is fixed, prevent data corruption.
*/
if (otherdev && netif_running(otherdev)) {
printk(KERN_INFO PFX "dual port support is disabled.\n");
return -EBUSY;
}

err = -ENOMEM;
if (netif_msg_ifup(sky2))
printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);

@ -634,6 +634,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_vi
* non-x86 architectures (yes Via exists on PPC among other places),
* we must mask the PCI_INTERRUPT_LINE value versus 0xf to get
* interrupts delivered properly.
*
* Some of the on-chip devices are actually '586 devices' so they are
* listed here.
*/
static void quirk_via_irq(struct pci_dev *dev)
{
@ -648,6 +651,10 @@ static void quirk_via_irq(struct pci_dev *dev)
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq);
}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_via_irq);
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1, quirk_via_irq);
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_2, quirk_via_irq);
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_irq);
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_irq);
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_irq);
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_5, quirk_via_irq);
@ -895,6 +902,7 @@ static void __init k8t_sound_hostbridge(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, k8t_sound_hostbridge);

#ifndef CONFIG_ACPI_SLEEP
/*
* On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge
* is not activated. The myth is that Asus said that they do not want the
@ -906,8 +914,12 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, k8t_sound_ho
* bridge. Unfortunately, this device has no subvendor/subdevice ID. So it
* becomes necessary to do this tweak in two steps -- I've chosen the Host
* bridge as trigger.
*
* Actually, leaving it unhidden and not redoing the quirk over suspend2ram
* will cause thermal management to break down, causing the machine to
* overheat.
*/
static int __initdata asus_hides_smbus = 0;
static int __initdata asus_hides_smbus;

static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
{
@ -1050,6 +1062,8 @@ static void __init asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6 );

#endif

/*
* SiS 96x south bridge: BIOS typically hides SMBus device...
*/

@ -426,7 +426,7 @@ static int ds_open(struct inode *inode, struct file *file)

if (!warning_printed) {
printk(KERN_INFO "pcmcia: Detected deprecated PCMCIA ioctl "
"usage.\n");
"usage from process: %s.\n", current->comm);
printk(KERN_INFO "pcmcia: This interface will soon be removed from "
"the kernel; please expect breakage unless you upgrade "
"to new tools.\n");
@ -601,8 +601,12 @@ static int ds_ioctl(struct inode * inode, struct file * file,
ret = CS_BAD_ARGS;
else {
struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->config.Function);
ret = pccard_get_configuration_info(s, p_dev, &buf->config);
pcmcia_put_dev(p_dev);
if (p_dev == NULL)
ret = CS_BAD_ARGS;
else {
ret = pccard_get_configuration_info(s, p_dev, &buf->config);
pcmcia_put_dev(p_dev);
}
}
break;
case DS_GET_FIRST_TUPLE:
@ -632,8 +636,12 @@ static int ds_ioctl(struct inode * inode, struct file * file,
ret = CS_BAD_ARGS;
else {
struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->status.Function);
ret = pccard_get_status(s, p_dev, &buf->status);
pcmcia_put_dev(p_dev);
if (p_dev == NULL)
ret = CS_BAD_ARGS;
else {
ret = pccard_get_status(s, p_dev, &buf->status);
pcmcia_put_dev(p_dev);
}
}
break;
case DS_VALIDATE_CIS:
@ -665,9 +673,10 @@ static int ds_ioctl(struct inode * inode, struct file * file,
if (!(buf->conf_reg.Function &&
(buf->conf_reg.Function >= s->functions))) {
struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->conf_reg.Function);
if (p_dev)
if (p_dev) {
ret = pcmcia_access_configuration_register(p_dev, &buf->conf_reg);
pcmcia_put_dev(p_dev);
pcmcia_put_dev(p_dev);
}
}
break;
case DS_GET_FIRST_REGION:

@ -1348,7 +1348,7 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
index = (struct ccw1 *) __va((addr_t) irb->scsw.cpa)
- channel->ccws;
if ((irb->scsw.actl & SCSW_ACTL_SUSPENDED) ||
(irb->scsw.cstat | SCHN_STAT_PCI))
(irb->scsw.cstat & SCHN_STAT_PCI))
/* Bloody io subsystem tells us lies about cpa... */
index = (index - 1) & (LCS_NUM_BUFFS - 1);
while (channel->io_idx != index) {

@ -875,6 +875,9 @@ static unsigned int ata_id_xfermask(const u16 *id)
/**
* ata_port_queue_task - Queue port_task
* @ap: The ata_port to queue port_task for
* @fn: workqueue function to be scheduled
* @data: data value to pass to workqueue function
* @delay: delay time for workqueue function
*
* Schedule @fn(@data) for execution after @delay jiffies using
* port_task. There is one port_task per port and it's the
@ -3091,8 +3094,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
/**
* ata_dev_init_params - Issue INIT DEV PARAMS command
* @dev: Device to which command will be sent
* @heads: Number of heads
* @sectors: Number of sectors
* @heads: Number of heads (taskfile parameter)
* @sectors: Number of sectors (taskfile parameter)
*
* LOCKING:
* Kernel thread context (may sleep)
@ -5007,6 +5010,7 @@ int ata_device_resume(struct ata_device *dev)
/**
* ata_device_suspend - prepare a device for suspend
* @dev: the device to suspend
* @state: target power management state
*
* Flush the cache on the drive, if appropriate, then issue a
* standbynow command.

@ -37,7 +37,7 @@
#include <asm/io.h>

#define DRV_NAME "sata_mv"
#define DRV_VERSION "0.6"
#define DRV_VERSION "0.7"

enum {
/* BAR's are enumerated in terms of pci_resource_start() terms */
@ -50,6 +50,12 @@ enum {

MV_PCI_REG_BASE = 0,
MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),

MV_SATAHC0_REG_BASE = 0x20000,
MV_FLASH_CTL = 0x1046c,
MV_GPIO_PORT_CTL = 0x104f0,
@ -302,9 +308,6 @@ struct mv_port_priv {
dma_addr_t crpb_dma;
struct mv_sg *sg_tbl;
dma_addr_t sg_tbl_dma;

unsigned req_producer; /* cp of req_in_ptr */
unsigned rsp_consumer; /* cp of rsp_out_ptr */
u32 pp_flags;
};

@ -937,8 +940,6 @@ static int mv_port_start(struct ata_port *ap)
writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

pp->req_producer = pp->rsp_consumer = 0;

/* Don't turn on EDMA here...do it before DMA commands only. Else
* we'll be unable to send non-data, PIO, etc due to restricted access
* to shadow regs.
@ -1022,16 +1023,16 @@ static void mv_fill_sg(struct ata_queued_cmd *qc)
}
}

static inline unsigned mv_inc_q_index(unsigned *index)
static inline unsigned mv_inc_q_index(unsigned index)
{
*index = (*index + 1) & MV_MAX_Q_DEPTH_MASK;
return *index;
return (index + 1) & MV_MAX_Q_DEPTH_MASK;
}

static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last)
{
*cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
(last ? CRQB_CMD_LAST : 0);
*cmdw = cpu_to_le16(tmp);
}

/**
@ -1053,15 +1054,11 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
u16 *cw;
struct ata_taskfile *tf;
u16 flags = 0;
unsigned in_index;

if (ATA_PROT_DMA != qc->tf.protocol)
return;

/* the req producer index should be the same as we remember it */
WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
pp->req_producer);

/* Fill in command request block
*/
if (!(qc->tf.flags & ATA_TFLAG_WRITE))
@ -1069,13 +1066,17 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
flags |= qc->tag << CRQB_TAG_SHIFT;

pp->crqb[pp->req_producer].sg_addr =
cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
pp->crqb[pp->req_producer].sg_addr_hi =
cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags);
/* get current queue index from hardware */
in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

cw = &pp->crqb[pp->req_producer].ata_cmd[0];
pp->crqb[in_index].sg_addr =
cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
pp->crqb[in_index].sg_addr_hi =
cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

cw = &pp->crqb[in_index].ata_cmd[0];
tf = &qc->tf;

/* Sadly, the CRQB cannot accommodate all registers--there are
@ -1144,16 +1145,12 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
struct mv_port_priv *pp = ap->private_data;
struct mv_crqb_iie *crqb;
struct ata_taskfile *tf;
unsigned in_index;
u32 flags = 0;

if (ATA_PROT_DMA != qc->tf.protocol)
return;

/* the req producer index should be the same as we remember it */
WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
pp->req_producer);

/* Fill in Gen IIE command request block
*/
if (!(qc->tf.flags & ATA_TFLAG_WRITE))
@ -1162,7 +1159,11 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
flags |= qc->tag << CRQB_TAG_SHIFT;

crqb = (struct mv_crqb_iie *) &pp->crqb[pp->req_producer];
/* get current queue index from hardware */
in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
crqb->flags = cpu_to_le32(flags);
@ -1210,6 +1211,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
void __iomem *port_mmio = mv_ap_base(qc->ap);
struct mv_port_priv *pp = qc->ap->private_data;
unsigned in_index;
u32 in_ptr;

if (ATA_PROT_DMA != qc->tf.protocol) {
@ -1221,23 +1223,20 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
return ata_qc_issue_prot(qc);
}

in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

/* the req producer index should be the same as we remember it */
WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
pp->req_producer);
/* until we do queuing, the queue should be empty at this point */
WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

mv_inc_q_index(&pp->req_producer); /* now incr producer index */
in_index = mv_inc_q_index(in_index); /* now incr producer index */

mv_start_dma(port_mmio, pp);

/* and write the request in pointer to kick the EDMA to life */
in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT;
in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

return 0;
@ -1260,28 +1259,26 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
{
void __iomem *port_mmio = mv_ap_base(ap);
struct mv_port_priv *pp = ap->private_data;
unsigned out_index;
u32 out_ptr;
u8 ata_status;

out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

/* the response consumer index should be the same as we remember it */
WARN_ON(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
pp->rsp_consumer);

ata_status = pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT;
ata_status = le16_to_cpu(pp->crpb[out_index].flags)
>> CRPB_FLAG_STATUS_SHIFT;

/* increment our consumer index... */
pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);
out_index = mv_inc_q_index(out_index);

/* and, until we do NCQ, there should only be 1 CRPB waiting */
WARN_ON(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
pp->rsp_consumer);
WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

/* write out our inc'd consumer index so EDMA knows we're caught up */
out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT;
out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

/* Return ATA status register for completed CRPB */
@ -1291,6 +1288,7 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
/**
* mv_err_intr - Handle error interrupts on the port
* @ap: ATA channel to manipulate
* @reset_allowed: bool: 0 == don't trigger from reset here
*
* In most cases, just clear the interrupt and move on. However,
* some cases require an eDMA reset, which is done right before
@ -1301,7 +1299,7 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
* LOCKING:
* Inherited from caller.
*/
static void mv_err_intr(struct ata_port *ap)
static void mv_err_intr(struct ata_port *ap, int reset_allowed)
{
void __iomem *port_mmio = mv_ap_base(ap);
u32 edma_err_cause, serr = 0;
@ -1323,9 +1321,8 @@ static void mv_err_intr(struct ata_port *ap)
writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

/* check for fatal here and recover if needed */
if (EDMA_ERR_FATAL & edma_err_cause) {
if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
mv_stop_and_reset(ap);
}
}

/**
@ -1374,12 +1371,12 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
struct ata_port *ap = host_set->ports[port];
struct mv_port_priv *pp = ap->private_data;

hard_port = port & MV_PORT_MASK; /* range 0-3 */
hard_port = mv_hardport_from_port(port); /* range 0..3 */
handled = 0; /* ensure ata_status is set if handled++ */

/* Note that DEV_IRQ might happen spuriously during EDMA,
* and should be ignored in such cases. We could mask it,
* but it's pretty rare and may not be worth the overhead.
* and should be ignored in such cases.
* The cause of this is still under investigation.
*/
if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
/* EDMA: check for response queue interrupt */
@ -1393,6 +1390,11 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
ata_status = readb((void __iomem *)
ap->ioaddr.status_addr);
handled = 1;
/* ignore spurious intr if drive still BUSY */
if (ata_status & ATA_BUSY) {
ata_status = 0;
handled = 0;
}
}
}

@ -1406,7 +1408,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
shift++; /* skip bit 8 in the HC Main IRQ reg */
}
if ((PORT0_ERR << shift) & relevant) {
mv_err_intr(ap);
mv_err_intr(ap, 1);
err_mask |= AC_ERR_OTHER;
handled = 1;
}
@ -1448,6 +1450,7 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance,
struct ata_host_set *host_set = dev_instance;
unsigned int hc, handled = 0, n_hcs;
void __iomem *mmio = host_set->mmio_base;
struct mv_host_priv *hpriv;
u32 irq_stat;

irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
@ -1469,6 +1472,17 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance,
handled++;
}
}

hpriv = host_set->private_data;
if (IS_60XX(hpriv)) {
/* deal with the interrupt coalescing bits */
if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
}
}

if (PCI_ERR & irq_stat) {
printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
readl(mmio + PCI_IRQ_CAUSE_OFS));
@ -1867,7 +1881,8 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,

if (IS_60XX(hpriv)) {
u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
ifctl |= (1 << 12) | (1 << 7);
ifctl |= (1 << 7); /* enable gen2i speed */
ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
}

@ -2033,11 +2048,14 @@ static void mv_eng_timeout(struct ata_port *ap)
ap->host_set->mmio_base, ap, qc, qc->scsicmd,
&qc->scsicmd->cmnd);

mv_err_intr(ap);
mv_err_intr(ap, 0);
mv_stop_and_reset(ap);

qc->err_mask |= AC_ERR_TIMEOUT;
ata_eh_qc_complete(qc);
WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
if (qc->flags & ATA_QCFLAG_ACTIVE) {
qc->err_mask |= AC_ERR_TIMEOUT;
ata_eh_qc_complete(qc);
}
}

/**
@ -2231,7 +2249,8 @@ static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
void __iomem *port_mmio = mv_port_base(mmio, port);

u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
ifctl |= (1 << 12);
ifctl |= (1 << 7); /* enable gen2i speed */
ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
}

@ -2332,6 +2351,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc) {
return rc;
}
pci_set_master(pdev);

rc = pci_request_regions(pdev, DRV_NAME);
if (rc) {

@ -1907,9 +1907,12 @@ uart_set_options(struct uart_port *port, struct console *co,
static void uart_change_pm(struct uart_state *state, int pm_state)
{
struct uart_port *port = state->port;
if (port->ops->pm)
port->ops->pm(port, pm_state, state->pm_state);
state->pm_state = pm_state;

if (state->pm_state != pm_state) {
if (port->ops->pm)
port->ops->pm(port, pm_state, state->pm_state);
state->pm_state = pm_state;
}
}

int uart_suspend_port(struct uart_driver *drv, struct uart_port *port)

@ -75,6 +75,14 @@ config SPI_BUTTERFLY
inexpensive battery powered microcontroller evaluation board.
This same cable can be used to flash new firmware.

config SPI_PXA2XX
tristate "PXA2xx SSP SPI master"
depends on SPI_MASTER && ARCH_PXA && EXPERIMENTAL
help
This enables using a PXA2xx SSP port as a SPI master controller.
The driver can be configured to use any SSP port and additional
documentation can be found at Documentation/spi/pxa2xx.

#
# Add new SPI master controllers in alphabetical order above this line
#

@ -13,6 +13,7 @@ obj-$(CONFIG_SPI_MASTER) += spi.o
# SPI master controller drivers (bus)
obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o
obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o
# ... add above this line ...

# SPI protocol drivers (device/link on bus)

1467 drivers/spi/pxa2xx_spi.c (new file; diff suppressed because it is too large)

@ -395,7 +395,7 @@ EXPORT_SYMBOL_GPL(spi_alloc_master);
int __init_or_module
spi_register_master(struct spi_master *master)
{
static atomic_t dyn_bus_id = ATOMIC_INIT(0);
static atomic_t dyn_bus_id = ATOMIC_INIT((1<<16) - 1);
struct device *dev = master->cdev.dev;
int status = -ENODEV;
int dynamic = 0;
@ -404,7 +404,7 @@ spi_register_master(struct spi_master *master)
return -ENODEV;

/* convention: dynamically assigned bus IDs count down from the max */
if (master->bus_num == 0) {
if (master->bus_num < 0) {
master->bus_num = atomic_dec_return(&dyn_bus_id);
dynamic = 1;
}
@ -522,7 +522,8 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
}
EXPORT_SYMBOL_GPL(spi_sync);

#define SPI_BUFSIZ (SMP_CACHE_BYTES)
/* portable code must never pass more than 32 bytes */
#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)

static u8 *buf;

@ -138,6 +138,45 @@ static unsigned bitbang_txrx_32(
return t->len - count;
}

int spi_bitbang_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
{
struct spi_bitbang_cs *cs = spi->controller_state;
u8 bits_per_word;
u32 hz;

if (t) {
bits_per_word = t->bits_per_word;
hz = t->speed_hz;
} else {
bits_per_word = 0;
hz = 0;
}

/* spi_transfer level calls that work per-word */
if (!bits_per_word)
bits_per_word = spi->bits_per_word;
if (bits_per_word <= 8)
cs->txrx_bufs = bitbang_txrx_8;
else if (bits_per_word <= 16)
cs->txrx_bufs = bitbang_txrx_16;
else if (bits_per_word <= 32)
cs->txrx_bufs = bitbang_txrx_32;
else
return -EINVAL;

/* nsecs = (clock period)/2 */
if (!hz)
hz = spi->max_speed_hz;
if (hz) {
cs->nsecs = (1000000000/2) / hz;
if (cs->nsecs > (MAX_UDELAY_MS * 1000 * 1000))
return -EINVAL;
}

return 0;
}
EXPORT_SYMBOL_GPL(spi_bitbang_setup_transfer);

/**
* spi_bitbang_setup - default setup for per-word I/O loops
*/
@ -145,8 +184,16 @@ int spi_bitbang_setup(struct spi_device *spi)
{
struct spi_bitbang_cs *cs = spi->controller_state;
struct spi_bitbang *bitbang;
int retval;

if (!spi->max_speed_hz)
bitbang = spi_master_get_devdata(spi->master);

/* REVISIT: some systems will want to support devices using lsb-first
* bit encodings on the wire. In pure software that would be trivial,
* just bitbang_txrx_le_cphaX() routines shifting the other way, and
* some hardware controllers also have this support.
*/
if ((spi->mode & SPI_LSB_FIRST) != 0)
return -EINVAL;

if (!cs) {
@ -155,32 +202,20 @@ int spi_bitbang_setup(struct spi_device *spi)
return -ENOMEM;
spi->controller_state = cs;
}
bitbang = spi_master_get_devdata(spi->master);

if (!spi->bits_per_word)
spi->bits_per_word = 8;

/* spi_transfer level calls that work per-word */
if (spi->bits_per_word <= 8)
cs->txrx_bufs = bitbang_txrx_8;
else if (spi->bits_per_word <= 16)
cs->txrx_bufs = bitbang_txrx_16;
else if (spi->bits_per_word <= 32)
cs->txrx_bufs = bitbang_txrx_32;
else
return -EINVAL;

/* per-word shift register access, in hardware or bitbanging */
cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)];
if (!cs->txrx_word)
return -EINVAL;

/* nsecs = (clock period)/2 */
cs->nsecs = (1000000000/2) / (spi->max_speed_hz);
if (cs->nsecs > MAX_UDELAY_MS * 1000)
return -EINVAL;
retval = spi_bitbang_setup_transfer(spi, NULL);
if (retval < 0)
return retval;

dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec\n",
dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n",
__FUNCTION__, spi->mode & (SPI_CPOL | SPI_CPHA),
spi->bits_per_word, 2 * cs->nsecs);

@ -246,6 +281,8 @@ static void bitbang_work(void *_bitbang)
unsigned tmp;
unsigned cs_change;
int status;
int (*setup_transfer)(struct spi_device *,
struct spi_transfer *);

m = container_of(bitbang->queue.next, struct spi_message,
queue);
@ -262,6 +299,7 @@ static void bitbang_work(void *_bitbang)
tmp = 0;
cs_change = 1;
status = 0;
setup_transfer = NULL;

list_for_each_entry (t, &m->transfers, transfer_list) {
if (bitbang->shutdown) {
@ -269,6 +307,20 @@ static void bitbang_work(void *_bitbang)
break;
}

/* override or restore speed and wordsize */
if (t->speed_hz || t->bits_per_word) {
setup_transfer = bitbang->setup_transfer;
if (!setup_transfer) {
status = -ENOPROTOOPT;
break;
}
}
if (setup_transfer) {
status = setup_transfer(spi, t);
if (status < 0)
break;
}

/* set up default clock polarity, and activate chip;
* this implicitly updates clock and spi modes as
* previously recorded for this device via setup().
@ -325,6 +377,10 @@ static void bitbang_work(void *_bitbang)
m->status = status;
m->complete(m->context);

/* restore speed and wordsize */
if (setup_transfer)
setup_transfer(spi, NULL);

/* normally deactivate chipselect ... unless no error and
* cs_change has hinted that the next message will probably
* be for this chip too.
@ -348,6 +404,7 @@ int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m)
{
struct spi_bitbang *bitbang;
unsigned long flags;
int status = 0;

m->actual_length = 0;
m->status = -EINPROGRESS;
@ -357,11 +414,15 @@ int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m)
return -ESHUTDOWN;

spin_lock_irqsave(&bitbang->lock, flags);
list_add_tail(&m->queue, &bitbang->queue);
queue_work(bitbang->workqueue, &bitbang->work);
if (!spi->max_speed_hz)
status = -ENETDOWN;
else {
list_add_tail(&m->queue, &bitbang->queue);
queue_work(bitbang->workqueue, &bitbang->work);
}
spin_unlock_irqrestore(&bitbang->lock, flags);

return 0;
return status;
}
EXPORT_SYMBOL_GPL(spi_bitbang_transfer);

@ -406,6 +467,9 @@ int spi_bitbang_start(struct spi_bitbang *bitbang)
bitbang->use_dma = 0;
bitbang->txrx_bufs = spi_bitbang_bufs;
if (!bitbang->master->setup) {
if (!bitbang->setup_transfer)
bitbang->setup_transfer =
spi_bitbang_setup_transfer;
bitbang->master->setup = spi_bitbang_setup;
bitbang->master->cleanup = spi_bitbang_cleanup;
}

@ -29,12 +29,15 @@ static ssize_t backlight_show_power(struct class_device *cdev, char *buf)

static ssize_t backlight_store_power(struct class_device *cdev, const char *buf, size_t count)
{
int rc = -ENXIO, power;
int rc = -ENXIO;
char *endp;
struct backlight_device *bd = to_backlight_device(cdev);
int power = simple_strtoul(buf, &endp, 0);
size_t size = endp - buf;

power = simple_strtoul(buf, &endp, 0);
if (*endp && !isspace(*endp))
if (*endp && isspace(*endp))
size++;
if (size != count)
return -EINVAL;

down(&bd->sem);
@ -65,12 +68,15 @@ static ssize_t backlight_show_brightness(struct class_device *cdev, char *buf)

static ssize_t backlight_store_brightness(struct class_device *cdev, const char *buf, size_t count)
{
int rc = -ENXIO, brightness;
int rc = -ENXIO;
char *endp;
struct backlight_device *bd = to_backlight_device(cdev);
int brightness = simple_strtoul(buf, &endp, 0);
size_t size = endp - buf;

brightness = simple_strtoul(buf, &endp, 0);
if (*endp && !isspace(*endp))
if (*endp && isspace(*endp))
size++;
if (size != count)
return -EINVAL;

down(&bd->sem);

@ -31,12 +31,15 @@ static ssize_t lcd_show_power(struct class_device *cdev, char *buf)
|
||||
|
||||
static ssize_t lcd_store_power(struct class_device *cdev, const char *buf, size_t count)
|
||||
{
|
||||
int rc, power;
|
||||
int rc = -ENXIO;
|
||||
char *endp;
|
||||
struct lcd_device *ld = to_lcd_device(cdev);
|
||||
int power = simple_strtoul(buf, &endp, 0);
|
||||
size_t size = endp - buf;
|
||||
|
||||
power = simple_strtoul(buf, &endp, 0);
|
||||
if (*endp && !isspace(*endp))
|
||||
if (*endp && isspace(*endp))
|
||||
size++;
|
||||
if (size != count)
|
||||
return -EINVAL;
|
||||
|
||||
down(&ld->sem);
|
||||
@ -44,8 +47,7 @@ static ssize_t lcd_store_power(struct class_device *cdev, const char *buf, size_
|
||||
pr_debug("lcd: set power to %d\n", power);
|
||||
ld->props->set_power(ld, power);
|
||||
rc = count;
|
||||
} else
|
||||
rc = -ENXIO;
|
||||
}
|
||||
up(&ld->sem);
|
||||
|
||||
return rc;
|
||||
@ -53,14 +55,12 @@ static ssize_t lcd_store_power(struct class_device *cdev, const char *buf, size_
|
||||
|
||||
static ssize_t lcd_show_contrast(struct class_device *cdev, char *buf)
|
||||
{
|
||||
int rc;
|
||||
int rc = -ENXIO;
|
||||
struct lcd_device *ld = to_lcd_device(cdev);
|
||||
|
||||
down(&ld->sem);
|
||||
if (likely(ld->props && ld->props->get_contrast))
|
||||
rc = sprintf(buf, "%d\n", ld->props->get_contrast(ld));
|
||||
else
|
||||
rc = -ENXIO;
|
||||
up(&ld->sem);
|
||||
|
||||
return rc;
|
||||
@ -68,12 +68,15 @@ static ssize_t lcd_show_contrast(struct class_device *cdev, char *buf)
|
||||
|
||||
static ssize_t lcd_store_contrast(struct class_device *cdev, const char *buf, size_t count)
|
||||
{
|
||||
int rc, contrast;
|
||||
int rc = -ENXIO;
|
||||
char *endp;
|
||||
struct lcd_device *ld = to_lcd_device(cdev);
|
||||
int contrast = simple_strtoul(buf, &endp, 0);
|
||||
size_t size = endp - buf;
|
||||
|
||||
contrast = simple_strtoul(buf, &endp, 0);
|
||||
if (*endp && !isspace(*endp))
|
||||
if (*endp && isspace(*endp))
|
||||
size++;
|
||||
if (size != count)
|
||||
return -EINVAL;
|
||||
|
||||
down(&ld->sem);
|
||||
@ -81,8 +84,7 @@ static ssize_t lcd_store_contrast(struct class_device *cdev, const char *buf, si
|
||||
pr_debug("lcd: set contrast to %d\n", contrast);
|
||||
ld->props->set_contrast(ld, contrast);
|
||||
rc = count;
|
||||
} else
|
||||
rc = -ENXIO;
|
||||
}
|
||||
up(&ld->sem);
|
||||
|
||||
return rc;
|
||||
@ -90,14 +92,12 @@ static ssize_t lcd_store_contrast(struct class_device *cdev, const char *buf, si
|
||||
|
||||
static ssize_t lcd_show_max_contrast(struct class_device *cdev, char *buf)
|
||||
{
|
||||
int rc;
|
||||
int rc = -ENXIO;
|
||||
struct lcd_device *ld = to_lcd_device(cdev);
|
||||
|
||||
down(&ld->sem);
|
||||
if (likely(ld->props))
|
||||
rc = sprintf(buf, "%d\n", ld->props->max_contrast);
|
||||
else
|
||||
rc = -ENXIO;
|
||||
up(&ld->sem);
|
||||
|
||||
return rc;
|
||||
|
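
All of the store/show fixes above share one validation pattern: parse the integer up front with simple_strtoul(), compute how many bytes the parser consumed, tolerate exactly one trailing whitespace character (echo appends a newline), and reject the write unless that accounts for the whole buffer; initializing rc to -ENXIO also replaces the repeated else branches. A minimal userspace sketch of the same check (parse_exact() is an illustrative name, not part of the patch):

    #include <ctype.h>
    #include <stdlib.h>

    /* Accept buf[0..count) only if it holds one integer, optionally
     * followed by a single whitespace character (e.g. the '\n' that
     * echo appends).  Returns 0 on success. */
    static int parse_exact(const char *buf, size_t count, long *val)
    {
        char *endp;
        size_t size;

        *val = strtol(buf, &endp, 0);
        size = endp - buf;
        if (*endp && isspace(*endp))
            size++;
        return size == count ? 0 : -1;
    }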
@ -98,23 +98,20 @@ v9fs_t_attach(struct v9fs_session_info *v9ses, char *uname, char *aname,
static void v9fs_t_clunk_cb(void *a, struct v9fs_fcall *tc,
                struct v9fs_fcall *rc, int err)
{
-   int fid;
+   int fid, id;
    struct v9fs_session_info *v9ses;

    if (err)
        return;

+   id = 0;
    fid = tc->params.tclunk.fid;
+   if (rc)
+       id = rc->id;
+
    kfree(tc);
-
-   if (!rc)
-       return;
-
-   v9ses = a;
-   if (rc->id == RCLUNK)
-       v9fs_put_idpool(fid, &v9ses->fidpool);
-
    kfree(rc);
+   if (id == RCLUNK) {
+       v9ses = a;
+       v9fs_put_idpool(fid, &v9ses->fidpool);
+   }
}

/**

222 fs/9p/mux.c
@ -50,15 +50,23 @@ enum {
    Wpending = 8, /* can write */
};

+enum {
+   None,
+   Flushing,
+   Flushed,
+};
+
struct v9fs_mux_poll_task;

struct v9fs_req {
+   spinlock_t lock;
    int tag;
    struct v9fs_fcall *tcall;
    struct v9fs_fcall *rcall;
    int err;
    v9fs_mux_req_callback cb;
    void *cba;
+   int flush;
    struct list_head req_list;
};
@ -96,8 +104,8 @@ struct v9fs_mux_poll_task {

struct v9fs_mux_rpc {
    struct v9fs_mux_data *m;
-   struct v9fs_req *req;
    int err;
+   struct v9fs_fcall *tcall;
    struct v9fs_fcall *rcall;
    wait_queue_head_t wqueue;
};
@ -524,10 +532,9 @@ again:

static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
-   int ecode, tag;
+   int ecode;
    struct v9fs_str *ename;

-   tag = req->tag;
    if (!req->err && req->rcall->id == RERROR) {
        ecode = req->rcall->params.rerror.errno;
        ename = &req->rcall->params.rerror.error;
@ -553,23 +560,6 @@ static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
        if (!req->err)
            req->err = -EIO;
    }

-   if (req->err == ERREQFLUSH)
-       return;
-
-   if (req->cb) {
-       dprintk(DEBUG_MUX, "calling callback tcall %p rcall %p\n",
-           req->tcall, req->rcall);
-
-       (*req->cb) (req->cba, req->tcall, req->rcall, req->err);
-       req->cb = NULL;
-   } else
-       kfree(req->rcall);
-
-   v9fs_mux_put_tag(m, tag);
-
-   wake_up(&m->equeue);
-   kfree(req);
}

/**
@ -669,17 +659,26 @@ static void v9fs_read_work(void *a)
    list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
        if (rreq->tag == rcall->tag) {
            req = rreq;
-           req->rcall = rcall;
-           list_del(&req->req_list);
-           spin_unlock(&m->lock);
-           process_request(m, req);
+           if (req->flush != Flushing)
+               list_del(&req->req_list);
            break;
        }
    }
+   spin_unlock(&m->lock);

-   if (!req) {
-       spin_unlock(&m->lock);
+   if (req) {
+       req->rcall = rcall;
+       process_request(m, req);
+
+       if (req->flush != Flushing) {
+           if (req->cb)
+               (*req->cb) (req, req->cba);
+           else
+               kfree(req->rcall);
+
+           wake_up(&m->equeue);
+       }
+   } else {
        if (err >= 0 && rcall->id != RFLUSH)
            dprintk(DEBUG_ERROR,
                "unexpected response mux %p id %d tag %d\n",
@ -746,7 +745,6 @@ static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
        return ERR_PTR(-ENOMEM);

    v9fs_set_tag(tc, n);
-
    if ((v9fs_debug_level&DEBUG_FCALL) == DEBUG_FCALL) {
        char buf[150];

@ -754,12 +752,14 @@ static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
        printk(KERN_NOTICE "<<< %p %s\n", m, buf);
    }

+   spin_lock_init(&req->lock);
    req->tag = n;
    req->tcall = tc;
    req->rcall = NULL;
    req->err = 0;
    req->cb = cb;
    req->cba = cba;
+   req->flush = None;

    spin_lock(&m->lock);
    list_add_tail(&req->req_list, &m->unsent_req_list);
@ -776,72 +776,108 @@ static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
    return req;
}

-static void v9fs_mux_flush_cb(void *a, struct v9fs_fcall *tc,
-                 struct v9fs_fcall *rc, int err)
+static void v9fs_mux_free_request(struct v9fs_mux_data *m, struct v9fs_req *req)
+{
+   v9fs_mux_put_tag(m, req->tag);
+   kfree(req);
+}
+
+static void v9fs_mux_flush_cb(struct v9fs_req *freq, void *a)
{
    v9fs_mux_req_callback cb;
    int tag;
    struct v9fs_mux_data *m;
-   struct v9fs_req *req, *rptr;
+   struct v9fs_req *req, *rreq, *rptr;

    m = a;
-   dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m, tc,
-       rc, err, tc->params.tflush.oldtag);
+   dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m,
+       freq->tcall, freq->rcall, freq->err,
+       freq->tcall->params.tflush.oldtag);

    spin_lock(&m->lock);
    cb = NULL;
-   tag = tc->params.tflush.oldtag;
-   list_for_each_entry_safe(req, rptr, &m->req_list, req_list) {
-       if (req->tag == tag) {
+   tag = freq->tcall->params.tflush.oldtag;
+   req = NULL;
+   list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
+       if (rreq->tag == tag) {
+           req = rreq;
            list_del(&req->req_list);
-           if (req->cb) {
-               cb = req->cb;
-               req->cb = NULL;
-               spin_unlock(&m->lock);
-               (*cb) (req->cba, req->tcall, req->rcall,
-                      req->err);
-           }
-           kfree(req);
-           wake_up(&m->equeue);
            break;
        }
    }
+   spin_unlock(&m->lock);

-   if (!cb)
-       spin_unlock(&m->lock);
+   if (req) {
+       spin_lock(&req->lock);
+       req->flush = Flushed;
+       spin_unlock(&req->lock);

-   v9fs_mux_put_tag(m, tag);
-   kfree(tc);
-   kfree(rc);
+       if (req->cb)
+           (*req->cb) (req, req->cba);
+       else
+           kfree(req->rcall);
+
+       wake_up(&m->equeue);
+   }
+
+   kfree(freq->tcall);
+   kfree(freq->rcall);
+   v9fs_mux_free_request(m, freq);
}

-static void
+static int
v9fs_mux_flush_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
    struct v9fs_fcall *fc;
+   struct v9fs_req *rreq, *rptr;

    dprintk(DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);

+   /* if a response was received for a request, do nothing */
+   spin_lock(&req->lock);
+   if (req->rcall || req->err) {
+       spin_unlock(&req->lock);
+       dprintk(DEBUG_MUX, "mux %p req %p response already received\n", m, req);
+       return 0;
+   }
+
+   req->flush = Flushing;
+   spin_unlock(&req->lock);
+
+   spin_lock(&m->lock);
+   /* if the request is not sent yet, just remove it from the list */
+   list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) {
+       if (rreq->tag == req->tag) {
+           dprintk(DEBUG_MUX, "mux %p req %p request is not sent yet\n", m, req);
+           list_del(&rreq->req_list);
+           req->flush = Flushed;
+           spin_unlock(&m->lock);
+           if (req->cb)
+               (*req->cb) (req, req->cba);
+           return 0;
+       }
+   }
+   spin_unlock(&m->lock);
+
    clear_thread_flag(TIF_SIGPENDING);
    fc = v9fs_create_tflush(req->tag);
    v9fs_send_request(m, fc, v9fs_mux_flush_cb, m);
+   return 1;
}

static void
-v9fs_mux_rpc_cb(void *a, struct v9fs_fcall *tc, struct v9fs_fcall *rc, int err)
+v9fs_mux_rpc_cb(struct v9fs_req *req, void *a)
{
    struct v9fs_mux_rpc *r;

-   if (err == ERREQFLUSH) {
-       kfree(rc);
-       dprintk(DEBUG_MUX, "err req flush\n");
-       return;
-   }
-
+   dprintk(DEBUG_MUX, "req %p r %p\n", req, a);
    r = a;
-   dprintk(DEBUG_MUX, "mux %p req %p tc %p rc %p err %d\n", r->m, r->req,
-       tc, rc, err);
-   r->rcall = rc;
-   r->err = err;
+   r->rcall = req->rcall;
+   r->err = req->err;
+
+   if (req->flush!=None && !req->err)
+       r->err = -ERESTARTSYS;

    wake_up(&r->wqueue);
}
@ -856,12 +892,13 @@ int
v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
         struct v9fs_fcall **rc)
{
-   int err;
+   int err, sigpending;
    unsigned long flags;
    struct v9fs_req *req;
    struct v9fs_mux_rpc r;

    r.err = 0;
+   r.tcall = tc;
    r.rcall = NULL;
    r.m = m;
    init_waitqueue_head(&r.wqueue);
@ -869,48 +906,50 @@ v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
    if (rc)
        *rc = NULL;

+   sigpending = 0;
+   if (signal_pending(current)) {
+       sigpending = 1;
+       clear_thread_flag(TIF_SIGPENDING);
+   }
+
    req = v9fs_send_request(m, tc, v9fs_mux_rpc_cb, &r);
    if (IS_ERR(req)) {
+       err = PTR_ERR(req);
        dprintk(DEBUG_MUX, "error %d\n", err);
-       return PTR_ERR(req);
+       return err;
    }

-   r.req = req;
-   dprintk(DEBUG_MUX, "mux %p tc %p tag %d rpc %p req %p\n", m, tc,
-       req->tag, &r, req);
    err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0);
    if (r.err < 0)
        err = r.err;

    if (err == -ERESTARTSYS && m->trans->status == Connected && m->err == 0) {
-       spin_lock(&m->lock);
-       req->tcall = NULL;
-       req->err = ERREQFLUSH;
-       spin_unlock(&m->lock);
+       if (v9fs_mux_flush_request(m, req)) {
+           /* wait until we get response of the flush message */
+           do {
+               clear_thread_flag(TIF_SIGPENDING);
+               err = wait_event_interruptible(r.wqueue,
+                   r.rcall || r.err);
+           } while (!r.rcall && !r.err && err==-ERESTARTSYS &&
+               m->trans->status==Connected && !m->err);
+       }
+       sigpending = 1;
    }

-   clear_thread_flag(TIF_SIGPENDING);
-   v9fs_mux_flush_request(m, req);
+   if (sigpending) {
+       spin_lock_irqsave(&current->sighand->siglock, flags);
+       recalc_sigpending();
+       spin_unlock_irqrestore(&current->sighand->siglock, flags);
+   }

-   if (!err) {
-       if (r.rcall)
-           dprintk(DEBUG_MUX, "got response id %d tag %d\n",
-               r.rcall->id, r.rcall->tag);
-
-       if (rc)
-           *rc = r.rcall;
-       else
-           kfree(r.rcall);
-   } else {
-       dprintk(DEBUG_MUX, "got error %d\n", err);
-       if (err > 0)
-           err = -EIO;
-   }
+   if (rc)
+       *rc = r.rcall;
+   else
+       kfree(r.rcall);
+
+   v9fs_mux_free_request(m, req);
+   if (err > 0)
+       err = -EIO;

    return err;
}
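
The sigpending handling added to v9fs_mux_rpc() lets a caught signal interrupt the original wait without also aborting the follow-up wait for the flush reply; recalc_sigpending() then re-arms delivery on the way out. A userspace analogue of the same idea (a sketch only, not the kernel mechanism):

    #include <signal.h>

    /* Keep pending signals from interrupting a critical wait, then let
     * them be re-evaluated afterwards -- roughly what clearing
     * TIF_SIGPENDING and later calling recalc_sigpending() achieves. */
    static void run_uninterrupted(void (*fn)(void))
    {
        sigset_t all, old;

        sigfillset(&all);
        sigprocmask(SIG_BLOCK, &all, &old);
        fn();
        sigprocmask(SIG_SETMASK, &old, NULL);
    }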
@ -951,12 +990,15 @@ void v9fs_mux_cancel(struct v9fs_mux_data *m, int err)
    struct v9fs_req *req, *rtmp;
    LIST_HEAD(cancel_list);

-   dprintk(DEBUG_MUX, "mux %p err %d\n", m, err);
+   dprintk(DEBUG_ERROR, "mux %p err %d\n", m, err);
    m->err = err;
    spin_lock(&m->lock);
    list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
        list_move(&req->req_list, &cancel_list);
    }
+   list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
+       list_move(&req->req_list, &cancel_list);
+   }
    spin_unlock(&m->lock);

    list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
@ -965,11 +1007,9 @@ void v9fs_mux_cancel(struct v9fs_mux_data *m, int err)
        req->err = err;

        if (req->cb)
-           (*req->cb) (req->cba, req->tcall, req->rcall, req->err);
+           (*req->cb) (req, req->cba);
        else
            kfree(req->rcall);
-
-       kfree(req);
    }

    wake_up(&m->equeue);
@ -24,6 +24,7 @@
 */

struct v9fs_mux_data;
+struct v9fs_req;

/**
 * v9fs_mux_req_callback - callback function that is called when the
@ -36,8 +37,7 @@ struct v9fs_mux_data;
 * @rc - response call
 * @err - error code (non-zero if error occured)
 */
-typedef void (*v9fs_mux_req_callback)(void *a, struct v9fs_fcall *tc,
-                     struct v9fs_fcall *rc, int err);
+typedef void (*v9fs_mux_req_callback)(struct v9fs_req *req, void *a);

int v9fs_mux_global_init(void);
void v9fs_mux_global_exit(void);
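
Taken together, the mux changes thread a small per-request state machine (None -> Flushing -> Flushed) through the read path and the flush path, with req->lock arbitrating the race between a late response and a Tflush. A compact sketch of the rule both sides follow (pthread types stand in for the kernel spinlock; this is not the kernel code):

    #include <pthread.h>
    #include <stddef.h>

    enum flush_state { NONE, FLUSHING, FLUSHED };

    struct request {
        pthread_mutex_t lock;
        enum flush_state flush;
        void *rcall;            /* response, if one already arrived */
        int err;
    };

    /* Only start a flush if no response has been seen yet; this is the
     * check v9fs_mux_flush_request() performs under req->lock. */
    static int try_flush(struct request *r)
    {
        int do_flush;

        pthread_mutex_lock(&r->lock);
        do_flush = (r->rcall == NULL && r->err == 0);
        if (do_flush)
            r->flush = FLUSHING;
        pthread_mutex_unlock(&r->lock);
        return do_flush;
    }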
@ -72,11 +72,17 @@ int v9fs_file_open(struct inode *inode, struct file *file)
        return -ENOSPC;
    }

-   err = v9fs_t_walk(v9ses, vfid->fid, fid, NULL, NULL);
+   err = v9fs_t_walk(v9ses, vfid->fid, fid, NULL, &fcall);
    if (err < 0) {
        dprintk(DEBUG_ERROR, "rewalk didn't work\n");
-       goto put_fid;
+       if (fcall && fcall->id == RWALK)
+           goto clunk_fid;
+       else {
+           v9fs_put_idpool(fid, &v9ses->fidpool);
+           goto free_fcall;
+       }
    }
+   kfree(fcall);

    /* TODO: do special things for O_EXCL, O_NOFOLLOW, O_SYNC */
    /* translate open mode appropriately */
@ -109,8 +115,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
clunk_fid:
    v9fs_t_clunk(v9ses, fid);

-put_fid:
-   v9fs_put_idpool(fid, &v9ses->fidpool);
+free_fcall:
    kfree(fcall);

    return err;
@ -270,7 +270,10 @@ v9fs_create(struct v9fs_session_info *v9ses, u32 pfid, char *name, u32 perm,
    err = v9fs_t_walk(v9ses, pfid, fid, NULL, &fcall);
    if (err < 0) {
        PRINT_FCALL_ERROR("clone error", fcall);
-       goto put_fid;
+       if (fcall && fcall->id == RWALK)
+           goto clunk_fid;
+       else
+           goto put_fid;
    }
    kfree(fcall);

@ -322,6 +325,9 @@ v9fs_clone_walk(struct v9fs_session_info *v9ses, u32 fid, struct dentry *dentry)
                &fcall);

    if (err < 0) {
+       if (fcall && fcall->id == RWALK)
+           goto clunk_fid;
+
        PRINT_FCALL_ERROR("walk error", fcall);
        v9fs_put_idpool(nfid, &v9ses->fidpool);
        goto error;
@ -640,19 +646,26 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
    }

    result = v9fs_t_walk(v9ses, dirfidnum, newfid,
-       (char *)dentry->d_name.name, NULL);
+       (char *)dentry->d_name.name, &fcall);

    if (result < 0) {
-       v9fs_put_idpool(newfid, &v9ses->fidpool);
+       if (fcall && fcall->id == RWALK)
+           v9fs_t_clunk(v9ses, newfid);
+       else
+           v9fs_put_idpool(newfid, &v9ses->fidpool);
+
        if (result == -ENOENT) {
            d_add(dentry, NULL);
            dprintk(DEBUG_VFS,
                "Return negative dentry %p count %d\n",
                dentry, atomic_read(&dentry->d_count));
+           kfree(fcall);
            return NULL;
        }
        dprintk(DEBUG_ERROR, "walk error:%d\n", result);
        goto FreeFcall;
    }
+   kfree(fcall);

    result = v9fs_t_stat(v9ses, newfid, &fcall);
    if (result < 0) {
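
The three v9fs call sites above fix the same leak: on a failed Twalk, the right cleanup depends on whether the server actually answered with Rwalk. If it did, the new fid now exists on the server and must be clunked; if not, only the locally reserved id needs returning to the pool. Schematically (the helper names are illustrative stand-ins, not the v9fs API):

    struct fcall { int id; };
    enum { RWALK = 111 };   /* 9P2000 Rwalk message type */

    static void clunk_fid(unsigned int fid)  { (void)fid; /* ~ v9fs_t_clunk() */ }
    static void put_idpool(unsigned int fid) { (void)fid; /* ~ v9fs_put_idpool() */ }

    static void walk_error_cleanup(struct fcall *resp, unsigned int fid)
    {
        if (resp && resp->id == RWALK)
            clunk_fid(fid);     /* fid was created server-side */
        else
            put_idpool(fid);    /* fid never existed remotely */
    }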
@ -45,6 +45,7 @@ obj-$(CONFIG_DNOTIFY)       += dnotify.o
obj-$(CONFIG_PROC_FS)       += proc/
obj-y                       += partitions/
obj-$(CONFIG_SYSFS)         += sysfs/
+obj-$(CONFIG_CONFIGFS_FS)   += configfs/
obj-y                       += devpts/

obj-$(CONFIG_PROFILING)     += dcookies.o
@ -100,5 +101,4 @@ obj-$(CONFIG_BEFS_FS)       += befs/
obj-$(CONFIG_HOSTFS)        += hostfs/
obj-$(CONFIG_HPPFS)         += hppfs/
obj-$(CONFIG_DEBUG_FS)      += debugfs/
-obj-$(CONFIG_CONFIGFS_FS)   += configfs/
obj-$(CONFIG_OCFS2_FS)      += ocfs2/
@ -74,8 +74,8 @@ struct autofs_wait_queue {
    struct autofs_wait_queue *next;
    autofs_wqt_t wait_queue_token;
    /* We use the following to see what we are waiting for */
-   int hash;
-   int len;
+   unsigned int hash;
+   unsigned int len;
    char *name;
    u32 dev;
    u64 ino;
@ -85,7 +85,6 @@ struct autofs_wait_queue {
    pid_t tgid;
    /* This is for status reporting upon return */
    int status;
-   atomic_t notify;
    atomic_t wait_ctr;
};
@ -327,6 +327,7 @@ static int try_to_fill_dentry(struct dentry *dentry, int flags)
static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
{
    struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+   struct autofs_info *ino = autofs4_dentry_ino(dentry);
    int oz_mode = autofs4_oz_mode(sbi);
    unsigned int lookup_type;
    int status;
@ -340,13 +341,8 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
    if (oz_mode || !lookup_type)
        goto done;

-   /*
-    * If a request is pending wait for it.
-    * If it's a mount then it won't be expired till at least
-    * a liitle later and if it's an expire then we might need
-    * to mount it again.
-    */
-   if (autofs4_ispending(dentry)) {
+   /* If an expire request is pending wait for it. */
+   if (ino && (ino->flags & AUTOFS_INF_EXPIRING)) {
        DPRINTK("waiting for active request %p name=%.*s",
            dentry, dentry->d_name.len, dentry->d_name.name);
@ -189,14 +189,30 @@ static int autofs4_getpath(struct autofs_sb_info *sbi,
    return len;
}

+static struct autofs_wait_queue *
+autofs4_find_wait(struct autofs_sb_info *sbi,
+         char *name, unsigned int hash, unsigned int len)
+{
+   struct autofs_wait_queue *wq;
+
+   for (wq = sbi->queues; wq; wq = wq->next) {
+       if (wq->hash == hash &&
+           wq->len == len &&
+           wq->name && !memcmp(wq->name, name, len))
+           break;
+   }
+   return wq;
+}
+
int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
        enum autofs_notify notify)
{
+   struct autofs_info *ino;
    struct autofs_wait_queue *wq;
    char *name;
    unsigned int len = 0;
    unsigned int hash = 0;
-   int status;
+   int status, type;

    /* In catatonic mode, we don't wait for nobody */
    if (sbi->catatonic)
@ -223,21 +239,41 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
        return -EINTR;
    }

-   for (wq = sbi->queues ; wq ; wq = wq->next) {
-       if (wq->hash == dentry->d_name.hash &&
-           wq->len == len &&
-           wq->name && !memcmp(wq->name, name, len))
-           break;
-   }
+   wq = autofs4_find_wait(sbi, name, hash, len);
+   ino = autofs4_dentry_ino(dentry);
+   if (!wq && ino && notify == NFY_NONE) {
+       /*
+        * Either we've betean the pending expire to post it's
+        * wait or it finished while we waited on the mutex.
+        * So we need to wait till either, the wait appears
+        * or the expire finishes.
+        */
+
+       while (ino->flags & AUTOFS_INF_EXPIRING) {
+           mutex_unlock(&sbi->wq_mutex);
+           schedule_timeout_interruptible(HZ/10);
+           if (mutex_lock_interruptible(&sbi->wq_mutex)) {
+               kfree(name);
+               return -EINTR;
+           }
+           wq = autofs4_find_wait(sbi, name, hash, len);
+           if (wq)
+               break;
+       }
+
+       /*
+        * Not ideal but the status has already gone. Of the two
+        * cases where we wait on NFY_NONE neither depend on the
+        * return status of the wait.
+        */
+       if (!wq) {
+           kfree(name);
+           mutex_unlock(&sbi->wq_mutex);
+           return 0;
+       }
+   }

    if (!wq) {
+       /* Can't wait for an expire if there's no mount */
+       if (notify == NFY_NONE && !d_mountpoint(dentry)) {
+           kfree(name);
+           mutex_unlock(&sbi->wq_mutex);
+           return -ENOENT;
+       }
+
        /* Create a new wait queue */
        wq = kmalloc(sizeof(struct autofs_wait_queue),GFP_KERNEL);
        if (!wq) {
@ -263,20 +299,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
        wq->tgid = current->tgid;
        wq->status = -EINTR; /* Status return if interrupted */
        atomic_set(&wq->wait_ctr, 2);
-       atomic_set(&wq->notify, 1);
        mutex_unlock(&sbi->wq_mutex);
-   } else {
-       atomic_inc(&wq->wait_ctr);
-       mutex_unlock(&sbi->wq_mutex);
-       kfree(name);
-       DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d",
-           (unsigned long) wq->wait_queue_token, wq->len, wq->name, notify);
-   }
-
-   if (notify != NFY_NONE && atomic_read(&wq->notify)) {
-       int type;
-
-       atomic_dec(&wq->notify);

        if (sbi->version < 5) {
            if (notify == NFY_MOUNT)
@ -299,6 +322,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,

        /* autofs4_notify_daemon() may block */
        autofs4_notify_daemon(sbi, wq, type);
+   } else {
+       atomic_inc(&wq->wait_ctr);
+       mutex_unlock(&sbi->wq_mutex);
+       kfree(name);
+       DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d",
+           (unsigned long) wq->wait_queue_token, wq->len, wq->name, notify);
+   }

    /* wq->name is NULL if and only if the lock is already released */
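
The NFY_NONE branch added above waits for a state change it does not own: it repeatedly drops wq_mutex, sleeps for HZ/10, retakes the mutex, and re-checks both exit conditions. The same poll-under-lock shape in a self-contained form (pthreads stand in for the kernel mutex and schedule_timeout; illustrative only):

    #include <pthread.h>
    #include <unistd.h>

    /* Caller holds *mu.  Returns the entry once find() sees it, or
     * NULL when *expiring is cleared first. */
    static void *wait_for_entry(pthread_mutex_t *mu, const int *expiring,
                                void *(*find)(void))
    {
        void *wq = NULL;

        while (*expiring) {
            pthread_mutex_unlock(mu);
            usleep(100000);            /* ~ HZ/10 in the patch */
            pthread_mutex_lock(mu);
            wq = find();
            if (wq)
                break;
        }
        return wq;
    }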
@ -1913,7 +1913,7 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
    }

    if (sigmask) {
-       if (sigsetsize |= sizeof(compat_sigset_t))
+       if (sigsetsize != sizeof(compat_sigset_t))
            return -EINVAL;
        if (copy_from_user(&ss32, sigmask, sizeof(ss32)))
            return -EFAULT;
@ -505,13 +505,15 @@ static int populate_groups(struct config_group *group)
    int i;

    if (group->default_groups) {
-       /* FYI, we're faking mkdir here
+       /*
+        * FYI, we're faking mkdir here
         * I'm not sure we need this semaphore, as we're called
         * from our parent's mkdir.  That holds our parent's
         * i_mutex, so afaik lookup cannot continue through our
         * parent to find us, let alone mess with our tree.
         * That said, taking our i_mutex is closer to mkdir
-        * emulation, and shouldn't hurt. */
+        * emulation, and shouldn't hurt.
+        */
        mutex_lock(&dentry->d_inode->i_mutex);

        for (i = 0; group->default_groups[i]; i++) {
@ -546,20 +548,34 @@ static void unlink_obj(struct config_item *item)

        item->ci_group = NULL;
        item->ci_parent = NULL;
+
+       /* Drop the reference for ci_entry */
        config_item_put(item);
+
+       /* Drop the reference for ci_parent */
        config_group_put(group);
    }
}

static void link_obj(struct config_item *parent_item, struct config_item *item)
{
-   /* Parent seems redundant with group, but it makes certain
-    * traversals much nicer. */
+   /*
+    * Parent seems redundant with group, but it makes certain
+    * traversals much nicer.
+    */
    item->ci_parent = parent_item;
+
+   /*
+    * We hold a reference on the parent for the child's ci_parent
+    * link.
+    */
    item->ci_group = config_group_get(to_config_group(parent_item));
    list_add_tail(&item->ci_entry, &item->ci_group->cg_children);
+
+   /*
+    * We hold a reference on the child for ci_entry on the parent's
+    * cg_children
+    */
    config_item_get(item);
}
@ -684,6 +700,10 @@ static void client_drop_item(struct config_item *parent_item,
    type = parent_item->ci_type;
    BUG_ON(!type);

+   /*
+    * If ->drop_item() exists, it is responsible for the
+    * config_item_put().
+    */
    if (type->ct_group_ops && type->ct_group_ops->drop_item)
        type->ct_group_ops->drop_item(to_config_group(parent_item),
                        item);
@ -694,23 +714,28 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)

static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
-   int ret;
+   int ret, module_got = 0;
    struct config_group *group;
    struct config_item *item;
    struct config_item *parent_item;
    struct configfs_subsystem *subsys;
    struct configfs_dirent *sd;
    struct config_item_type *type;
-   struct module *owner;
+   struct module *owner = NULL;
    char *name;

-   if (dentry->d_parent == configfs_sb->s_root)
-       return -EPERM;
+   if (dentry->d_parent == configfs_sb->s_root) {
+       ret = -EPERM;
+       goto out;
+   }

    sd = dentry->d_parent->d_fsdata;
-   if (!(sd->s_type & CONFIGFS_USET_DIR))
-       return -EPERM;
+   if (!(sd->s_type & CONFIGFS_USET_DIR)) {
+       ret = -EPERM;
+       goto out;
+   }

+   /* Get a working ref for the duration of this function */
    parent_item = configfs_get_config_item(dentry->d_parent);
    type = parent_item->ci_type;
    subsys = to_config_group(parent_item)->cg_subsys;
@ -719,15 +744,16 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
    if (!type || !type->ct_group_ops ||
        (!type->ct_group_ops->make_group &&
         !type->ct_group_ops->make_item)) {
-       config_item_put(parent_item);
-       return -EPERM;  /* What lack-of-mkdir returns */
+       ret = -EPERM;  /* Lack-of-mkdir returns -EPERM */
+       goto out_put;
    }

    name = kmalloc(dentry->d_name.len + 1, GFP_KERNEL);
    if (!name) {
-       config_item_put(parent_item);
-       return -ENOMEM;
+       ret = -ENOMEM;
+       goto out_put;
    }

    snprintf(name, dentry->d_name.len + 1, "%s", dentry->d_name.name);

    down(&subsys->su_sem);
@ -748,40 +774,67 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)

    kfree(name);
    if (!item) {
-       config_item_put(parent_item);
-       return -ENOMEM;
+       /*
+        * If item == NULL, then link_obj() was never called.
+        * There are no extra references to clean up.
+        */
+       ret = -ENOMEM;
+       goto out_put;
    }

-   ret = -EINVAL;
+   /*
+    * link_obj() has been called (via link_group() for groups).
+    * From here on out, errors must clean that up.
+    */
+
    type = item->ci_type;
-   if (type) {
-       owner = type->ct_owner;
-       if (try_module_get(owner)) {
-           if (group) {
-               ret = configfs_attach_group(parent_item,
-                               item,
-                               dentry);
-           } else {
-               ret = configfs_attach_item(parent_item,
-                              item,
-                              dentry);
-           }
-
-           if (ret) {
-               down(&subsys->su_sem);
-               if (group)
-                   unlink_group(group);
-               else
-                   unlink_obj(item);
-               client_drop_item(parent_item, item);
-               up(&subsys->su_sem);
-
-               config_item_put(parent_item);
-               module_put(owner);
-           }
-       }
-   }
+   if (!type) {
+       ret = -EINVAL;
+       goto out_unlink;
+   }
+
+   owner = type->ct_owner;
+   if (!try_module_get(owner)) {
+       ret = -EINVAL;
+       goto out_unlink;
+   }
+
+   /*
+    * I hate doing it this way, but if there is
+    * an error, module_put() probably should
+    * happen after any cleanup.
+    */
+   module_got = 1;
+
+   if (group)
+       ret = configfs_attach_group(parent_item, item, dentry);
+   else
+       ret = configfs_attach_item(parent_item, item, dentry);
+
+out_unlink:
+   if (ret) {
+       /* Tear down everything we built up */
+       down(&subsys->su_sem);
+       if (group)
+           unlink_group(group);
+       else
+           unlink_obj(item);
+       client_drop_item(parent_item, item);
+       up(&subsys->su_sem);
+
+       if (module_got)
+           module_put(owner);
+   }
+
+out_put:
+   /*
+    * link_obj()/link_group() took a reference from child->parent,
+    * so the parent is safely pinned.  We can drop our working
+    * reference.
+    */
+   config_item_put(parent_item);
+
+out:
    return ret;
}

@ -801,6 +854,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
    if (sd->s_type & CONFIGFS_USET_DEFAULT)
        return -EPERM;

+   /* Get a working ref until we have the child */
    parent_item = configfs_get_config_item(dentry->d_parent);
    subsys = to_config_group(parent_item)->cg_subsys;
    BUG_ON(!subsys);
@ -817,6 +871,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
        return ret;
    }

+   /* Get a working ref for the duration of this function */
    item = configfs_get_config_item(dentry);

    /* Drop reference from above, item already holds one. */
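
The configfs_mkdir() rewrite above replaces nested success conditions with the kernel's usual goto ladder: a single exit path in which each label undoes exactly the steps that had succeeded, plus a module_got flag for the one resource whose release must come after the rest of the cleanup. The shape, reduced to two steps (every function here is a hypothetical stand-in, not the configfs API):

    static int step_pin_parent(void) { return 0; }
    static void undo_pin_parent(void) { }
    static int step_link_child(void) { return 0; }

    static int mkdir_like(void)
    {
        int ret;

        ret = step_pin_parent();
        if (ret)
            goto out;

        ret = step_link_child();
        if (ret)
            goto out_unpin;

        return 0;           /* success: keep everything */

    out_unpin:
        undo_pin_parent();
    out:
        return ret;
    }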
@ -438,7 +438,8 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info
    if (c->mtd->point) {
        err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer);
        if (!err && retlen < tn->csize) {
-           JFFS2_WARNING("MTD point returned len too short: %u instead of %u.\n", retlen, tn->csize);
+           JFFS2_WARNING("MTD point returned len too short: %zu "
+                     "instead of %u.\n", retlen, tn->csize);
            c->mtd->unpoint(c->mtd, buffer, ofs, len);
        } else if (err)
            JFFS2_WARNING("MTD point failed: error code %d.\n", err);
@ -461,7 +462,8 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info
    }

    if (retlen != len) {
-       JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", ofs, retlen, len);
+       JFFS2_ERROR("short read at %#08x: %zd instead of %d.\n",
+               ofs, retlen, len);
        err = -EIO;
        goto free_out;
    }
@ -899,13 +899,11 @@ static int do_change_type(struct nameidata *nd, int flag)
/*
 * do loopback mount.
 */
-static int do_loopback(struct nameidata *nd, char *old_name, unsigned long flags, int mnt_flags)
+static int do_loopback(struct nameidata *nd, char *old_name, int recurse)
{
    struct nameidata old_nd;
    struct vfsmount *mnt = NULL;
-   int recurse = flags & MS_REC;
    int err = mount_is_safe(nd);

    if (err)
        return err;
    if (!old_name || !*old_name)
@ -939,7 +937,6 @@ static int do_loopback(struct nameidata *nd, char *old_name,
        spin_unlock(&vfsmount_lock);
        release_mounts(&umount_list);
    }
-   mnt->mnt_flags = mnt_flags;

out:
    up_write(&namespace_sem);
@ -1353,7 +1350,7 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
        retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
                    data_page);
    else if (flags & MS_BIND)
-       retval = do_loopback(&nd, dev_name, flags, mnt_flags);
+       retval = do_loopback(&nd, dev_name, flags & MS_REC);
    else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
        retval = do_change_type(&nd, flags);
    else if (flags & MS_MOVE)
@ -276,13 +276,29 @@ static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
    return ret;
}

+/* This can also be called from ocfs2_write_zero_page() which has done
+ * it's own cluster locking. */
+int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
+                  unsigned from, unsigned to)
+{
+   int ret;
+
+   down_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+   ret = block_prepare_write(page, from, to, ocfs2_get_block);
+
+   up_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+   return ret;
+}
+
/*
 * ocfs2_prepare_write() can be an outer-most ocfs2 call when it is called
 * from loopback.  It must be able to perform its own locking around
 * ocfs2_get_block().
 */
-int ocfs2_prepare_write(struct file *file, struct page *page,
-           unsigned from, unsigned to)
+static int ocfs2_prepare_write(struct file *file, struct page *page,
+                  unsigned from, unsigned to)
{
    struct inode *inode = page->mapping->host;
    int ret;
@ -295,11 +311,7 @@ int ocfs2_prepare_write(struct file *file, struct page *page,
        goto out;
    }

-   down_read(&OCFS2_I(inode)->ip_alloc_sem);
-
-   ret = block_prepare_write(page, from, to, ocfs2_get_block);
-
-   up_read(&OCFS2_I(inode)->ip_alloc_sem);
+   ret = ocfs2_prepare_write_nolock(inode, page, from, to);

    ocfs2_meta_unlock(inode, 0);
out:
@ -625,11 +637,31 @@ static ssize_t ocfs2_direct_IO(int rw,
    int ret;

    mlog_entry_void();

+   /*
+    * We get PR data locks even for O_DIRECT.  This allows
+    * concurrent O_DIRECT I/O but doesn't let O_DIRECT with
+    * extending and buffered zeroing writes race.  If they did
+    * race then the buffered zeroing could be written back after
+    * the O_DIRECT I/O.  It's one thing to tell people not to mix
+    * buffered and O_DIRECT writes, but expecting them to
+    * understand that file extension is also an implicit buffered
+    * write is too much.  By getting the PR we force writeback of
+    * the buffered zeroing before proceeding.
+    */
+   ret = ocfs2_data_lock(inode, 0);
+   if (ret < 0) {
+       mlog_errno(ret);
+       goto out;
+   }
+   ocfs2_data_unlock(inode, 0);
+
    ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
                        inode->i_sb->s_bdev, iov, offset,
                        nr_segs,
                        ocfs2_direct_IO_get_blocks,
                        ocfs2_dio_end_io);
+out:
    mlog_exit(ret);
    return ret;
}

@ -22,8 +22,8 @@
#ifndef OCFS2_AOPS_H
#define OCFS2_AOPS_H

-int ocfs2_prepare_write(struct file *file, struct page *page,
-           unsigned from, unsigned to);
+int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
+                  unsigned from, unsigned to);

struct ocfs2_journal_handle *ocfs2_start_walk_page_trans(struct inode *inode,
                              struct page *page,
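
Splitting ocfs2_prepare_write() into a _nolock core plus a locking wrapper is the standard way to let a caller that already holds the lock (here ocfs2_write_zero_page(), which soon calls ocfs2_prepare_write_nolock() directly) reuse the code without recursive locking. The pattern in isolation (a pthread mutex stands in for ip_alloc_sem; illustrative only):

    #include <pthread.h>

    struct ctx { pthread_mutex_t lock; };

    /* Core: caller must already hold ctx->lock. */
    static int prepare_nolock(struct ctx *c)
    {
        /* ... work that relies on the lock being held ... */
        return 0;
    }

    /* Wrapper: the outermost entry point takes the lock itself. */
    static int prepare(struct ctx *c)
    {
        int ret;

        pthread_mutex_lock(&c->lock);
        ret = prepare_nolock(c);
        pthread_mutex_unlock(&c->lock);
        return ret;
    }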
@ -569,7 +569,7 @@ static int ocfs2_extent_map_insert(struct inode *inode,

    ret = -ENOMEM;
    ctxt.new_ent = kmem_cache_alloc(ocfs2_em_ent_cachep,
-                   GFP_KERNEL);
+                   GFP_NOFS);
    if (!ctxt.new_ent) {
        mlog_errno(ret);
        return ret;
@ -583,14 +583,14 @@ static int ocfs2_extent_map_insert(struct inode *inode,
    if (ctxt.need_left && !ctxt.left_ent) {
        ctxt.left_ent =
            kmem_cache_alloc(ocfs2_em_ent_cachep,
-                    GFP_KERNEL);
+                    GFP_NOFS);
        if (!ctxt.left_ent)
            break;
    }
    if (ctxt.need_right && !ctxt.right_ent) {
        ctxt.right_ent =
            kmem_cache_alloc(ocfs2_em_ent_cachep,
-                    GFP_KERNEL);
+                    GFP_NOFS);
        if (!ctxt.right_ent)
            break;
    }
@ -613,7 +613,8 @@ leave:

/* Some parts of this taken from generic_cont_expand, which turned out
 * to be too fragile to do exactly what we need without us having to
- * worry about recursive locking in ->commit_write(). */
+ * worry about recursive locking in ->prepare_write() and
+ * ->commit_write(). */
static int ocfs2_write_zero_page(struct inode *inode,
                 u64 size)
{
@ -641,7 +642,7 @@ static int ocfs2_write_zero_page(struct inode *inode,
        goto out;
    }

-   ret = ocfs2_prepare_write(NULL, page, offset, offset);
+   ret = ocfs2_prepare_write_nolock(inode, page, offset, offset);
    if (ret < 0) {
        mlog_errno(ret);
        goto out_unlock;
@ -695,13 +696,26 @@ out:
    return ret;
}

+/*
+ * A tail_to_skip value > 0 indicates that we're being called from
+ * ocfs2_file_aio_write(). This has the following implications:
+ *
+ * - we don't want to update i_size
+ * - di_bh will be NULL, which is fine because it's only used in the
+ *   case where we want to update i_size.
+ * - ocfs2_zero_extend() will then only be filling the hole created
+ *   between i_size and the start of the write.
+ */
static int ocfs2_extend_file(struct inode *inode,
                 struct buffer_head *di_bh,
-                u64 new_i_size)
+                u64 new_i_size,
+                size_t tail_to_skip)
{
    int ret = 0;
    u32 clusters_to_add;

+   BUG_ON(!tail_to_skip && !di_bh);
+
    /* setattr sometimes calls us like this. */
    if (new_i_size == 0)
        goto out;
@ -714,27 +728,44 @@ static int ocfs2_extend_file(struct inode *inode,
        OCFS2_I(inode)->ip_clusters;

    if (clusters_to_add) {
+       /*
+        * protect the pages that ocfs2_zero_extend is going to
+        * be pulling into the page cache.. we do this before the
+        * metadata extend so that we don't get into the situation
+        * where we've extended the metadata but can't get the data
+        * lock to zero.
+        */
+       ret = ocfs2_data_lock(inode, 1);
+       if (ret < 0) {
+           mlog_errno(ret);
+           goto out;
+       }
+
        ret = ocfs2_extend_allocation(inode, clusters_to_add);
        if (ret < 0) {
            mlog_errno(ret);
-           goto out;
+           goto out_unlock;
        }

-       ret = ocfs2_zero_extend(inode, new_i_size);
+       ret = ocfs2_zero_extend(inode, (u64)new_i_size - tail_to_skip);
        if (ret < 0) {
            mlog_errno(ret);
-           goto out;
+           goto out_unlock;
        }
    }

-   /* No allocation required, we just use this helper to
-    * do a trivial update of i_size. */
-   ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
-   if (ret < 0) {
-       mlog_errno(ret);
-       goto out;
-   }
+   if (!tail_to_skip) {
+       /* We're being called from ocfs2_setattr() which wants
+        * us to update i_size */
+       ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
+       if (ret < 0)
+           mlog_errno(ret);
+   }
+
+out_unlock:
+   if (clusters_to_add) /* this is the only case in which we lock */
+       ocfs2_data_unlock(inode, 1);

out:
    return ret;
}
@ -793,7 +824,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
    if (i_size_read(inode) > attr->ia_size)
        status = ocfs2_truncate_file(inode, bh, attr->ia_size);
    else
-       status = ocfs2_extend_file(inode, bh, attr->ia_size);
+       status = ocfs2_extend_file(inode, bh, attr->ia_size, 0);
    if (status < 0) {
        if (status != -ENOSPC)
            mlog_errno(status);
@ -1049,21 +1080,12 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
        if (!clusters)
            break;

-       ret = ocfs2_extend_allocation(inode, clusters);
+       ret = ocfs2_extend_file(inode, NULL, newsize, count);
        if (ret < 0) {
            if (ret != -ENOSPC)
                mlog_errno(ret);
            goto out;
        }
-
-       /* Fill any holes which would've been created by this
-        * write. If we're O_APPEND, this will wind up
-        * (correctly) being a noop. */
-       ret = ocfs2_zero_extend(inode, (u64) newsize - count);
-       if (ret < 0) {
-           mlog_errno(ret);
-           goto out;
-       }
        break;
    }

@ -1146,6 +1168,22 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
        ocfs2_iocb_set_rw_locked(iocb);
    }

+   /*
+    * We're fine letting folks race truncates and extending
+    * writes with read across the cluster, just like they can
+    * locally. Hence no rw_lock during read.
+    *
+    * Take and drop the meta data lock to update inode fields
+    * like i_size. This allows the checks down below
+    * generic_file_aio_read() a chance of actually working.
+    */
+   ret = ocfs2_meta_lock(inode, NULL, NULL, 0);
+   if (ret < 0) {
+       mlog_errno(ret);
+       goto bail;
+   }
+   ocfs2_meta_unlock(inode, 0);
+
    ret = generic_file_aio_read(iocb, buf, count, iocb->ki_pos);
    if (ret == -EINVAL)
        mlog(ML_ERROR, "generic_file_aio_read returned -EINVAL\n");
@ -117,7 +117,7 @@ struct ocfs2_journal_handle *ocfs2_alloc_handle(struct ocfs2_super *osb)
{
    struct ocfs2_journal_handle *retval = NULL;

-   retval = kcalloc(1, sizeof(*retval), GFP_KERNEL);
+   retval = kcalloc(1, sizeof(*retval), GFP_NOFS);
    if (!retval) {
        mlog(ML_ERROR, "Failed to allocate memory for journal "
             "handle!\n");
@ -870,9 +870,11 @@ static int ocfs2_force_read_journal(struct inode *inode)
        if (p_blocks > CONCURRENT_JOURNAL_FILL)
            p_blocks = CONCURRENT_JOURNAL_FILL;

+       /* We are reading journal data which should not
+        * be put in the uptodate cache */
        status = ocfs2_read_blocks(OCFS2_SB(inode->i_sb),
                       p_blkno, p_blocks, bhs, 0,
-                      inode);
+                      NULL);
        if (status < 0) {
            mlog_errno(status);
            goto bail;
@ -982,7 +984,7 @@ static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal,
{
    struct ocfs2_la_recovery_item *item;

-   item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_KERNEL);
+   item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_NOFS);
    if (!item) {
        /* Though we wish to avoid it, we are in fact safe in
         * skipping local alloc cleanup as fsck.ocfs2 is more
@ -337,7 +337,7 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi,
         (unsigned long long)oi->ip_blkno,
         (unsigned long long)block, expand_tree);

-   new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_KERNEL);
+   new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS);
    if (!new) {
        mlog_errno(-ENOMEM);
        return;
@ -349,7 +349,7 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi,
     * has no way of tracking that. */
    for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++) {
        tree[i] = kmem_cache_alloc(ocfs2_uptodate_cachep,
-                      GFP_KERNEL);
+                      GFP_NOFS);
        if (!tree[i]) {
            mlog_errno(-ENOMEM);
            goto out_free;
@ -586,7 +586,7 @@ static struct ocfs2_net_wait_ctxt *ocfs2_new_net_wait_ctxt(unsigned int response
{
    struct ocfs2_net_wait_ctxt *w;

-   w = kcalloc(1, sizeof(*w), GFP_KERNEL);
+   w = kcalloc(1, sizeof(*w), GFP_NOFS);
    if (!w) {
        mlog_errno(-ENOMEM);
        goto bail;
@ -749,7 +749,7 @@ static struct ocfs2_vote_msg * ocfs2_new_vote_request(struct ocfs2_super *osb,

    BUG_ON(!ocfs2_is_valid_vote_request(type));

-   request = kcalloc(1, sizeof(*request), GFP_KERNEL);
+   request = kcalloc(1, sizeof(*request), GFP_NOFS);
    if (!request) {
        mlog_errno(-ENOMEM);
    } else {
@ -1129,7 +1129,7 @@ static int ocfs2_handle_vote_message(struct o2net_msg *msg,
    struct ocfs2_super *osb = data;
    struct ocfs2_vote_work *work;

-   work = kmalloc(sizeof(struct ocfs2_vote_work), GFP_KERNEL);
+   work = kmalloc(sizeof(struct ocfs2_vote_work), GFP_NOFS);
    if (!work) {
        status = -ENOMEM;
        mlog_errno(status);
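
Every ocfs2 hunk in this last group makes the same substitution: allocations reachable from journal recovery, vote handling, and other paths that may already hold filesystem locks switch from GFP_KERNEL to GFP_NOFS, so that direct reclaim cannot re-enter the filesystem and deadlock. The flag choice in isolation (struct vote_work here is a hypothetical payload, not the ocfs2 type):

    #include <linux/slab.h>

    struct vote_work { int type; };

    /* GFP_NOFS: the allocator may sleep but will not recurse into
     * filesystem reclaim, which is unsafe in these call paths. */
    static struct vote_work *alloc_vote_work(void)
    {
        return kmalloc(sizeof(struct vote_work), GFP_NOFS);
    }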
Some files were not shown because too many files have changed in this diff.