Merge branch 'drm-intel-fixes' into drm-intel-next
commit df7976797f
@ -40,7 +40,6 @@ Features which NILFS2 does not support yet:
|
|||||||
- POSIX ACLs
|
- POSIX ACLs
|
||||||
- quotas
|
- quotas
|
||||||
- fsck
|
- fsck
|
||||||
- resize
|
|
||||||
- defragmentation
|
- defragmentation
|
||||||
|
|
||||||
Mount options
|
Mount options
|
||||||
|
@ -346,7 +346,7 @@ tcp_orphan_retries - INTEGER
|
|||||||
when RTO retransmissions remain unacknowledged.
|
when RTO retransmissions remain unacknowledged.
|
||||||
See tcp_retries2 for more details.
|
See tcp_retries2 for more details.
|
||||||
|
|
||||||
The default value is 7.
|
The default value is 8.
|
||||||
If your machine is a loaded WEB server,
|
If your machine is a loaded WEB server,
|
||||||
you should think about lowering this value, such sockets
|
you should think about lowering this value, such sockets
|
||||||
may consume significant resources. Cf. tcp_max_orphans.
|
may consume significant resources. Cf. tcp_max_orphans.
|
||||||
|
@ -674,7 +674,7 @@ Protocol: 2.10+
|
|||||||
|
|
||||||
Field name: init_size
|
Field name: init_size
|
||||||
Type: read
|
Type: read
|
||||||
Offset/size: 0x25c/4
|
Offset/size: 0x260/4
|
||||||
|
|
||||||
This field indicates the amount of linear contiguous memory starting
|
This field indicates the amount of linear contiguous memory starting
|
||||||
at the kernel runtime start address that the kernel needs before it
|
at the kernel runtime start address that the kernel needs before it
|
||||||
|
2
Makefile
2
Makefile
@ -1,7 +1,7 @@
|
|||||||
VERSION = 3
|
VERSION = 3
|
||||||
PATCHLEVEL = 0
|
PATCHLEVEL = 0
|
||||||
SUBLEVEL = 0
|
SUBLEVEL = 0
|
||||||
EXTRAVERSION = -rc7
|
EXTRAVERSION =
|
||||||
NAME = Sneaky Weasel
|
NAME = Sneaky Weasel
|
||||||
|
|
||||||
# *DOCUMENTATION*
|
# *DOCUMENTATION*
|
||||||
|
@ -520,7 +520,7 @@ fail:
|
|||||||
*/
|
*/
|
||||||
if (have_imager()) {
|
if (have_imager()) {
|
||||||
label = "HD imager";
|
label = "HD imager";
|
||||||
mux |= 1;
|
mux |= 2;
|
||||||
|
|
||||||
/* externally mux MMC1/ENET/AIC33 to imager */
|
/* externally mux MMC1/ENET/AIC33 to imager */
|
||||||
mux |= BIT(6) | BIT(5) | BIT(3);
|
mux |= BIT(6) | BIT(5) | BIT(3);
|
||||||
@ -540,7 +540,7 @@ fail:
|
|||||||
resets &= ~BIT(1);
|
resets &= ~BIT(1);
|
||||||
|
|
||||||
if (have_tvp7002()) {
|
if (have_tvp7002()) {
|
||||||
mux |= 2;
|
mux |= 1;
|
||||||
resets &= ~BIT(2);
|
resets &= ~BIT(2);
|
||||||
label = "tvp7002 HD";
|
label = "tvp7002 HD";
|
||||||
} else {
|
} else {
|
||||||
|
@ -254,8 +254,10 @@ gpio_irq_handler(unsigned irq, struct irq_desc *desc)
|
|||||||
{
|
{
|
||||||
struct davinci_gpio_regs __iomem *g;
|
struct davinci_gpio_regs __iomem *g;
|
||||||
u32 mask = 0xffff;
|
u32 mask = 0xffff;
|
||||||
|
struct davinci_gpio_controller *d;
|
||||||
|
|
||||||
g = (__force struct davinci_gpio_regs __iomem *) irq_desc_get_handler_data(desc);
|
d = (struct davinci_gpio_controller *)irq_desc_get_handler_data(desc);
|
||||||
|
g = (struct davinci_gpio_regs __iomem *)d->regs;
|
||||||
|
|
||||||
/* we only care about one bank */
|
/* we only care about one bank */
|
||||||
if (irq & 1)
|
if (irq & 1)
|
||||||
@ -274,11 +276,14 @@ gpio_irq_handler(unsigned irq, struct irq_desc *desc)
|
|||||||
if (!status)
|
if (!status)
|
||||||
break;
|
break;
|
||||||
__raw_writel(status, &g->intstat);
|
__raw_writel(status, &g->intstat);
|
||||||
if (irq & 1)
|
|
||||||
status >>= 16;
|
|
||||||
|
|
||||||
/* now demux them to the right lowlevel handler */
|
/* now demux them to the right lowlevel handler */
|
||||||
n = (int)irq_get_handler_data(irq);
|
n = d->irq_base;
|
||||||
|
if (irq & 1) {
|
||||||
|
n += 16;
|
||||||
|
status >>= 16;
|
||||||
|
}
|
||||||
|
|
||||||
while (status) {
|
while (status) {
|
||||||
res = ffs(status);
|
res = ffs(status);
|
||||||
n += res;
|
n += res;
|
||||||
@ -424,7 +429,13 @@ static int __init davinci_gpio_irq_setup(void)
|
|||||||
|
|
||||||
/* set up all irqs in this bank */
|
/* set up all irqs in this bank */
|
||||||
irq_set_chained_handler(bank_irq, gpio_irq_handler);
|
irq_set_chained_handler(bank_irq, gpio_irq_handler);
|
||||||
irq_set_handler_data(bank_irq, (__force void *)g);
|
|
||||||
|
/*
|
||||||
|
* Each chip handles 32 gpios, and each irq bank consists of 16
|
||||||
|
* gpio irqs. Pass the irq bank's corresponding controller to
|
||||||
|
* the chained irq handler.
|
||||||
|
*/
|
||||||
|
irq_set_handler_data(bank_irq, &chips[gpio / 32]);
|
||||||
|
|
||||||
for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) {
|
for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) {
|
||||||
irq_set_chip(irq, &gpio_irqchip);
|
irq_set_chip(irq, &gpio_irqchip);
|
||||||
|
@ -52,8 +52,14 @@ davinci_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
|
|||||||
struct irq_chip_type *ct;
|
struct irq_chip_type *ct;
|
||||||
|
|
||||||
gc = irq_alloc_generic_chip("AINTC", 1, irq_start, base, handle_edge_irq);
|
gc = irq_alloc_generic_chip("AINTC", 1, irq_start, base, handle_edge_irq);
|
||||||
|
if (!gc) {
|
||||||
|
pr_err("%s: irq_alloc_generic_chip for IRQ %u failed\n",
|
||||||
|
__func__, irq_start);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
ct = gc->chip_types;
|
ct = gc->chip_types;
|
||||||
ct->chip.irq_ack = irq_gc_ack;
|
ct->chip.irq_ack = irq_gc_ack_set_bit;
|
||||||
ct->chip.irq_mask = irq_gc_mask_clr_bit;
|
ct->chip.irq_mask = irq_gc_mask_clr_bit;
|
||||||
ct->chip.irq_unmask = irq_gc_mask_set_bit;
|
ct->chip.irq_unmask = irq_gc_mask_set_bit;
|
||||||
|
|
||||||
|
@ -419,14 +419,20 @@ static void notrace ixp4xx_update_sched_clock(void)
|
|||||||
/*
|
/*
|
||||||
* clocksource
|
* clocksource
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
static cycle_t ixp4xx_clocksource_read(struct clocksource *c)
|
||||||
|
{
|
||||||
|
return *IXP4XX_OSTS;
|
||||||
|
}
|
||||||
|
|
||||||
unsigned long ixp4xx_timer_freq = IXP4XX_TIMER_FREQ;
|
unsigned long ixp4xx_timer_freq = IXP4XX_TIMER_FREQ;
|
||||||
EXPORT_SYMBOL(ixp4xx_timer_freq);
|
EXPORT_SYMBOL(ixp4xx_timer_freq);
|
||||||
static void __init ixp4xx_clocksource_init(void)
|
static void __init ixp4xx_clocksource_init(void)
|
||||||
{
|
{
|
||||||
init_sched_clock(&cd, ixp4xx_update_sched_clock, 32, ixp4xx_timer_freq);
|
init_sched_clock(&cd, ixp4xx_update_sched_clock, 32, ixp4xx_timer_freq);
|
||||||
|
|
||||||
clocksource_mmio_init(&IXP4XX_OSTS, "OSTS", ixp4xx_timer_freq, 200, 32,
|
clocksource_mmio_init(NULL, "OSTS", ixp4xx_timer_freq, 200, 32,
|
||||||
clocksource_mmio_readl_up);
|
ixp4xx_clocksource_read);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -79,7 +79,7 @@ static APBC_CLK(ssp4, PXA168_SSP4, 4, 0);
|
|||||||
static APBC_CLK(ssp5, PXA168_SSP5, 4, 0);
|
static APBC_CLK(ssp5, PXA168_SSP5, 4, 0);
|
||||||
static APBC_CLK(keypad, PXA168_KPC, 0, 32000);
|
static APBC_CLK(keypad, PXA168_KPC, 0, 32000);
|
||||||
|
|
||||||
static APMU_CLK(nand, NAND, 0x01db, 208000000);
|
static APMU_CLK(nand, NAND, 0x19b, 156000000);
|
||||||
static APMU_CLK(lcd, LCD, 0x7f, 312000000);
|
static APMU_CLK(lcd, LCD, 0x7f, 312000000);
|
||||||
|
|
||||||
/* device and clock bindings */
|
/* device and clock bindings */
|
||||||
|
@ -110,7 +110,7 @@ static APBC_CLK(pwm2, PXA910_PWM2, 1, 13000000);
|
|||||||
static APBC_CLK(pwm3, PXA910_PWM3, 1, 13000000);
|
static APBC_CLK(pwm3, PXA910_PWM3, 1, 13000000);
|
||||||
static APBC_CLK(pwm4, PXA910_PWM4, 1, 13000000);
|
static APBC_CLK(pwm4, PXA910_PWM4, 1, 13000000);
|
||||||
|
|
||||||
static APMU_CLK(nand, NAND, 0x01db, 208000000);
|
static APMU_CLK(nand, NAND, 0x19b, 156000000);
|
||||||
static APMU_CLK(u2o, USB, 0x1b, 480000000);
|
static APMU_CLK(u2o, USB, 0x1b, 480000000);
|
||||||
|
|
||||||
/* device and clock bindings */
|
/* device and clock bindings */
|
||||||
|
@ -347,9 +347,9 @@ static int pxa2xx_mfp_suspend(void)
|
|||||||
if ((gpio_desc[i].config & MFP_LPM_KEEP_OUTPUT) &&
|
if ((gpio_desc[i].config & MFP_LPM_KEEP_OUTPUT) &&
|
||||||
(GPDR(i) & GPIO_bit(i))) {
|
(GPDR(i) & GPIO_bit(i))) {
|
||||||
if (GPLR(i) & GPIO_bit(i))
|
if (GPLR(i) & GPIO_bit(i))
|
||||||
PGSR(i) |= GPIO_bit(i);
|
PGSR(gpio_to_bank(i)) |= GPIO_bit(i);
|
||||||
else
|
else
|
||||||
PGSR(i) &= ~GPIO_bit(i);
|
PGSR(gpio_to_bank(i)) &= ~GPIO_bit(i);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -573,10 +573,10 @@ static struct pxafb_mode_info sharp_lq043t3dx02_mode = {
|
|||||||
.xres = 480,
|
.xres = 480,
|
||||||
.yres = 272,
|
.yres = 272,
|
||||||
.bpp = 16,
|
.bpp = 16,
|
||||||
.hsync_len = 4,
|
.hsync_len = 41,
|
||||||
.left_margin = 2,
|
.left_margin = 2,
|
||||||
.right_margin = 1,
|
.right_margin = 1,
|
||||||
.vsync_len = 1,
|
.vsync_len = 10,
|
||||||
.upper_margin = 3,
|
.upper_margin = 3,
|
||||||
.lower_margin = 1,
|
.lower_margin = 1,
|
||||||
.sync = 0,
|
.sync = 0,
|
||||||
@ -596,29 +596,31 @@ static void __init raumfeld_lcd_init(void)
|
|||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
pxa_set_fb_info(NULL, &raumfeld_sharp_lcd_info);
|
|
||||||
|
|
||||||
/* Earlier devices had the backlight regulator controlled
|
|
||||||
* via PWM, later versions use another controller for that */
|
|
||||||
if ((system_rev & 0xff) < 2) {
|
|
||||||
mfp_cfg_t raumfeld_pwm_pin_config = GPIO17_PWM0_OUT;
|
|
||||||
pxa3xx_mfp_config(&raumfeld_pwm_pin_config, 1);
|
|
||||||
platform_device_register(&raumfeld_pwm_backlight_device);
|
|
||||||
} else
|
|
||||||
platform_device_register(&raumfeld_lt3593_device);
|
|
||||||
|
|
||||||
ret = gpio_request(GPIO_TFT_VA_EN, "display VA enable");
|
ret = gpio_request(GPIO_TFT_VA_EN, "display VA enable");
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
pr_warning("Unable to request GPIO_TFT_VA_EN\n");
|
pr_warning("Unable to request GPIO_TFT_VA_EN\n");
|
||||||
else
|
else
|
||||||
gpio_direction_output(GPIO_TFT_VA_EN, 1);
|
gpio_direction_output(GPIO_TFT_VA_EN, 1);
|
||||||
|
|
||||||
|
msleep(100);
|
||||||
|
|
||||||
ret = gpio_request(GPIO_DISPLAY_ENABLE, "display enable");
|
ret = gpio_request(GPIO_DISPLAY_ENABLE, "display enable");
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
pr_warning("Unable to request GPIO_DISPLAY_ENABLE\n");
|
pr_warning("Unable to request GPIO_DISPLAY_ENABLE\n");
|
||||||
else
|
else
|
||||||
gpio_direction_output(GPIO_DISPLAY_ENABLE, 1);
|
gpio_direction_output(GPIO_DISPLAY_ENABLE, 1);
|
||||||
|
|
||||||
|
/* Hardware revision 2 has the backlight regulator controlled
|
||||||
|
* by an LT3593, earlier and later devices use PWM for that. */
|
||||||
|
if ((system_rev & 0xff) == 2) {
|
||||||
|
platform_device_register(&raumfeld_lt3593_device);
|
||||||
|
} else {
|
||||||
|
mfp_cfg_t raumfeld_pwm_pin_config = GPIO17_PWM0_OUT;
|
||||||
|
pxa3xx_mfp_config(&raumfeld_pwm_pin_config, 1);
|
||||||
|
platform_device_register(&raumfeld_pwm_backlight_device);
|
||||||
|
}
|
||||||
|
|
||||||
|
pxa_set_fb_info(NULL, &raumfeld_sharp_lcd_info);
|
||||||
platform_device_register(&pxa3xx_device_gcu);
|
platform_device_register(&pxa3xx_device_gcu);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -657,7 +659,7 @@ static struct lis3lv02d_platform_data lis3_pdata = {
|
|||||||
|
|
||||||
#define SPI_AK4104 \
|
#define SPI_AK4104 \
|
||||||
{ \
|
{ \
|
||||||
.modalias = "ak4104", \
|
.modalias = "ak4104-codec", \
|
||||||
.max_speed_hz = 10000, \
|
.max_speed_hz = 10000, \
|
||||||
.bus_num = 0, \
|
.bus_num = 0, \
|
||||||
.chip_select = 0, \
|
.chip_select = 0, \
|
||||||
|
@ -113,7 +113,7 @@ found:
|
|||||||
return chan;
|
return chan;
|
||||||
}
|
}
|
||||||
|
|
||||||
int s3c2410_dma_config(unsigned int channel, int xferunit)
|
int s3c2410_dma_config(enum dma_ch channel, int xferunit)
|
||||||
{
|
{
|
||||||
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
|
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
|
||||||
|
|
||||||
@ -297,7 +297,7 @@ static int s3c64xx_dma_flush(struct s3c2410_dma_chan *chan)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op)
|
int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op)
|
||||||
{
|
{
|
||||||
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
|
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
|
||||||
|
|
||||||
@ -331,7 +331,7 @@ EXPORT_SYMBOL(s3c2410_dma_ctrl);
|
|||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
|
|
||||||
int s3c2410_dma_enqueue(unsigned int channel, void *id,
|
int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
|
||||||
dma_addr_t data, int size)
|
dma_addr_t data, int size)
|
||||||
{
|
{
|
||||||
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
|
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
|
||||||
@ -415,7 +415,7 @@ err_buff:
|
|||||||
EXPORT_SYMBOL(s3c2410_dma_enqueue);
|
EXPORT_SYMBOL(s3c2410_dma_enqueue);
|
||||||
|
|
||||||
|
|
||||||
int s3c2410_dma_devconfig(unsigned int channel,
|
int s3c2410_dma_devconfig(enum dma_ch channel,
|
||||||
enum s3c2410_dmasrc source,
|
enum s3c2410_dmasrc source,
|
||||||
unsigned long devaddr)
|
unsigned long devaddr)
|
||||||
{
|
{
|
||||||
@ -463,7 +463,7 @@ int s3c2410_dma_devconfig(unsigned int channel,
|
|||||||
EXPORT_SYMBOL(s3c2410_dma_devconfig);
|
EXPORT_SYMBOL(s3c2410_dma_devconfig);
|
||||||
|
|
||||||
|
|
||||||
int s3c2410_dma_getposition(unsigned int channel,
|
int s3c2410_dma_getposition(enum dma_ch channel,
|
||||||
dma_addr_t *src, dma_addr_t *dst)
|
dma_addr_t *src, dma_addr_t *dst)
|
||||||
{
|
{
|
||||||
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
|
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
|
||||||
@ -487,7 +487,7 @@ EXPORT_SYMBOL(s3c2410_dma_getposition);
|
|||||||
* get control of an dma channel
|
* get control of an dma channel
|
||||||
*/
|
*/
|
||||||
|
|
||||||
int s3c2410_dma_request(unsigned int channel,
|
int s3c2410_dma_request(enum dma_ch channel,
|
||||||
struct s3c2410_dma_client *client,
|
struct s3c2410_dma_client *client,
|
||||||
void *dev)
|
void *dev)
|
||||||
{
|
{
|
||||||
@ -533,7 +533,7 @@ EXPORT_SYMBOL(s3c2410_dma_request);
|
|||||||
* allowed to go through.
|
* allowed to go through.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *client)
|
int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client)
|
||||||
{
|
{
|
||||||
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
|
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
@ -432,7 +432,7 @@ void __init orion_gpio_init(int gpio_base, int ngpio,
|
|||||||
ct->regs.mask = ochip->mask_offset + GPIO_EDGE_MASK_OFF;
|
ct->regs.mask = ochip->mask_offset + GPIO_EDGE_MASK_OFF;
|
||||||
ct->regs.ack = GPIO_EDGE_CAUSE_OFF;
|
ct->regs.ack = GPIO_EDGE_CAUSE_OFF;
|
||||||
ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
|
ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
|
||||||
ct->chip.irq_ack = irq_gc_ack;
|
ct->chip.irq_ack = irq_gc_ack_clr_bit;
|
||||||
ct->chip.irq_mask = irq_gc_mask_clr_bit;
|
ct->chip.irq_mask = irq_gc_mask_clr_bit;
|
||||||
ct->chip.irq_unmask = irq_gc_mask_set_bit;
|
ct->chip.irq_unmask = irq_gc_mask_set_bit;
|
||||||
ct->chip.irq_set_type = gpio_irq_set_type;
|
ct->chip.irq_set_type = gpio_irq_set_type;
|
||||||
|
@ -50,7 +50,7 @@ static inline void __iomem *gpio_chip_base(struct gpio_chip *c)
|
|||||||
return container_of(c, struct pxa_gpio_chip, chip)->regbase;
|
return container_of(c, struct pxa_gpio_chip, chip)->regbase;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline struct pxa_gpio_chip *gpio_to_chip(unsigned gpio)
|
static inline struct pxa_gpio_chip *gpio_to_pxachip(unsigned gpio)
|
||||||
{
|
{
|
||||||
return &pxa_gpio_chips[gpio_to_bank(gpio)];
|
return &pxa_gpio_chips[gpio_to_bank(gpio)];
|
||||||
}
|
}
|
||||||
@ -161,7 +161,7 @@ static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type)
|
|||||||
int gpio = irq_to_gpio(d->irq);
|
int gpio = irq_to_gpio(d->irq);
|
||||||
unsigned long gpdr, mask = GPIO_bit(gpio);
|
unsigned long gpdr, mask = GPIO_bit(gpio);
|
||||||
|
|
||||||
c = gpio_to_chip(gpio);
|
c = gpio_to_pxachip(gpio);
|
||||||
|
|
||||||
if (type == IRQ_TYPE_PROBE) {
|
if (type == IRQ_TYPE_PROBE) {
|
||||||
/* Don't mess with enabled GPIOs using preconfigured edges or
|
/* Don't mess with enabled GPIOs using preconfigured edges or
|
||||||
@ -230,7 +230,7 @@ static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
|
|||||||
static void pxa_ack_muxed_gpio(struct irq_data *d)
|
static void pxa_ack_muxed_gpio(struct irq_data *d)
|
||||||
{
|
{
|
||||||
int gpio = irq_to_gpio(d->irq);
|
int gpio = irq_to_gpio(d->irq);
|
||||||
struct pxa_gpio_chip *c = gpio_to_chip(gpio);
|
struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
|
||||||
|
|
||||||
__raw_writel(GPIO_bit(gpio), c->regbase + GEDR_OFFSET);
|
__raw_writel(GPIO_bit(gpio), c->regbase + GEDR_OFFSET);
|
||||||
}
|
}
|
||||||
@ -238,7 +238,7 @@ static void pxa_ack_muxed_gpio(struct irq_data *d)
|
|||||||
static void pxa_mask_muxed_gpio(struct irq_data *d)
|
static void pxa_mask_muxed_gpio(struct irq_data *d)
|
||||||
{
|
{
|
||||||
int gpio = irq_to_gpio(d->irq);
|
int gpio = irq_to_gpio(d->irq);
|
||||||
struct pxa_gpio_chip *c = gpio_to_chip(gpio);
|
struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
|
||||||
uint32_t grer, gfer;
|
uint32_t grer, gfer;
|
||||||
|
|
||||||
c->irq_mask &= ~GPIO_bit(gpio);
|
c->irq_mask &= ~GPIO_bit(gpio);
|
||||||
@ -252,7 +252,7 @@ static void pxa_mask_muxed_gpio(struct irq_data *d)
|
|||||||
static void pxa_unmask_muxed_gpio(struct irq_data *d)
|
static void pxa_unmask_muxed_gpio(struct irq_data *d)
|
||||||
{
|
{
|
||||||
int gpio = irq_to_gpio(d->irq);
|
int gpio = irq_to_gpio(d->irq);
|
||||||
struct pxa_gpio_chip *c = gpio_to_chip(gpio);
|
struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
|
||||||
|
|
||||||
c->irq_mask |= GPIO_bit(gpio);
|
c->irq_mask |= GPIO_bit(gpio);
|
||||||
update_edge_detect(c);
|
update_edge_detect(c);
|
||||||
|
@ -712,7 +712,7 @@ static struct s3c2410_dma_chan *s3c2410_dma_map_channel(int channel);
|
|||||||
* get control of an dma channel
|
* get control of an dma channel
|
||||||
*/
|
*/
|
||||||
|
|
||||||
int s3c2410_dma_request(unsigned int channel,
|
int s3c2410_dma_request(enum dma_ch channel,
|
||||||
struct s3c2410_dma_client *client,
|
struct s3c2410_dma_client *client,
|
||||||
void *dev)
|
void *dev)
|
||||||
{
|
{
|
||||||
@ -783,7 +783,7 @@ EXPORT_SYMBOL(s3c2410_dma_request);
|
|||||||
* allowed to go through.
|
* allowed to go through.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *client)
|
int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client)
|
||||||
{
|
{
|
||||||
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
|
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
@ -974,7 +974,7 @@ static int s3c2410_dma_started(struct s3c2410_dma_chan *chan)
|
|||||||
}
|
}
|
||||||
|
|
||||||
int
|
int
|
||||||
s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op)
|
s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op)
|
||||||
{
|
{
|
||||||
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
|
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
|
||||||
|
|
||||||
@ -1021,7 +1021,7 @@ EXPORT_SYMBOL(s3c2410_dma_ctrl);
|
|||||||
* xfersize: size of unit in bytes (1,2,4)
|
* xfersize: size of unit in bytes (1,2,4)
|
||||||
*/
|
*/
|
||||||
|
|
||||||
int s3c2410_dma_config(unsigned int channel,
|
int s3c2410_dma_config(enum dma_ch channel,
|
||||||
int xferunit)
|
int xferunit)
|
||||||
{
|
{
|
||||||
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
|
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
|
||||||
@ -1100,7 +1100,7 @@ EXPORT_SYMBOL(s3c2410_dma_config);
|
|||||||
* devaddr: physical address of the source
|
* devaddr: physical address of the source
|
||||||
*/
|
*/
|
||||||
|
|
||||||
int s3c2410_dma_devconfig(unsigned int channel,
|
int s3c2410_dma_devconfig(enum dma_ch channel,
|
||||||
enum s3c2410_dmasrc source,
|
enum s3c2410_dmasrc source,
|
||||||
unsigned long devaddr)
|
unsigned long devaddr)
|
||||||
{
|
{
|
||||||
@ -1173,7 +1173,7 @@ EXPORT_SYMBOL(s3c2410_dma_devconfig);
|
|||||||
* returns the current transfer points for the dma source and destination
|
* returns the current transfer points for the dma source and destination
|
||||||
*/
|
*/
|
||||||
|
|
||||||
int s3c2410_dma_getposition(unsigned int channel, dma_addr_t *src, dma_addr_t *dst)
|
int s3c2410_dma_getposition(enum dma_ch channel, dma_addr_t *src, dma_addr_t *dst)
|
||||||
{
|
{
|
||||||
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
|
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
|
||||||
|
|
||||||
|
@ -152,7 +152,7 @@ static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip)
|
|||||||
if (!gc)
|
if (!gc)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
ct = gc->chip_types;
|
ct = gc->chip_types;
|
||||||
ct->chip.irq_ack = irq_gc_ack;
|
ct->chip.irq_ack = irq_gc_ack_set_bit;
|
||||||
ct->chip.irq_mask = irq_gc_mask_set_bit;
|
ct->chip.irq_mask = irq_gc_mask_set_bit;
|
||||||
ct->chip.irq_unmask = irq_gc_mask_clr_bit;
|
ct->chip.irq_unmask = irq_gc_mask_clr_bit;
|
||||||
ct->chip.irq_set_type = s5p_gpioint_set_type,
|
ct->chip.irq_set_type = s5p_gpioint_set_type,
|
||||||
|
@ -41,7 +41,7 @@ struct s3c2410_dma_chan *s3c_dma_lookup_channel(unsigned int channel)
|
|||||||
* irq?
|
* irq?
|
||||||
*/
|
*/
|
||||||
|
|
||||||
int s3c2410_dma_set_opfn(unsigned int channel, s3c2410_dma_opfn_t rtn)
|
int s3c2410_dma_set_opfn(enum dma_ch channel, s3c2410_dma_opfn_t rtn)
|
||||||
{
|
{
|
||||||
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
|
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
|
||||||
|
|
||||||
@ -56,7 +56,7 @@ int s3c2410_dma_set_opfn(unsigned int channel, s3c2410_dma_opfn_t rtn)
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL(s3c2410_dma_set_opfn);
|
EXPORT_SYMBOL(s3c2410_dma_set_opfn);
|
||||||
|
|
||||||
int s3c2410_dma_set_buffdone_fn(unsigned int channel, s3c2410_dma_cbfn_t rtn)
|
int s3c2410_dma_set_buffdone_fn(enum dma_ch channel, s3c2410_dma_cbfn_t rtn)
|
||||||
{
|
{
|
||||||
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
|
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
|
||||||
|
|
||||||
@ -71,7 +71,7 @@ int s3c2410_dma_set_buffdone_fn(unsigned int channel, s3c2410_dma_cbfn_t rtn)
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn);
|
EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn);
|
||||||
|
|
||||||
int s3c2410_dma_setflags(unsigned int channel, unsigned int flags)
|
int s3c2410_dma_setflags(enum dma_ch channel, unsigned int flags)
|
||||||
{
|
{
|
||||||
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
|
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
|
||||||
|
|
||||||
|
@ -42,6 +42,7 @@ struct s3c2410_dma_client {
|
|||||||
};
|
};
|
||||||
|
|
||||||
struct s3c2410_dma_chan;
|
struct s3c2410_dma_chan;
|
||||||
|
enum dma_ch;
|
||||||
|
|
||||||
/* s3c2410_dma_cbfn_t
|
/* s3c2410_dma_cbfn_t
|
||||||
*
|
*
|
||||||
@ -62,7 +63,7 @@ typedef int (*s3c2410_dma_opfn_t)(struct s3c2410_dma_chan *,
|
|||||||
* request a dma channel exclusivley
|
* request a dma channel exclusivley
|
||||||
*/
|
*/
|
||||||
|
|
||||||
extern int s3c2410_dma_request(unsigned int channel,
|
extern int s3c2410_dma_request(enum dma_ch channel,
|
||||||
struct s3c2410_dma_client *, void *dev);
|
struct s3c2410_dma_client *, void *dev);
|
||||||
|
|
||||||
|
|
||||||
@ -71,14 +72,14 @@ extern int s3c2410_dma_request(unsigned int channel,
|
|||||||
* change the state of the dma channel
|
* change the state of the dma channel
|
||||||
*/
|
*/
|
||||||
|
|
||||||
extern int s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op);
|
extern int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op);
|
||||||
|
|
||||||
/* s3c2410_dma_setflags
|
/* s3c2410_dma_setflags
|
||||||
*
|
*
|
||||||
* set the channel's flags to a given state
|
* set the channel's flags to a given state
|
||||||
*/
|
*/
|
||||||
|
|
||||||
extern int s3c2410_dma_setflags(unsigned int channel,
|
extern int s3c2410_dma_setflags(enum dma_ch channel,
|
||||||
unsigned int flags);
|
unsigned int flags);
|
||||||
|
|
||||||
/* s3c2410_dma_free
|
/* s3c2410_dma_free
|
||||||
@ -86,7 +87,7 @@ extern int s3c2410_dma_setflags(unsigned int channel,
|
|||||||
* free the dma channel (will also abort any outstanding operations)
|
* free the dma channel (will also abort any outstanding operations)
|
||||||
*/
|
*/
|
||||||
|
|
||||||
extern int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *);
|
extern int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *);
|
||||||
|
|
||||||
/* s3c2410_dma_enqueue
|
/* s3c2410_dma_enqueue
|
||||||
*
|
*
|
||||||
@ -95,7 +96,7 @@ extern int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *);
|
|||||||
* drained before the buffer is given to the DMA system.
|
* drained before the buffer is given to the DMA system.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
extern int s3c2410_dma_enqueue(unsigned int channel, void *id,
|
extern int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
|
||||||
dma_addr_t data, int size);
|
dma_addr_t data, int size);
|
||||||
|
|
||||||
/* s3c2410_dma_config
|
/* s3c2410_dma_config
|
||||||
@ -103,14 +104,14 @@ extern int s3c2410_dma_enqueue(unsigned int channel, void *id,
|
|||||||
* configure the dma channel
|
* configure the dma channel
|
||||||
*/
|
*/
|
||||||
|
|
||||||
extern int s3c2410_dma_config(unsigned int channel, int xferunit);
|
extern int s3c2410_dma_config(enum dma_ch channel, int xferunit);
|
||||||
|
|
||||||
/* s3c2410_dma_devconfig
|
/* s3c2410_dma_devconfig
|
||||||
*
|
*
|
||||||
* configure the device we're talking to
|
* configure the device we're talking to
|
||||||
*/
|
*/
|
||||||
|
|
||||||
extern int s3c2410_dma_devconfig(unsigned int channel,
|
extern int s3c2410_dma_devconfig(enum dma_ch channel,
|
||||||
enum s3c2410_dmasrc source, unsigned long devaddr);
|
enum s3c2410_dmasrc source, unsigned long devaddr);
|
||||||
|
|
||||||
/* s3c2410_dma_getposition
|
/* s3c2410_dma_getposition
|
||||||
@ -118,10 +119,10 @@ extern int s3c2410_dma_devconfig(unsigned int channel,
|
|||||||
* get the position that the dma transfer is currently at
|
* get the position that the dma transfer is currently at
|
||||||
*/
|
*/
|
||||||
|
|
||||||
extern int s3c2410_dma_getposition(unsigned int channel,
|
extern int s3c2410_dma_getposition(enum dma_ch channel,
|
||||||
dma_addr_t *src, dma_addr_t *dest);
|
dma_addr_t *src, dma_addr_t *dest);
|
||||||
|
|
||||||
extern int s3c2410_dma_set_opfn(unsigned int, s3c2410_dma_opfn_t rtn);
|
extern int s3c2410_dma_set_opfn(enum dma_ch, s3c2410_dma_opfn_t rtn);
|
||||||
extern int s3c2410_dma_set_buffdone_fn(unsigned int, s3c2410_dma_cbfn_t rtn);
|
extern int s3c2410_dma_set_buffdone_fn(enum dma_ch, s3c2410_dma_cbfn_t rtn);
|
||||||
|
|
||||||
|
|
||||||
|
@ -54,8 +54,15 @@ static void __init s3c_init_uart_irq(struct s3c_uart_irq *uirq)
|
|||||||
|
|
||||||
gc = irq_alloc_generic_chip("s3c-uart", 1, uirq->base_irq, reg_base,
|
gc = irq_alloc_generic_chip("s3c-uart", 1, uirq->base_irq, reg_base,
|
||||||
handle_level_irq);
|
handle_level_irq);
|
||||||
|
|
||||||
|
if (!gc) {
|
||||||
|
pr_err("%s: irq_alloc_generic_chip for IRQ %u failed\n",
|
||||||
|
__func__, uirq->base_irq);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
ct = gc->chip_types;
|
ct = gc->chip_types;
|
||||||
ct->chip.irq_ack = irq_gc_ack;
|
ct->chip.irq_ack = irq_gc_ack_set_bit;
|
||||||
ct->chip.irq_mask = irq_gc_mask_set_bit;
|
ct->chip.irq_mask = irq_gc_mask_set_bit;
|
||||||
ct->chip.irq_unmask = irq_gc_mask_clr_bit;
|
ct->chip.irq_unmask = irq_gc_mask_clr_bit;
|
||||||
ct->regs.ack = S3C64XX_UINTP;
|
ct->regs.ack = S3C64XX_UINTP;
|
||||||
|
@ -54,6 +54,13 @@ void __init s3c_init_vic_timer_irq(unsigned int num, unsigned int timer_irq)
|
|||||||
|
|
||||||
s3c_tgc = irq_alloc_generic_chip("s3c-timer", 1, timer_irq,
|
s3c_tgc = irq_alloc_generic_chip("s3c-timer", 1, timer_irq,
|
||||||
S3C64XX_TINT_CSTAT, handle_level_irq);
|
S3C64XX_TINT_CSTAT, handle_level_irq);
|
||||||
|
|
||||||
|
if (!s3c_tgc) {
|
||||||
|
pr_err("%s: irq_alloc_generic_chip for IRQ %d failed\n",
|
||||||
|
__func__, timer_irq);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
ct = s3c_tgc->chip_types;
|
ct = s3c_tgc->chip_types;
|
||||||
ct->chip.irq_mask = irq_gc_mask_clr_bit;
|
ct->chip.irq_mask = irq_gc_mask_clr_bit;
|
||||||
ct->chip.irq_unmask = irq_gc_mask_set_bit;
|
ct->chip.irq_unmask = irq_gc_mask_set_bit;
|
||||||
|
@ -14,7 +14,7 @@
|
|||||||
#include <linux/interrupt.h>
|
#include <linux/interrupt.h>
|
||||||
#include <linux/kernel.h>
|
#include <linux/kernel.h>
|
||||||
#include <linux/spinlock.h>
|
#include <linux/spinlock.h>
|
||||||
#include <linux/sysdev.h>
|
#include <linux/syscore_ops.h>
|
||||||
#include <linux/irq.h>
|
#include <linux/irq.h>
|
||||||
|
|
||||||
#include <asm/i8259.h>
|
#include <asm/i8259.h>
|
||||||
@ -215,14 +215,13 @@ spurious_8259A_irq:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static int i8259A_resume(struct sys_device *dev)
|
static void i8259A_resume(void)
|
||||||
{
|
{
|
||||||
if (i8259A_auto_eoi >= 0)
|
if (i8259A_auto_eoi >= 0)
|
||||||
init_8259A(i8259A_auto_eoi);
|
init_8259A(i8259A_auto_eoi);
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int i8259A_shutdown(struct sys_device *dev)
|
static void i8259A_shutdown(void)
|
||||||
{
|
{
|
||||||
/* Put the i8259A into a quiescent state that
|
/* Put the i8259A into a quiescent state that
|
||||||
* the kernel initialization code can get it
|
* the kernel initialization code can get it
|
||||||
@ -232,26 +231,17 @@ static int i8259A_shutdown(struct sys_device *dev)
|
|||||||
outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
|
outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
|
||||||
outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-1 */
|
outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-1 */
|
||||||
}
|
}
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct sysdev_class i8259_sysdev_class = {
|
static struct syscore_ops i8259_syscore_ops = {
|
||||||
.name = "i8259",
|
|
||||||
.resume = i8259A_resume,
|
.resume = i8259A_resume,
|
||||||
.shutdown = i8259A_shutdown,
|
.shutdown = i8259A_shutdown,
|
||||||
};
|
};
|
||||||
|
|
||||||
static struct sys_device device_i8259A = {
|
|
||||||
.id = 0,
|
|
||||||
.cls = &i8259_sysdev_class,
|
|
||||||
};
|
|
||||||
|
|
||||||
static int __init i8259A_init_sysfs(void)
|
static int __init i8259A_init_sysfs(void)
|
||||||
{
|
{
|
||||||
int error = sysdev_class_register(&i8259_sysdev_class);
|
register_syscore_ops(&i8259_syscore_ops);
|
||||||
if (!error)
|
return 0;
|
||||||
error = sysdev_register(&device_i8259A);
|
|
||||||
return error;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
device_initcall(i8259A_init_sysfs);
|
device_initcall(i8259A_init_sysfs);
|
||||||
|
@ -12,6 +12,8 @@
|
|||||||
#include <linux/of.h>
|
#include <linux/of.h>
|
||||||
#include <linux/memblock.h>
|
#include <linux/memblock.h>
|
||||||
#include <linux/vmalloc.h>
|
#include <linux/vmalloc.h>
|
||||||
|
#include <linux/memory.h>
|
||||||
|
|
||||||
#include <asm/firmware.h>
|
#include <asm/firmware.h>
|
||||||
#include <asm/machdep.h>
|
#include <asm/machdep.h>
|
||||||
#include <asm/pSeries_reconfig.h>
|
#include <asm/pSeries_reconfig.h>
|
||||||
@ -20,24 +22,25 @@
|
|||||||
static unsigned long get_memblock_size(void)
|
static unsigned long get_memblock_size(void)
|
||||||
{
|
{
|
||||||
struct device_node *np;
|
struct device_node *np;
|
||||||
unsigned int memblock_size = 0;
|
unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
|
||||||
|
struct resource r;
|
||||||
|
|
||||||
np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
|
np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
|
||||||
if (np) {
|
if (np) {
|
||||||
const unsigned long *size;
|
const __be64 *size;
|
||||||
|
|
||||||
size = of_get_property(np, "ibm,lmb-size", NULL);
|
size = of_get_property(np, "ibm,lmb-size", NULL);
|
||||||
memblock_size = size ? *size : 0;
|
if (size)
|
||||||
|
memblock_size = be64_to_cpup(size);
|
||||||
of_node_put(np);
|
of_node_put(np);
|
||||||
} else {
|
} else if (machine_is(pseries)) {
|
||||||
|
/* This fallback really only applies to pseries */
|
||||||
unsigned int memzero_size = 0;
|
unsigned int memzero_size = 0;
|
||||||
const unsigned int *regs;
|
|
||||||
|
|
||||||
np = of_find_node_by_path("/memory@0");
|
np = of_find_node_by_path("/memory@0");
|
||||||
if (np) {
|
if (np) {
|
||||||
regs = of_get_property(np, "reg", NULL);
|
if (!of_address_to_resource(np, 0, &r))
|
||||||
memzero_size = regs ? regs[3] : 0;
|
memzero_size = resource_size(&r);
|
||||||
of_node_put(np);
|
of_node_put(np);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -50,16 +53,21 @@ static unsigned long get_memblock_size(void)
|
|||||||
sprintf(buf, "/memory@%x", memzero_size);
|
sprintf(buf, "/memory@%x", memzero_size);
|
||||||
np = of_find_node_by_path(buf);
|
np = of_find_node_by_path(buf);
|
||||||
if (np) {
|
if (np) {
|
||||||
regs = of_get_property(np, "reg", NULL);
|
if (!of_address_to_resource(np, 0, &r))
|
||||||
memblock_size = regs ? regs[3] : 0;
|
memblock_size = resource_size(&r);
|
||||||
of_node_put(np);
|
of_node_put(np);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return memblock_size;
|
return memblock_size;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* WARNING: This is going to override the generic definition whenever
|
||||||
|
* pseries is built-in regardless of what platform is active at boot
|
||||||
|
* time. This is fine for now as this is the only "option" and it
|
||||||
|
* should work everywhere. If not, we'll have to turn this into a
|
||||||
|
* ppc_md. callback
|
||||||
|
*/
|
||||||
unsigned long memory_block_size_bytes(void)
|
unsigned long memory_block_size_bytes(void)
|
||||||
{
|
{
|
||||||
return get_memblock_size();
|
return get_memblock_size();
|
||||||
|
@ -18,7 +18,7 @@ extern void arch_local_irq_restore(unsigned long);
|
|||||||
extern unsigned long arch_local_irq_save(void);
|
extern unsigned long arch_local_irq_save(void);
|
||||||
extern void arch_local_irq_enable(void);
|
extern void arch_local_irq_enable(void);
|
||||||
|
|
||||||
static inline unsigned long arch_local_save_flags(void)
|
static inline notrace unsigned long arch_local_save_flags(void)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
@ -26,17 +26,17 @@ static inline unsigned long arch_local_save_flags(void)
|
|||||||
return flags;
|
return flags;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void arch_local_irq_disable(void)
|
static inline notrace void arch_local_irq_disable(void)
|
||||||
{
|
{
|
||||||
arch_local_irq_save();
|
arch_local_irq_save();
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline bool arch_irqs_disabled_flags(unsigned long flags)
|
static inline notrace bool arch_irqs_disabled_flags(unsigned long flags)
|
||||||
{
|
{
|
||||||
return (flags & PSR_PIL) != 0;
|
return (flags & PSR_PIL) != 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline bool arch_irqs_disabled(void)
|
static inline notrace bool arch_irqs_disabled(void)
|
||||||
{
|
{
|
||||||
return arch_irqs_disabled_flags(arch_local_save_flags());
|
return arch_irqs_disabled_flags(arch_local_save_flags());
|
||||||
}
|
}
|
||||||
|
@ -14,7 +14,7 @@
|
|||||||
|
|
||||||
#ifndef __ASSEMBLY__
|
#ifndef __ASSEMBLY__
|
||||||
|
|
||||||
static inline unsigned long arch_local_save_flags(void)
|
static inline notrace unsigned long arch_local_save_flags(void)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
@ -26,7 +26,7 @@ static inline unsigned long arch_local_save_flags(void)
|
|||||||
return flags;
|
return flags;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void arch_local_irq_restore(unsigned long flags)
|
static inline notrace void arch_local_irq_restore(unsigned long flags)
|
||||||
{
|
{
|
||||||
__asm__ __volatile__(
|
__asm__ __volatile__(
|
||||||
"wrpr %0, %%pil"
|
"wrpr %0, %%pil"
|
||||||
@ -36,7 +36,7 @@ static inline void arch_local_irq_restore(unsigned long flags)
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void arch_local_irq_disable(void)
|
static inline notrace void arch_local_irq_disable(void)
|
||||||
{
|
{
|
||||||
__asm__ __volatile__(
|
__asm__ __volatile__(
|
||||||
"wrpr %0, %%pil"
|
"wrpr %0, %%pil"
|
||||||
@ -46,7 +46,7 @@ static inline void arch_local_irq_disable(void)
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void arch_local_irq_enable(void)
|
static inline notrace void arch_local_irq_enable(void)
|
||||||
{
|
{
|
||||||
__asm__ __volatile__(
|
__asm__ __volatile__(
|
||||||
"wrpr 0, %%pil"
|
"wrpr 0, %%pil"
|
||||||
@ -56,17 +56,17 @@ static inline void arch_local_irq_enable(void)
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int arch_irqs_disabled_flags(unsigned long flags)
|
static inline notrace int arch_irqs_disabled_flags(unsigned long flags)
|
||||||
{
|
{
|
||||||
return (flags > 0);
|
return (flags > 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int arch_irqs_disabled(void)
|
static inline notrace int arch_irqs_disabled(void)
|
||||||
{
|
{
|
||||||
return arch_irqs_disabled_flags(arch_local_save_flags());
|
return arch_irqs_disabled_flags(arch_local_save_flags());
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline unsigned long arch_local_irq_save(void)
|
static inline notrace unsigned long arch_local_irq_save(void)
|
||||||
{
|
{
|
||||||
unsigned long flags, tmp;
|
unsigned long flags, tmp;
|
||||||
|
|
||||||
|
@ -205,6 +205,7 @@ do { current_thread_info()->syscall_noerror = 1; \
|
|||||||
} while (0)
|
} while (0)
|
||||||
#define user_mode(regs) (!((regs)->tstate & TSTATE_PRIV))
|
#define user_mode(regs) (!((regs)->tstate & TSTATE_PRIV))
|
||||||
#define instruction_pointer(regs) ((regs)->tpc)
|
#define instruction_pointer(regs) ((regs)->tpc)
|
||||||
|
#define instruction_pointer_set(regs, val) ((regs)->tpc = (val))
|
||||||
#define user_stack_pointer(regs) ((regs)->u_regs[UREG_FP])
|
#define user_stack_pointer(regs) ((regs)->u_regs[UREG_FP])
|
||||||
#define regs_return_value(regs) ((regs)->u_regs[UREG_I0])
|
#define regs_return_value(regs) ((regs)->u_regs[UREG_I0])
|
||||||
#ifdef CONFIG_SMP
|
#ifdef CONFIG_SMP
|
||||||
|
@ -293,7 +293,7 @@ maybe_smp4m_msg:
|
|||||||
WRITE_PAUSE
|
WRITE_PAUSE
|
||||||
wr %l4, PSR_ET, %psr
|
wr %l4, PSR_ET, %psr
|
||||||
WRITE_PAUSE
|
WRITE_PAUSE
|
||||||
sll %o3, 28, %o2 ! shift for simpler checks below
|
srl %o3, 28, %o2 ! shift for simpler checks below
|
||||||
maybe_smp4m_msg_check_single:
|
maybe_smp4m_msg_check_single:
|
||||||
andcc %o2, 0x1, %g0
|
andcc %o2, 0x1, %g0
|
||||||
beq,a maybe_smp4m_msg_check_mask
|
beq,a maybe_smp4m_msg_check_mask
|
||||||
|
@ -226,7 +226,7 @@ void leon3_getCacheRegs(struct leon3_cacheregs *regs)
|
|||||||
* Leon2 and Leon3 differ in their way of telling cache information
|
* Leon2 and Leon3 differ in their way of telling cache information
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
int leon_flush_needed(void)
|
int __init leon_flush_needed(void)
|
||||||
{
|
{
|
||||||
int flush_needed = -1;
|
int flush_needed = -1;
|
||||||
unsigned int ssize, sets;
|
unsigned int ssize, sets;
|
||||||
|
@ -1170,7 +1170,7 @@ comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
|
|||||||
config AMD_NUMA
|
config AMD_NUMA
|
||||||
def_bool y
|
def_bool y
|
||||||
prompt "Old style AMD Opteron NUMA detection"
|
prompt "Old style AMD Opteron NUMA detection"
|
||||||
depends on NUMA && PCI
|
depends on X86_64 && NUMA && PCI
|
||||||
---help---
|
---help---
|
||||||
Enable AMD NUMA node topology detection. You should say Y here if
|
Enable AMD NUMA node topology detection. You should say Y here if
|
||||||
you have a multi processor AMD system. This uses an old method to
|
you have a multi processor AMD system. This uses an old method to
|
||||||
|
@ -419,6 +419,30 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
|
|||||||
DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
|
DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
{ /* Handle problems with rebooting on the Latitude E6320. */
|
||||||
|
.callback = set_pci_reboot,
|
||||||
|
.ident = "Dell Latitude E6320",
|
||||||
|
.matches = {
|
||||||
|
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
|
||||||
|
DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{ /* Handle problems with rebooting on the Latitude E5420. */
|
||||||
|
.callback = set_pci_reboot,
|
||||||
|
.ident = "Dell Latitude E5420",
|
||||||
|
.matches = {
|
||||||
|
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
|
||||||
|
DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5420"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{ /* Handle problems with rebooting on the Latitude E6420. */
|
||||||
|
.callback = set_pci_reboot,
|
||||||
|
.ident = "Dell Latitude E6420",
|
||||||
|
.matches = {
|
||||||
|
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
|
||||||
|
DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"),
|
||||||
|
},
|
||||||
|
},
|
||||||
{ }
|
{ }
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -28,6 +28,7 @@
|
|||||||
#include <linux/poison.h>
|
#include <linux/poison.h>
|
||||||
#include <linux/dma-mapping.h>
|
#include <linux/dma-mapping.h>
|
||||||
#include <linux/module.h>
|
#include <linux/module.h>
|
||||||
|
#include <linux/memory.h>
|
||||||
#include <linux/memory_hotplug.h>
|
#include <linux/memory_hotplug.h>
|
||||||
#include <linux/nmi.h>
|
#include <linux/nmi.h>
|
||||||
#include <linux/gfp.h>
|
#include <linux/gfp.h>
|
||||||
@ -895,8 +896,6 @@ const char *arch_vma_name(struct vm_area_struct *vma)
|
|||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_X86_UV
|
#ifdef CONFIG_X86_UV
|
||||||
#define MIN_MEMORY_BLOCK_SIZE (1 << SECTION_SIZE_BITS)
|
|
||||||
|
|
||||||
unsigned long memory_block_size_bytes(void)
|
unsigned long memory_block_size_bytes(void)
|
||||||
{
|
{
|
||||||
if (is_uv_system()) {
|
if (is_uv_system()) {
|
||||||
|
@ -139,13 +139,23 @@ static int __init hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data)
|
|||||||
{
|
{
|
||||||
struct platform_device *ghes_dev;
|
struct platform_device *ghes_dev;
|
||||||
struct ghes_arr *ghes_arr = data;
|
struct ghes_arr *ghes_arr = data;
|
||||||
int rc;
|
int rc, i;
|
||||||
|
|
||||||
if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
|
if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
if (!((struct acpi_hest_generic *)hest_hdr)->enabled)
|
if (!((struct acpi_hest_generic *)hest_hdr)->enabled)
|
||||||
return 0;
|
return 0;
|
||||||
|
for (i = 0; i < ghes_arr->count; i++) {
|
||||||
|
struct acpi_hest_header *hdr;
|
||||||
|
ghes_dev = ghes_arr->ghes_devs[i];
|
||||||
|
hdr = *(struct acpi_hest_header **)ghes_dev->dev.platform_data;
|
||||||
|
if (hdr->source_id == hest_hdr->source_id) {
|
||||||
|
pr_warning(FW_WARN HEST_PFX "Duplicated hardware error source ID: %d.\n",
|
||||||
|
hdr->source_id);
|
||||||
|
return -EIO;
|
||||||
|
}
|
||||||
|
}
|
||||||
ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id);
|
ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id);
|
||||||
if (!ghes_dev)
|
if (!ghes_dev)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
@ -1332,23 +1332,6 @@ int acpi_resources_are_enforced(void)
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL(acpi_resources_are_enforced);
|
EXPORT_SYMBOL(acpi_resources_are_enforced);
|
||||||
|
|
||||||
/*
|
|
||||||
* Create and initialize a spinlock.
|
|
||||||
*/
|
|
||||||
acpi_status
|
|
||||||
acpi_os_create_lock(acpi_spinlock *out_handle)
|
|
||||||
{
|
|
||||||
spinlock_t *lock;
|
|
||||||
|
|
||||||
lock = ACPI_ALLOCATE(sizeof(spinlock_t));
|
|
||||||
if (!lock)
|
|
||||||
return AE_NO_MEMORY;
|
|
||||||
spin_lock_init(lock);
|
|
||||||
*out_handle = lock;
|
|
||||||
|
|
||||||
return AE_OK;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Deallocate the memory for a spinlock.
|
* Deallocate the memory for a spinlock.
|
||||||
*/
|
*/
|
||||||
|
@ -30,7 +30,6 @@
|
|||||||
static DEFINE_MUTEX(mem_sysfs_mutex);
|
static DEFINE_MUTEX(mem_sysfs_mutex);
|
||||||
|
|
||||||
#define MEMORY_CLASS_NAME "memory"
|
#define MEMORY_CLASS_NAME "memory"
|
||||||
#define MIN_MEMORY_BLOCK_SIZE (1 << SECTION_SIZE_BITS)
|
|
||||||
|
|
||||||
static int sections_per_block;
|
static int sections_per_block;
|
||||||
|
|
||||||
|
@ -90,9 +90,10 @@
|
|||||||
#define G4x_GMCH_SIZE_MASK (0xf << 8)
|
#define G4x_GMCH_SIZE_MASK (0xf << 8)
|
||||||
#define G4x_GMCH_SIZE_1M (0x1 << 8)
|
#define G4x_GMCH_SIZE_1M (0x1 << 8)
|
||||||
#define G4x_GMCH_SIZE_2M (0x3 << 8)
|
#define G4x_GMCH_SIZE_2M (0x3 << 8)
|
||||||
#define G4x_GMCH_SIZE_VT_1M (0x9 << 8)
|
#define G4x_GMCH_SIZE_VT_EN (0x8 << 8)
|
||||||
#define G4x_GMCH_SIZE_VT_1_5M (0xa << 8)
|
#define G4x_GMCH_SIZE_VT_1M (G4x_GMCH_SIZE_1M | G4x_GMCH_SIZE_VT_EN)
|
||||||
#define G4x_GMCH_SIZE_VT_2M (0xc << 8)
|
#define G4x_GMCH_SIZE_VT_1_5M ((0x2 << 8) | G4x_GMCH_SIZE_VT_EN)
|
||||||
|
#define G4x_GMCH_SIZE_VT_2M (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
|
||||||
|
|
||||||
#define GFX_FLSH_CNTL 0x2170 /* 915+ */
|
#define GFX_FLSH_CNTL 0x2170 /* 915+ */
|
||||||
|
|
||||||
|
@ -180,6 +180,7 @@ static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
|
|||||||
break;
|
break;
|
||||||
case WM831X_GPIO_PULL_UP:
|
case WM831X_GPIO_PULL_UP:
|
||||||
pull = "pullup";
|
pull = "pullup";
|
||||||
|
break;
|
||||||
default:
|
default:
|
||||||
pull = "INVALID PULL";
|
pull = "INVALID PULL";
|
||||||
break;
|
break;
|
||||||
|
@ -61,7 +61,6 @@ static void i915_write_hws_pga(struct drm_device *dev)
|
|||||||
static int i915_init_phys_hws(struct drm_device *dev)
|
static int i915_init_phys_hws(struct drm_device *dev)
|
||||||
{
|
{
|
||||||
drm_i915_private_t *dev_priv = dev->dev_private;
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||||
struct intel_ring_buffer *ring = LP_RING(dev_priv);
|
|
||||||
|
|
||||||
/* Program Hardware Status Page */
|
/* Program Hardware Status Page */
|
||||||
dev_priv->status_page_dmah =
|
dev_priv->status_page_dmah =
|
||||||
@ -71,10 +70,9 @@ static int i915_init_phys_hws(struct drm_device *dev)
|
|||||||
DRM_ERROR("Can not allocate hardware status page\n");
|
DRM_ERROR("Can not allocate hardware status page\n");
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
ring->status_page.page_addr =
|
|
||||||
(void __force __iomem *)dev_priv->status_page_dmah->vaddr;
|
|
||||||
|
|
||||||
memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
|
memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
|
||||||
|
0, PAGE_SIZE);
|
||||||
|
|
||||||
i915_write_hws_pga(dev);
|
i915_write_hws_pga(dev);
|
||||||
|
|
||||||
|
@ -264,6 +264,7 @@ enum intel_pch {
|
|||||||
};
|
};
|
||||||
|
|
||||||
#define QUIRK_PIPEA_FORCE (1<<0)
|
#define QUIRK_PIPEA_FORCE (1<<0)
|
||||||
|
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
|
||||||
|
|
||||||
struct intel_fbdev;
|
struct intel_fbdev;
|
||||||
struct intel_fbc_work;
|
struct intel_fbc_work;
|
||||||
@ -1199,7 +1200,9 @@ void i915_gem_free_all_phys_object(struct drm_device *dev);
|
|||||||
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
|
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
|
||||||
|
|
||||||
uint32_t
|
uint32_t
|
||||||
i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
|
i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
|
||||||
|
uint32_t size,
|
||||||
|
int tiling_mode);
|
||||||
|
|
||||||
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
|
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
|
||||||
enum i915_cache_level cache_level);
|
enum i915_cache_level cache_level);
|
||||||
|
@ -1374,25 +1374,24 @@ i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
|
|||||||
}
|
}
|
||||||
|
|
||||||
static uint32_t
|
static uint32_t
|
||||||
i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
|
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
|
||||||
{
|
{
|
||||||
struct drm_device *dev = obj->base.dev;
|
uint32_t gtt_size;
|
||||||
uint32_t size;
|
|
||||||
|
|
||||||
if (INTEL_INFO(dev)->gen >= 4 ||
|
if (INTEL_INFO(dev)->gen >= 4 ||
|
||||||
obj->tiling_mode == I915_TILING_NONE)
|
tiling_mode == I915_TILING_NONE)
|
||||||
return obj->base.size;
|
return size;
|
||||||
|
|
||||||
/* Previous chips need a power-of-two fence region when tiling */
|
/* Previous chips need a power-of-two fence region when tiling */
|
||||||
if (INTEL_INFO(dev)->gen == 3)
|
if (INTEL_INFO(dev)->gen == 3)
|
||||||
size = 1024*1024;
|
gtt_size = 1024*1024;
|
||||||
else
|
else
|
||||||
size = 512*1024;
|
gtt_size = 512*1024;
|
||||||
|
|
||||||
while (size < obj->base.size)
|
while (gtt_size < size)
|
||||||
size <<= 1;
|
gtt_size <<= 1;
|
||||||
|
|
||||||
return size;
|
return gtt_size;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -1403,59 +1402,52 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
|
|||||||
* potential fence register mapping.
|
* potential fence register mapping.
|
||||||
*/
|
*/
|
||||||
static uint32_t
|
static uint32_t
|
||||||
i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
|
i915_gem_get_gtt_alignment(struct drm_device *dev,
|
||||||
|
uint32_t size,
|
||||||
|
int tiling_mode)
|
||||||
{
|
{
|
||||||
struct drm_device *dev = obj->base.dev;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Minimum alignment is 4k (GTT page size), but might be greater
|
* Minimum alignment is 4k (GTT page size), but might be greater
|
||||||
* if a fence register is needed for the object.
|
* if a fence register is needed for the object.
|
||||||
*/
|
*/
|
||||||
if (INTEL_INFO(dev)->gen >= 4 ||
|
if (INTEL_INFO(dev)->gen >= 4 ||
|
||||||
obj->tiling_mode == I915_TILING_NONE)
|
tiling_mode == I915_TILING_NONE)
|
||||||
return 4096;
|
return 4096;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Previous chips need to be aligned to the size of the smallest
|
* Previous chips need to be aligned to the size of the smallest
|
||||||
* fence register that can contain the object.
|
* fence register that can contain the object.
|
||||||
*/
|
*/
|
||||||
return i915_gem_get_gtt_size(obj);
|
return i915_gem_get_gtt_size(dev, size, tiling_mode);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
|
* i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
|
||||||
* unfenced object
|
* unfenced object
|
||||||
* @obj: object to check
|
* @dev: the device
|
||||||
|
* @size: size of the object
|
||||||
|
* @tiling_mode: tiling mode of the object
|
||||||
*
|
*
|
||||||
* Return the required GTT alignment for an object, only taking into account
|
* Return the required GTT alignment for an object, only taking into account
|
||||||
* unfenced tiled surface requirements.
|
* unfenced tiled surface requirements.
|
||||||
*/
|
*/
|
||||||
uint32_t
|
uint32_t
|
||||||
i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
|
i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
|
||||||
|
uint32_t size,
|
||||||
|
int tiling_mode)
|
||||||
{
|
{
|
||||||
struct drm_device *dev = obj->base.dev;
|
|
||||||
int tile_height;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Minimum alignment is 4k (GTT page size) for sane hw.
|
* Minimum alignment is 4k (GTT page size) for sane hw.
|
||||||
*/
|
*/
|
||||||
if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
|
if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
|
||||||
obj->tiling_mode == I915_TILING_NONE)
|
tiling_mode == I915_TILING_NONE)
|
||||||
return 4096;
|
return 4096;
|
||||||
|
|
||||||
/*
|
/* Previous hardware however needs to be aligned to a power-of-two
|
||||||
* Older chips need unfenced tiled buffers to be aligned to the left
|
* tile height. The simplest method for determining this is to reuse
|
||||||
* edge of an even tile row (where tile rows are counted as if the bo is
|
* the power-of-tile object size.
|
||||||
* placed in a fenced gtt region).
|
|
||||||
*/
|
*/
|
||||||
if (IS_GEN2(dev))
|
return i915_gem_get_gtt_size(dev, size, tiling_mode);
|
||||||
tile_height = 16;
|
|
||||||
else if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
|
|
||||||
tile_height = 32;
|
|
||||||
else
|
|
||||||
tile_height = 8;
|
|
||||||
|
|
||||||
return tile_height * obj->stride * 2;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
int
|
int
|
||||||
@ -2776,9 +2768,16 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
|
|||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
fence_size = i915_gem_get_gtt_size(obj);
|
fence_size = i915_gem_get_gtt_size(dev,
|
||||||
fence_alignment = i915_gem_get_gtt_alignment(obj);
|
obj->base.size,
|
||||||
unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);
|
obj->tiling_mode);
|
||||||
|
fence_alignment = i915_gem_get_gtt_alignment(dev,
|
||||||
|
obj->base.size,
|
||||||
|
obj->tiling_mode);
|
||||||
|
unfenced_alignment =
|
||||||
|
i915_gem_get_unfenced_gtt_alignment(dev,
|
||||||
|
obj->base.size,
|
||||||
|
obj->tiling_mode);
|
||||||
|
|
||||||
if (alignment == 0)
|
if (alignment == 0)
|
||||||
alignment = map_and_fenceable ? fence_alignment :
|
alignment = map_and_fenceable ? fence_alignment :
|
||||||
|
@ -348,7 +348,9 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
|
|||||||
/* Rebind if we need a change of alignment */
|
/* Rebind if we need a change of alignment */
|
||||||
if (!obj->map_and_fenceable) {
|
if (!obj->map_and_fenceable) {
|
||||||
u32 unfenced_alignment =
|
u32 unfenced_alignment =
|
||||||
i915_gem_get_unfenced_gtt_alignment(obj);
|
i915_gem_get_unfenced_gtt_alignment(dev,
|
||||||
|
obj->base.size,
|
||||||
|
args->tiling_mode);
|
||||||
if (obj->gtt_offset & (unfenced_alignment - 1))
|
if (obj->gtt_offset & (unfenced_alignment - 1))
|
||||||
ret = i915_gem_object_unbind(obj);
|
ret = i915_gem_object_unbind(obj);
|
||||||
}
|
}
|
||||||
|
@ -2865,14 +2865,18 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
|
|||||||
I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
|
I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* On ILK+ LUT must be loaded before the pipe is running but with
|
||||||
|
* clocks enabled
|
||||||
|
*/
|
||||||
|
intel_crtc_load_lut(crtc);
|
||||||
|
|
||||||
intel_enable_pipe(dev_priv, pipe, is_pch_port);
|
intel_enable_pipe(dev_priv, pipe, is_pch_port);
|
||||||
intel_enable_plane(dev_priv, plane, pipe);
|
intel_enable_plane(dev_priv, plane, pipe);
|
||||||
|
|
||||||
if (is_pch_port)
|
if (is_pch_port)
|
||||||
ironlake_pch_enable(crtc);
|
ironlake_pch_enable(crtc);
|
||||||
|
|
||||||
intel_crtc_load_lut(crtc);
|
|
||||||
|
|
||||||
mutex_lock(&dev->struct_mutex);
|
mutex_lock(&dev->struct_mutex);
|
||||||
intel_update_fbc(dev);
|
intel_update_fbc(dev);
|
||||||
mutex_unlock(&dev->struct_mutex);
|
mutex_unlock(&dev->struct_mutex);
|
||||||
@ -4469,7 +4473,8 @@ static void intel_update_watermarks(struct drm_device *dev)
|
|||||||
|
|
||||||
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
|
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
|
||||||
{
|
{
|
||||||
return dev_priv->lvds_use_ssc && i915_panel_use_ssc;
|
return dev_priv->lvds_use_ssc && i915_panel_use_ssc
|
||||||
|
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -8140,6 +8145,15 @@ static void quirk_pipea_force (struct drm_device *dev)
 	DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
 }
 
+/*
+ * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
+ */
+static void quirk_ssc_force_disable(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
+}
+
 struct intel_quirk {
 	int device;
 	int subsystem_vendor;
@@ -8168,6 +8182,9 @@ struct intel_quirk intel_quirks[] = {
 	/* 855 & before need to leave pipe A & dpll A up */
 	{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
 	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+
+	/* Lenovo U160 cannot use SSC on LVDS */
+	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
 };
 
 static void intel_init_quirks(struct drm_device *dev)
@@ -1321,6 +1321,9 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 		ring->get_seqno = pc_render_get_seqno;
 	}
 
+	if (!I915_NEED_GFX_HWS(dev))
+		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
@@ -2000,7 +2000,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 		gb_backend_map = 0x66442200;
 		break;
 	case CHIP_JUNIPER:
-		gb_backend_map = 0x00006420;
+		gb_backend_map = 0x00002200;
 		break;
 	default:
 		gb_backend_map =
@@ -252,7 +252,7 @@ draw_auto(struct radeon_device *rdev)
 
 }
 
-/* emits 36 */
+/* emits 39 */
 static void
 set_default_state(struct radeon_device *rdev)
 {
@@ -531,6 +531,11 @@ set_default_state(struct radeon_device *rdev)
 	radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
 	radeon_ring_write(rdev, 0);
 
+	/* setup LDS */
+	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	radeon_ring_write(rdev, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
+	radeon_ring_write(rdev, 0x10001000);
+
 	/* SQ config */
 	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
 	radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
@@ -773,7 +778,7 @@ int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
 	/* calculate number of loops correctly */
 	ring_size = num_loops * dwords_per_loop;
 	/* set default + shaders */
-	ring_size += 52; /* shaders + def state */
+	ring_size += 55; /* shaders + def state */
 	ring_size += 10; /* fence emit for VB IB */
 	ring_size += 5; /* done copy */
 	ring_size += 10; /* fence emit for done copy */
@@ -331,7 +331,7 @@ static bool avivo_read_disabled_bios(struct radeon_device *rdev)
 
 	seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
 	viph_control = RREG32(RADEON_VIPH_CONTROL);
-	bus_cntl = RREG32(RADEON_BUS_CNTL);
+	bus_cntl = RREG32(RV370_BUS_CNTL);
 	d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
 	d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
 	vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
@@ -350,7 +350,7 @@ static bool avivo_read_disabled_bios(struct radeon_device *rdev)
 	WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
 
 	/* enable the rom */
-	WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+	WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM));
 
 	/* Disable VGA mode */
 	WREG32(AVIVO_D1VGA_CONTROL,
@@ -367,7 +367,7 @@ static bool avivo_read_disabled_bios(struct radeon_device *rdev)
 	/* restore regs */
 	WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
 	WREG32(RADEON_VIPH_CONTROL, viph_control);
-	WREG32(RADEON_BUS_CNTL, bus_cntl);
+	WREG32(RV370_BUS_CNTL, bus_cntl);
 	WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
 	WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
 	WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
@@ -390,6 +390,9 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
 
 	seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
 	viph_control = RREG32(RADEON_VIPH_CONTROL);
+	if (rdev->flags & RADEON_IS_PCIE)
+		bus_cntl = RREG32(RV370_BUS_CNTL);
+	else
 	bus_cntl = RREG32(RADEON_BUS_CNTL);
 	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
 	crtc2_gen_cntl = 0;
@@ -412,6 +415,9 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
 	WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
 
 	/* enable the rom */
+	if (rdev->flags & RADEON_IS_PCIE)
+		WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM));
+	else
 	WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
 
 	/* Turn off mem requests and CRTC for both controllers */
@@ -439,6 +445,9 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
 	/* restore regs */
 	WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
 	WREG32(RADEON_VIPH_CONTROL, viph_control);
+	if (rdev->flags & RADEON_IS_PCIE)
+		WREG32(RV370_BUS_CNTL, bus_cntl);
+	else
 	WREG32(RADEON_BUS_CNTL, bus_cntl);
 	WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
 	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
@@ -52,6 +52,12 @@ void radeon_connector_hotplug(struct drm_connector *connector)
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
+	/* bail if the connector does not have hpd pin, e.g.,
+	 * VGA, TV, etc.
+	 */
+	if (radeon_connector->hpd.hpd == RADEON_HPD_NONE)
+		return;
+
 	radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
 
 	/* powering up/down the eDP panel generates hpd events which
@@ -300,6 +300,8 @@
 # define RADEON_BUS_READ_BURST (1 << 30)
 #define RADEON_BUS_CNTL1 0x0034
 # define RADEON_BUS_WAIT_ON_LOCK_EN (1 << 4)
+#define RV370_BUS_CNTL 0x004c
+# define RV370_BUS_BIOS_DIS_ROM (1 << 2)
 /* rv370/rv380, rv410, r423/r430/r480, r5xx */
 #define RADEON_MSI_REARM_EN 0x0160
 # define RV370_MSI_REARM_EN (1 << 0)
@@ -426,7 +426,7 @@ int rs600_gart_init(struct radeon_device *rdev)
 	return radeon_gart_table_vram_alloc(rdev);
 }
 
-int rs600_gart_enable(struct radeon_device *rdev)
+static int rs600_gart_enable(struct radeon_device *rdev)
 {
 	u32 tmp;
 	int r, i;
@@ -440,8 +440,8 @@ int rs600_gart_enable(struct radeon_device *rdev)
 		return r;
 	radeon_gart_restore(rdev);
 	/* Enable bus master */
-	tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
-	WREG32(R_00004C_BUS_CNTL, tmp);
+	tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
+	WREG32(RADEON_BUS_CNTL, tmp);
 	/* FIXME: setup default page */
 	WREG32_MC(R_000100_MC_PT0_CNTL,
 		  (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
@@ -53,23 +53,23 @@ static int adm1275_probe(struct i2c_client *client,
 	info->direct[PSC_VOLTAGE_IN] = true;
 	info->direct[PSC_VOLTAGE_OUT] = true;
 	info->direct[PSC_CURRENT_OUT] = true;
-	info->m[PSC_CURRENT_OUT] = 800;
+	info->m[PSC_CURRENT_OUT] = 807;
 	info->b[PSC_CURRENT_OUT] = 20475;
 	info->R[PSC_CURRENT_OUT] = -1;
 	info->func[0] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
 
 	if (config & ADM1275_VRANGE) {
-		info->m[PSC_VOLTAGE_IN] = 19045;
+		info->m[PSC_VOLTAGE_IN] = 19199;
 		info->b[PSC_VOLTAGE_IN] = 0;
 		info->R[PSC_VOLTAGE_IN] = -2;
-		info->m[PSC_VOLTAGE_OUT] = 19045;
+		info->m[PSC_VOLTAGE_OUT] = 19199;
 		info->b[PSC_VOLTAGE_OUT] = 0;
 		info->R[PSC_VOLTAGE_OUT] = -2;
 	} else {
-		info->m[PSC_VOLTAGE_IN] = 6666;
+		info->m[PSC_VOLTAGE_IN] = 6720;
 		info->b[PSC_VOLTAGE_IN] = 0;
 		info->R[PSC_VOLTAGE_IN] = -1;
-		info->m[PSC_VOLTAGE_OUT] = 6666;
+		info->m[PSC_VOLTAGE_OUT] = 6720;
 		info->b[PSC_VOLTAGE_OUT] = 0;
 		info->R[PSC_VOLTAGE_OUT] = -1;
 	}
@@ -674,6 +674,7 @@ static int atk_debugfs_gitm_get(void *p, u64 *val)
 	else
 		err = -EIO;
 
+	ACPI_FREE(ret);
 	return err;
 }
 
@@ -1538,7 +1538,7 @@ static struct attribute *it87_attributes_label[] = {
 };
 
 static const struct attribute_group it87_group_label = {
-	.attrs = it87_attributes_vid,
+	.attrs = it87_attributes_label,
 };
 
 /* SuperIO detection - will change isa_address if a chip is found */
@@ -40,6 +40,8 @@ struct max1111_data {
 	struct spi_transfer xfer[2];
 	uint8_t *tx_buf;
 	uint8_t *rx_buf;
+	struct mutex drvdata_lock;
+	/* protect msg, xfer and buffers from multiple access */
 };
 
 static int max1111_read(struct device *dev, int channel)
@@ -48,6 +50,9 @@ static int max1111_read(struct device *dev, int channel)
 	uint8_t v1, v2;
 	int err;
 
+	/* writing to drvdata struct is not thread safe, wait on mutex */
+	mutex_lock(&data->drvdata_lock);
+
 	data->tx_buf[0] = (channel << MAX1111_CTRL_SEL_SH) |
 		MAX1111_CTRL_PD0 | MAX1111_CTRL_PD1 |
 		MAX1111_CTRL_SGL | MAX1111_CTRL_UNI | MAX1111_CTRL_STR;
@@ -55,12 +60,15 @@ static int max1111_read(struct device *dev, int channel)
 	err = spi_sync(data->spi, &data->msg);
 	if (err < 0) {
 		dev_err(dev, "spi_sync failed with %d\n", err);
+		mutex_unlock(&data->drvdata_lock);
 		return err;
 	}
 
 	v1 = data->rx_buf[0];
 	v2 = data->rx_buf[1];
 
+	mutex_unlock(&data->drvdata_lock);
+
 	if ((v1 & 0xc0) || (v2 & 0x3f))
 		return -EINVAL;
 
@@ -176,6 +184,8 @@ static int __devinit max1111_probe(struct spi_device *spi)
 	if (err)
 		goto err_free_data;
 
+	mutex_init(&data->drvdata_lock);
+
 	data->spi = spi;
 	spi_set_drvdata(spi, data);
 
@@ -213,6 +223,7 @@ static int __devexit max1111_remove(struct spi_device *spi)
 
 	hwmon_device_unregister(data->hwmon_dev);
 	sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
+	mutex_destroy(&data->drvdata_lock);
 	kfree(data->rx_buf);
 	kfree(data->tx_buf);
 	kfree(data);
@@ -362,7 +362,7 @@ static struct pmbus_data *pmbus_update_device(struct device *dev)
  * Convert linear sensor values to milli- or micro-units
  * depending on sensor type.
  */
-static int pmbus_reg2data_linear(struct pmbus_data *data,
+static long pmbus_reg2data_linear(struct pmbus_data *data,
 				 struct pmbus_sensor *sensor)
 {
 	s16 exponent;
@@ -397,14 +397,14 @@ static int pmbus_reg2data_linear(struct pmbus_data *data,
 	else
 		val >>= -exponent;
 
-	return (int)val;
+	return val;
 }
 
 /*
  * Convert direct sensor values to milli- or micro-units
  * depending on sensor type.
 */
-static int pmbus_reg2data_direct(struct pmbus_data *data,
+static long pmbus_reg2data_direct(struct pmbus_data *data,
 				 struct pmbus_sensor *sensor)
 {
 	long val = (s16) sensor->data;
@@ -440,12 +440,12 @@ static int pmbus_reg2data_direct(struct pmbus_data *data,
 		R++;
 	}
 
-	return (int)((val - b) / m);
+	return (val - b) / m;
 }
 
-static int pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
+static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
 {
-	int val;
+	long val;
 
 	if (data->info->direct[sensor->class])
 		val = pmbus_reg2data_direct(data, sensor);
@@ -619,7 +619,7 @@ static int pmbus_get_boolean(struct pmbus_data *data, int index, int *val)
 	if (!s1 && !s2)
 		*val = !!regval;
 	else {
-		int v1, v2;
+		long v1, v2;
 		struct pmbus_sensor *sensor1, *sensor2;
 
 		sensor1 = &data->sensors[s1];
@@ -661,7 +661,7 @@ static ssize_t pmbus_show_sensor(struct device *dev,
 	if (sensor->data < 0)
 		return sensor->data;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", pmbus_reg2data(data, sensor));
+	return snprintf(buf, PAGE_SIZE, "%ld\n", pmbus_reg2data(data, sensor));
 }
 
 static ssize_t pmbus_set_sensor(struct device *dev,
@@ -1988,6 +1988,14 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
 	if (dvbdev->users == -1 && fe->ops.ts_bus_ctrl) {
 		if ((ret = fe->ops.ts_bus_ctrl(fe, 1)) < 0)
 			goto err0;
+
+		/* If we took control of the bus, we need to force
+		   reinitialization. This is because many ts_bus_ctrl()
+		   functions strobe the RESET pin on the demod, and if the
+		   frontend thread already exists then the dvb_init() routine
+		   won't get called (which is what usually does initial
+		   register configuration). */
+		fepriv->reinitialise = 1;
 	}
 
 	if ((ret = dvb_generic_open (inode, file)) < 0)
@@ -168,7 +168,7 @@ config RADIO_MAXIRADIO
 
 config RADIO_MIROPCM20
 	tristate "miroSOUND PCM20 radio"
-	depends on ISA && VIDEO_V4L2 && SND
+	depends on ISA && ISA_DMA_API && VIDEO_V4L2 && SND
 	select SND_ISA
 	select SND_MIRO
 	---help---
@@ -201,7 +201,7 @@ config RADIO_SF16FMI
 
 config RADIO_SF16FMR2
 	tristate "SF16FMR2 Radio"
-	depends on ISA && VIDEO_V4L2
+	depends on ISA && VIDEO_V4L2 && SND
 	---help---
 	  Choose Y here if you have one of these FM radio cards.
 
|
@ -1033,7 +1033,7 @@ static int si4713_write_econtrol_string(struct si4713_device *sdev,
|
|||||||
char ps_name[MAX_RDS_PS_NAME + 1];
|
char ps_name[MAX_RDS_PS_NAME + 1];
|
||||||
|
|
||||||
len = control->size - 1;
|
len = control->size - 1;
|
||||||
if (len > MAX_RDS_PS_NAME) {
|
if (len < 0 || len > MAX_RDS_PS_NAME) {
|
||||||
rval = -ERANGE;
|
rval = -ERANGE;
|
||||||
goto exit;
|
goto exit;
|
||||||
}
|
}
|
||||||
@ -1057,7 +1057,7 @@ static int si4713_write_econtrol_string(struct si4713_device *sdev,
|
|||||||
char radio_text[MAX_RDS_RADIO_TEXT + 1];
|
char radio_text[MAX_RDS_RADIO_TEXT + 1];
|
||||||
|
|
||||||
len = control->size - 1;
|
len = control->size - 1;
|
||||||
if (len > MAX_RDS_RADIO_TEXT) {
|
if (len < 0 || len > MAX_RDS_RADIO_TEXT) {
|
||||||
rval = -ERANGE;
|
rval = -ERANGE;
|
||||||
goto exit;
|
goto exit;
|
||||||
}
|
}
|
||||||
|
@@ -558,9 +558,10 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
 			 inout, data1);
 		break;
 	case MCE_CMD_S_TIMEOUT:
-		/* value is in units of 50us, so x*50/100 or x/2 ms */
+		/* value is in units of 50us, so x*50/1000 ms */
 		dev_info(dev, "%s receive timeout of %d ms\n",
-			 inout, ((data1 << 8) | data2) / 2);
+			 inout,
+			 ((data1 << 8) | data2) * MCE_TIME_UNIT / 1000);
 		break;
 	case MCE_CMD_G_TIMEOUT:
 		dev_info(dev, "Get receive timeout\n");
@@ -847,7 +848,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
 	switch (ir->buf_in[index]) {
 	/* 2-byte return value commands */
 	case MCE_CMD_S_TIMEOUT:
-		ir->rc->timeout = US_TO_NS((hi << 8 | lo) / 2);
+		ir->rc->timeout = US_TO_NS((hi << 8 | lo) * MCE_TIME_UNIT);
 		break;
 
 	/* 1-byte return value commands */
@@ -1078,7 +1079,7 @@ static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
 	rc->priv = ir;
 	rc->driver_type = RC_DRIVER_IR_RAW;
 	rc->allowed_protos = RC_TYPE_ALL;
-	rc->timeout = US_TO_NS(1000);
+	rc->timeout = MS_TO_NS(100);
 	if (!ir->flags.no_tx) {
 		rc->s_tx_mask = mceusb_set_tx_mask;
 		rc->s_tx_carrier = mceusb_set_tx_carrier;
@@ -1110,7 +1110,7 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
 	rdev->dev.parent = &pdev->dev;
 	rdev->driver_name = NVT_DRIVER_NAME;
 	rdev->map_name = RC_MAP_RC6_MCE;
-	rdev->timeout = US_TO_NS(1000);
+	rdev->timeout = MS_TO_NS(100);
 	/* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
 	rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
 #if 0
@@ -2060,10 +2060,6 @@ static int __devinit cx23885_initdev(struct pci_dev *pci_dev,
 		goto fail_irq;
 	}
 
-	if (!pci_enable_msi(pci_dev))
-		err = request_irq(pci_dev->irq, cx23885_irq,
-				  IRQF_DISABLED, dev->name, dev);
-	else
 	err = request_irq(pci_dev->irq, cx23885_irq,
 			  IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
 	if (err < 0) {
@@ -2114,7 +2110,6 @@ static void __devexit cx23885_finidev(struct pci_dev *pci_dev)
 
 	/* unregister stuff */
 	free_irq(pci_dev->irq, dev);
-	pci_disable_msi(pci_dev);
 
 	cx23885_dev_unregister(dev);
 	v4l2_device_unregister(v4l2_dev);
@@ -714,10 +714,19 @@ static int tuner_remove(struct i2c_client *client)
  * returns 0.
  * This function is needed for boards that have a separate tuner for
  * radio (like devices with tea5767).
+ * NOTE: mt20xx uses V4L2_TUNER_DIGITAL_TV and calls set_tv_freq to
+ * select a TV frequency. So, t_mode = T_ANALOG_TV could actually
+ * be used to represent a Digital TV too.
  */
 static inline int check_mode(struct tuner *t, enum v4l2_tuner_type mode)
 {
-	if ((1 << mode & t->mode_mask) == 0)
+	int t_mode;
+	if (mode == V4L2_TUNER_RADIO)
+		t_mode = T_RADIO;
+	else
+		t_mode = T_ANALOG_TV;
+
+	if ((t_mode & t->mode_mask) == 0)
 		return -EINVAL;
 
 	return 0;
@@ -984,7 +993,7 @@ static void tuner_status(struct dvb_frontend *fe)
 	case V4L2_TUNER_RADIO:
 		p = "radio";
 		break;
-	case V4L2_TUNER_DIGITAL_TV:
+	case V4L2_TUNER_DIGITAL_TV: /* Used by mt20xx */
 		p = "digital TV";
 		break;
 	case V4L2_TUNER_ANALOG_TV:
@@ -1135,9 +1144,8 @@ static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
 		return 0;
 	if (vt->type == t->mode && analog_ops->get_afc)
 		vt->afc = analog_ops->get_afc(&t->fe);
-	if (vt->type == V4L2_TUNER_ANALOG_TV)
+	if (t->mode != V4L2_TUNER_RADIO) {
 		vt->capability |= V4L2_TUNER_CAP_NORM;
-	if (vt->type != V4L2_TUNER_RADIO) {
 		vt->rangelow = tv_range[0] * 16;
 		vt->rangehigh = tv_range[1] * 16;
 		return 0;
@@ -247,12 +247,12 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
 		return 0;
 
 	/* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
+	card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
 	if (card->csd.structure == 3) {
-		int ext_csd_struct = ext_csd[EXT_CSD_STRUCTURE];
-		if (ext_csd_struct > 2) {
+		if (card->ext_csd.raw_ext_csd_structure > 2) {
 			printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
 				"version %d\n", mmc_hostname(card->host),
-				ext_csd_struct);
+				card->ext_csd.raw_ext_csd_structure);
 			err = -EINVAL;
 			goto out;
 		}
@@ -266,6 +266,10 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
 		goto out;
 	}
 
+	card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
+	card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
+	card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
+	card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
 	if (card->ext_csd.rev >= 2) {
 		card->ext_csd.sectors =
 			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
@@ -277,7 +281,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
 		if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
 			mmc_card_set_blockaddr(card);
 	}
+	card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
 	switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
 	case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
 	     EXT_CSD_CARD_TYPE_26:
@@ -307,6 +311,11 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
 			mmc_hostname(card->host));
 	}
 
+	card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
+	card->ext_csd.raw_erase_timeout_mult =
+		ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
+	card->ext_csd.raw_hc_erase_grp_size =
+		ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
 	if (card->ext_csd.rev >= 3) {
 		u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
 		card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
@@ -334,6 +343,16 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
 		card->ext_csd.boot_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
 	}
 
+	card->ext_csd.raw_hc_erase_gap_size =
+		ext_csd[EXT_CSD_PARTITION_ATTRIBUTE];
+	card->ext_csd.raw_sec_trim_mult =
+		ext_csd[EXT_CSD_SEC_TRIM_MULT];
+	card->ext_csd.raw_sec_erase_mult =
+		ext_csd[EXT_CSD_SEC_ERASE_MULT];
+	card->ext_csd.raw_sec_feature_support =
+		ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
+	card->ext_csd.raw_trim_mult =
+		ext_csd[EXT_CSD_TRIM_MULT];
 	if (card->ext_csd.rev >= 4) {
 		/*
 		 * Enhanced area feature support -- check whether the eMMC
@@ -401,17 +420,17 @@ static inline void mmc_free_ext_csd(u8 *ext_csd)
 }
 
 
-static int mmc_compare_ext_csds(struct mmc_card *card, u8 *ext_csd,
-				unsigned bus_width)
+static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
 {
 	u8 *bw_ext_csd;
 	int err;
 
-	err = mmc_get_ext_csd(card, &bw_ext_csd);
-	if (err)
-		return err;
+	if (bus_width == MMC_BUS_WIDTH_1)
+		return 0;
 
-	if ((ext_csd == NULL || bw_ext_csd == NULL)) {
+	err = mmc_get_ext_csd(card, &bw_ext_csd);
+
+	if (err || bw_ext_csd == NULL) {
 		if (bus_width != MMC_BUS_WIDTH_1)
 			err = -EINVAL;
 		goto out;
@@ -421,35 +440,40 @@ static int mmc_compare_ext_csds(struct mmc_card *card, u8 *ext_csd,
 		goto out;
 
 	/* only compare read only fields */
-	err = (!(ext_csd[EXT_CSD_PARTITION_SUPPORT] ==
+	err = (!(card->ext_csd.raw_partition_support ==
 		bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
-		(ext_csd[EXT_CSD_ERASED_MEM_CONT] ==
+		(card->ext_csd.raw_erased_mem_count ==
 		bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
-		(ext_csd[EXT_CSD_REV] ==
+		(card->ext_csd.rev ==
 		bw_ext_csd[EXT_CSD_REV]) &&
-		(ext_csd[EXT_CSD_STRUCTURE] ==
+		(card->ext_csd.raw_ext_csd_structure ==
 		bw_ext_csd[EXT_CSD_STRUCTURE]) &&
-		(ext_csd[EXT_CSD_CARD_TYPE] ==
+		(card->ext_csd.raw_card_type ==
 		bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
-		(ext_csd[EXT_CSD_S_A_TIMEOUT] ==
+		(card->ext_csd.raw_s_a_timeout ==
 		bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
-		(ext_csd[EXT_CSD_HC_WP_GRP_SIZE] ==
+		(card->ext_csd.raw_hc_erase_gap_size ==
 		bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
-		(ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT] ==
+		(card->ext_csd.raw_erase_timeout_mult ==
 		bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
-		(ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] ==
+		(card->ext_csd.raw_hc_erase_grp_size ==
 		bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
-		(ext_csd[EXT_CSD_SEC_TRIM_MULT] ==
+		(card->ext_csd.raw_sec_trim_mult ==
 		bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
-		(ext_csd[EXT_CSD_SEC_ERASE_MULT] ==
+		(card->ext_csd.raw_sec_erase_mult ==
 		bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
-		(ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] ==
+		(card->ext_csd.raw_sec_feature_support ==
 		bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
-		(ext_csd[EXT_CSD_TRIM_MULT] ==
+		(card->ext_csd.raw_trim_mult ==
 		bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
-		memcmp(&ext_csd[EXT_CSD_SEC_CNT],
-		       &bw_ext_csd[EXT_CSD_SEC_CNT],
-		       4) != 0);
+		(card->ext_csd.raw_sectors[0] ==
+		bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
+		(card->ext_csd.raw_sectors[1] ==
+		bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
+		(card->ext_csd.raw_sectors[2] ==
+		bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
+		(card->ext_csd.raw_sectors[3] ==
+		bw_ext_csd[EXT_CSD_SEC_CNT + 3]));
 	if (err)
 		err = -EINVAL;
 
@@ -770,7 +794,6 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
 			 */
 			if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
 				err = mmc_compare_ext_csds(card,
-					ext_csd,
 					bus_width);
 			else
 				err = mmc_bus_test(card, bus_width);
@@ -1428,9 +1428,9 @@ out:
 	return features;
 }
 
-#define BOND_VLAN_FEATURES (NETIF_F_ALL_TX_OFFLOADS | \
-			    NETIF_F_SOFT_FEATURES | \
-			    NETIF_F_LRO)
+#define BOND_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
+			    NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+			    NETIF_F_HIGHDMA | NETIF_F_LRO)
 
 static void bond_compute_features(struct bonding *bond)
 {
@@ -2289,6 +2289,23 @@ static int gfar_set_mac_address(struct net_device *dev)
 	return 0;
 }
 
+/* Check if rx parser should be activated */
+void gfar_check_rx_parser_mode(struct gfar_private *priv)
+{
+	struct gfar __iomem *regs;
+	u32 tempval;
+
+	regs = priv->gfargrp[0].regs;
+
+	tempval = gfar_read(&regs->rctrl);
+	/* If parse is no longer required, then disable parser */
+	if (tempval & RCTRL_REQ_PARSER)
+		tempval |= RCTRL_PRSDEP_INIT;
+	else
+		tempval &= ~RCTRL_PRSDEP_INIT;
+	gfar_write(&regs->rctrl, tempval);
+}
+
 
 /* Enables and disables VLAN insertion/extraction */
 static void gfar_vlan_rx_register(struct net_device *dev,
@@ -2325,12 +2342,9 @@ static void gfar_vlan_rx_register(struct net_device *dev,
 		/* Disable VLAN tag extraction */
 		tempval = gfar_read(&regs->rctrl);
 		tempval &= ~RCTRL_VLEX;
-		/* If parse is no longer required, then disable parser */
-		if (tempval & RCTRL_REQ_PARSER)
-			tempval |= RCTRL_PRSDEP_INIT;
-		else
-			tempval &= ~RCTRL_PRSDEP_INIT;
 		gfar_write(&regs->rctrl, tempval);
 
+		gfar_check_rx_parser_mode(priv);
 	}
 
 	gfar_change_mtu(dev, dev->mtu);
@@ -274,7 +274,7 @@ extern const char gfar_driver_version[];
 #define RCTRL_PROM 0x00000008
 #define RCTRL_EMEN 0x00000002
 #define RCTRL_REQ_PARSER (RCTRL_VLEX | RCTRL_IPCSEN | \
-			  RCTRL_TUCSEN)
+			  RCTRL_TUCSEN | RCTRL_FILREN)
 #define RCTRL_CHECKSUMMING (RCTRL_IPCSEN | RCTRL_TUCSEN | \
 			    RCTRL_PRSDEP_INIT)
 #define RCTRL_EXTHASH (RCTRL_GHTX)
@@ -1156,6 +1156,7 @@ extern void gfar_configure_coalescing(struct gfar_private *priv,
 		unsigned long tx_mask, unsigned long rx_mask);
 void gfar_init_sysfs(struct net_device *dev);
 int gfar_set_features(struct net_device *dev, u32 features);
+extern void gfar_check_rx_parser_mode(struct gfar_private *priv);
 
 extern const struct ethtool_ops gfar_ethtool_ops;
 
@@ -140,7 +140,7 @@ MODULE_LICENSE("GPL");
 module_param(mtu, int, 0);
 module_param(debug, int, 0);
 module_param(rx_copybreak, int, 0);
-module_param(dspcfg_workaround, int, 1);
+module_param(dspcfg_workaround, int, 0);
 module_param_array(options, int, NULL, 0);
 module_param_array(full_duplex, int, NULL, 0);
 MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
@@ -2028,8 +2028,8 @@ static void drain_rx(struct net_device *dev)
 		np->rx_ring[i].cmd_status = 0;
 		np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
 		if (np->rx_skbuff[i]) {
-			pci_unmap_single(np->pci_dev,
-				np->rx_dma[i], buflen,
+			pci_unmap_single(np->pci_dev, np->rx_dma[i],
+				buflen + NATSEMI_PADDING,
 				PCI_DMA_FROMDEVICE);
 			dev_kfree_skb(np->rx_skbuff[i]);
 		}
@@ -348,8 +348,9 @@ static int pppoe_device_event(struct notifier_block *this,
 
 	/* Only look at sockets that are using this specific device. */
 	switch (event) {
+	case NETDEV_CHANGEADDR:
 	case NETDEV_CHANGEMTU:
-		/* A change in mtu is a bad thing, requiring
+		/* A change in mtu or address is a bad thing, requiring
 		 * LCP re-negotiation.
 		 */
 
@@ -677,9 +677,11 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
 		if (status & RX_FIFO_FULL)
 			dev->stats.rx_fifo_errors++;
 
+		if (likely(napi_schedule_prep(&lp->napi))) {
 		/* Mask off RX interrupt */
 		misr &= ~RX_INTS;
-		napi_schedule(&lp->napi);
+			__napi_schedule(&lp->napi);
+		}
 	}
 
 	/* TX interrupt request */
@@ -182,10 +182,10 @@ static int sl_alloc_bufs(struct slip *sl, int mtu)
 #ifdef SL_INCLUDE_CSLIP
 	cbuff = xchg(&sl->cbuff, cbuff);
 	slcomp = xchg(&sl->slcomp, slcomp);
+#endif
 #ifdef CONFIG_SLIP_MODE_SLIP6
 	sl->xdata = 0;
 	sl->xbits = 0;
-#endif
 #endif
 	spin_unlock_bh(&sl->lock);
 	err = 0;
@@ -879,7 +879,6 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
 	txptr = db->tx_remove_ptr;
 	while(db->tx_packet_cnt) {
 		tdes0 = le32_to_cpu(txptr->tdes0);
-		pr_debug("tdes0=%x\n", tdes0);
 		if (tdes0 & 0x80000000)
 			break;
 
@@ -889,7 +888,6 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
 
 		/* Transmit statistic counter */
 		if ( tdes0 != 0x7fffffff ) {
-			pr_debug("tdes0=%x\n", tdes0);
 			dev->stats.collisions += (tdes0 >> 3) & 0xf;
 			dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
 			if (tdes0 & TDES0_ERR_MASK) {
@@ -986,7 +984,6 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
 		/* error summary bit check */
 		if (rdes0 & 0x8000) {
 			/* This is a error packet */
-			pr_debug("rdes0: %x\n", rdes0);
 			dev->stats.rx_errors++;
 			if (rdes0 & 1)
 				dev->stats.rx_fifo_errors++;
@@ -1638,7 +1635,6 @@ static u8 dmfe_sense_speed(struct dmfe_board_info * db)
 	else /* DM9102/DM9102A */
 		phy_mode = phy_read(db->ioaddr,
 				db->phy_addr, 17, db->chip_id) & 0xf000;
-	pr_debug("Phy_mode %x\n", phy_mode);
 	switch (phy_mode) {
 	case 0x1000: db->op_mode = DMFE_10MHF; break;
 	case 0x2000: db->op_mode = DMFE_10MFD; break;
@@ -2421,10 +2421,8 @@ static void hso_free_net_device(struct hso_device *hso_dev)
 
 	remove_net_device(hso_net->parent);
 
-	if (hso_net->net) {
+	if (hso_net->net)
 		unregister_netdev(hso_net->net);
-		free_netdev(hso_net->net);
-	}
 
 	/* start freeing */
 	for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) {
@@ -2436,6 +2434,9 @@ static void hso_free_net_device(struct hso_device *hso_dev)
 	kfree(hso_net->mux_bulk_tx_buf);
 	hso_net->mux_bulk_tx_buf = NULL;
 
+	if (hso_net->net)
+		free_netdev(hso_net->net);
+
 	kfree(hso_dev);
 }
 
@@ -297,7 +297,9 @@ ath5k_pci_remove(struct pci_dev *pdev)
 #ifdef CONFIG_PM_SLEEP
 static int ath5k_pci_suspend(struct device *dev)
 {
-	struct ath5k_softc *sc = pci_get_drvdata(to_pci_dev(dev));
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+	struct ath5k_softc *sc = hw->priv;
 
 	ath5k_led_off(sc);
 	return 0;
@@ -306,7 +308,8 @@ static int ath5k_pci_suspend(struct device *dev)
 static int ath5k_pci_resume(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
-	struct ath5k_softc *sc = pci_get_drvdata(pdev);
+	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+	struct ath5k_softc *sc = hw->priv;
 
 	/*
 	 * Suspend/Resume resets the PCI configuration space, so we have to
@@ -10,7 +10,8 @@ static ssize_t ath5k_attr_show_##name(struct device *dev, \
 			struct device_attribute *attr, \
 			char *buf) \
 { \
-	struct ath5k_softc *sc = dev_get_drvdata(dev); \
+	struct ieee80211_hw *hw = dev_get_drvdata(dev); \
+	struct ath5k_softc *sc = hw->priv; \
 	return snprintf(buf, PAGE_SIZE, "%d\n", get); \
 } \
 \
@@ -18,7 +19,8 @@ static ssize_t ath5k_attr_store_##name(struct device *dev, \
 			struct device_attribute *attr, \
 			const char *buf, size_t count) \
 { \
-	struct ath5k_softc *sc = dev_get_drvdata(dev); \
+	struct ieee80211_hw *hw = dev_get_drvdata(dev); \
+	struct ath5k_softc *sc = hw->priv; \
 	int val; \
 \
 	val = (int)simple_strtoul(buf, NULL, 10); \
@@ -33,7 +35,8 @@ static ssize_t ath5k_attr_show_##name(struct device *dev, \
 			struct device_attribute *attr, \
 			char *buf) \
 { \
-	struct ath5k_softc *sc = dev_get_drvdata(dev); \
+	struct ieee80211_hw *hw = dev_get_drvdata(dev); \
+	struct ath5k_softc *sc = hw->priv; \
 	return snprintf(buf, PAGE_SIZE, "%d\n", get); \
 } \
 static DEVICE_ATTR(name, S_IRUGO, ath5k_attr_show_##name, NULL)
@@ -671,7 +671,8 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
 	 * TODO - this could be improved to be dependent on the rate.
 	 * The hardware can keep up at lower rates, but not higher rates
 	 */
-	if (fi->keyix != ATH9K_TXKEYIX_INVALID)
+	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
+	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
 		ndelim += ATH_AGGR_ENCRYPTDELIM;
 
 	/*
@@ -112,6 +112,8 @@ static struct usb_device_id carl9170_usb_ids[] = {
 	{ USB_DEVICE(0x04bb, 0x093f) },
 	/* NEC WL300NU-G */
 	{ USB_DEVICE(0x0409, 0x0249) },
+	/* NEC WL300NU-AG */
+	{ USB_DEVICE(0x0409, 0x02b4) },
 	/* AVM FRITZ!WLAN USB Stick N */
 	{ USB_DEVICE(0x057c, 0x8401) },
 	/* AVM FRITZ!WLAN USB Stick N 2.4 */
@@ -298,6 +298,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
 	{RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/
 	{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
 	{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+	{RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
 	{RTL_USB_DEVICE(0x0Df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
 	{RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
 	/* HP - Lite-On ,8188CUS Slim Combo */
@@ -76,10 +76,10 @@ static int vpac270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
 static void vpac270_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
 {
 	if (skt->nr == 0)
-		gpio_request_array(vpac270_pcmcia_gpios,
+		gpio_free_array(vpac270_pcmcia_gpios,
 				ARRAY_SIZE(vpac270_pcmcia_gpios));
 	else
-		gpio_request_array(vpac270_cf_gpios,
+		gpio_free_array(vpac270_cf_gpios,
 				ARRAY_SIZE(vpac270_cf_gpios));
 }
 
@@ -516,8 +516,17 @@ static void ssb_pcicore_pcie_setup_workarounds(struct ssb_pcicore *pc)
 
 static void ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
 {
+	ssb_pcicore_fix_sprom_core_index(pc);
+
 	/* Disable PCI interrupts. */
 	ssb_write32(pc->dev, SSB_INTVEC, 0);
+
+	/* Additional PCIe always once-executed workarounds */
+	if (pc->dev->id.coreid == SSB_DEV_PCIE) {
+		ssb_pcicore_serdes_workaround(pc);
+		/* TODO: ASPM */
+		/* TODO: Clock Request Update */
+	}
 }
 
 void ssb_pcicore_init(struct ssb_pcicore *pc)
@@ -529,8 +538,6 @@ void ssb_pcicore_init(struct ssb_pcicore *pc)
 	if (!ssb_device_is_enabled(dev))
 		ssb_device_enable(dev, 0);
 
-	ssb_pcicore_fix_sprom_core_index(pc);
-
 #ifdef CONFIG_SSB_PCICORE_HOSTMODE
 	pc->hostmode = pcicore_is_in_hostmode(pc);
 	if (pc->hostmode)
@@ -538,13 +545,6 @@ void ssb_pcicore_init(struct ssb_pcicore *pc)
 #endif /* CONFIG_SSB_PCICORE_HOSTMODE */
 	if (!pc->hostmode)
 		ssb_pcicore_init_clientmode(pc);
-
-	/* Additional PCIe always once-executed workarounds */
-	if (dev->id.coreid == SSB_DEV_PCIE) {
-		ssb_pcicore_serdes_workaround(pc);
-		/* TODO: ASPM */
-		/* TODO: Clock Request Update */
-	}
 }
 
 static u32 ssb_pcie_read(struct ssb_pcicore *pc, u32 address)
@@ -599,8 +599,7 @@ config IT87_WDT
 
 config HP_WATCHDOG
 	tristate "HP ProLiant iLO2+ Hardware Watchdog Timer"
-	depends on X86
-	default m
+	depends on X86 && PCI
 	help
 	  A software monitoring watchdog and NMI sourcing driver. This driver
 	  will detect lockups and provide a stack trace. This is a driver that
@@ -1438,12 +1438,15 @@ char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
 	struct dentry *temp;
 	char *path;
 	int len, pos;
+	unsigned seq;
 
 	if (dentry == NULL)
 		return ERR_PTR(-EINVAL);
 
 retry:
 	len = 0;
+	seq = read_seqbegin(&rename_lock);
+	rcu_read_lock();
 	for (temp = dentry; !IS_ROOT(temp);) {
 		struct inode *inode = temp->d_inode;
 		if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
@@ -1455,10 +1458,12 @@ retry:
 			len += 1 + temp->d_name.len;
 		temp = temp->d_parent;
 		if (temp == NULL) {
+			rcu_read_unlock();
 			pr_err("build_path corrupt dentry %p\n", dentry);
 			return ERR_PTR(-EINVAL);
 		}
 	}
+	rcu_read_unlock();
 	if (len)
 		len--; /* no leading '/' */
 
@@ -1467,9 +1472,12 @@ retry:
 		return ERR_PTR(-ENOMEM);
 	pos = len;
 	path[pos] = 0; /* trailing null */
+	rcu_read_lock();
 	for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
-		struct inode *inode = temp->d_inode;
+		struct inode *inode;
 
+		spin_lock(&temp->d_lock);
+		inode = temp->d_inode;
 		if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
 			dout("build_path path+%d: %p SNAPDIR\n",
 			     pos, temp);
@@ -1478,21 +1486,26 @@ retry:
 			break;
 		} else {
 			pos -= temp->d_name.len;
-			if (pos < 0)
+			if (pos < 0) {
+				spin_unlock(&temp->d_lock);
 				break;
+			}
 			strncpy(path + pos, temp->d_name.name,
 				temp->d_name.len);
 		}
+		spin_unlock(&temp->d_lock);
 		if (pos)
 			path[--pos] = '/';
 		temp = temp->d_parent;
 		if (temp == NULL) {
+			rcu_read_unlock();
 			pr_err("build_path corrupt dentry\n");
 			kfree(path);
 			return ERR_PTR(-EINVAL);
 		}
 	}
-	if (pos != 0) {
+	rcu_read_unlock();
+	if (pos != 0 || read_seqretry(&rename_lock, seq)) {
 		pr_err("build_path did not end path lookup where "
 		       "expected, namelen is %d, pos is %d\n", len, pos);
 		/* presumably this is only possible if racing with a
@@ -35,6 +35,7 @@
 #include <linux/delay.h>
 #include <linux/kthread.h>
 #include <linux/freezer.h>
+#include <linux/namei.h>
 #include <net/ipv6.h>
 #include "cifsfs.h"
 #include "cifspdu.h"
@@ -542,14 +543,12 @@ static const struct super_operations cifs_super_ops = {
 static struct dentry *
 cifs_get_root(struct smb_vol *vol, struct super_block *sb)
 {
-	int xid, rc;
-	struct inode *inode;
-	struct qstr name;
-	struct dentry *dparent = NULL, *dchild = NULL, *alias;
+	struct dentry *dentry;
 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
-	unsigned int i, full_len, len;
-	char *full_path = NULL, *pstart;
+	char *full_path = NULL;
+	char *s, *p;
 	char sep;
+	int xid;
 
 	full_path = cifs_build_path_to_root(vol, cifs_sb,
 					    cifs_sb_master_tcon(cifs_sb));
@ -560,73 +559,32 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
|
|||||||
|
|
||||||
xid = GetXid();
|
xid = GetXid();
|
||||||
sep = CIFS_DIR_SEP(cifs_sb);
|
sep = CIFS_DIR_SEP(cifs_sb);
|
||||||
dparent = dget(sb->s_root);
|
dentry = dget(sb->s_root);
|
||||||
full_len = strlen(full_path);
|
p = s = full_path;
|
||||||
full_path[full_len] = sep;
|
|
||||||
pstart = full_path + 1;
|
|
||||||
|
|
||||||
for (i = 1, len = 0; i <= full_len; i++) {
|
do {
|
||||||
if (full_path[i] != sep || !len) {
|
struct inode *dir = dentry->d_inode;
|
||||||
len++;
|
struct dentry *child;
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
full_path[i] = 0;
|
/* skip separators */
|
||||||
cFYI(1, "get dentry for %s", pstart);
|
while (*s == sep)
|
||||||
|
s++;
|
||||||
|
if (!*s)
|
||||||
|
break;
|
||||||
|
p = s++;
|
||||||
|
/* next separator */
|
||||||
|
while (*s && *s != sep)
|
||||||
|
s++;
|
||||||
|
|
||||||
name.name = pstart;
|
mutex_lock(&dir->i_mutex);
|
||||||
name.len = len;
|
child = lookup_one_len(p, dentry, s - p);
|
||||||
name.hash = full_name_hash(pstart, len);
|
mutex_unlock(&dir->i_mutex);
|
||||||
dchild = d_lookup(dparent, &name);
|
dput(dentry);
|
||||||
if (dchild == NULL) {
|
dentry = child;
|
||||||
cFYI(1, "not exists");
|
} while (!IS_ERR(dentry));
|
||||||
dchild = d_alloc(dparent, &name);
|
|
||||||
if (dchild == NULL) {
|
|
||||||
dput(dparent);
|
|
||||||
dparent = ERR_PTR(-ENOMEM);
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
cFYI(1, "get inode");
|
|
||||||
if (dchild->d_inode == NULL) {
|
|
||||||
cFYI(1, "not exists");
|
|
||||||
inode = NULL;
|
|
||||||
if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext)
|
|
||||||
rc = cifs_get_inode_info_unix(&inode, full_path,
|
|
||||||
sb, xid);
|
|
||||||
else
|
|
||||||
rc = cifs_get_inode_info(&inode, full_path,
|
|
||||||
NULL, sb, xid, NULL);
|
|
||||||
if (rc) {
|
|
||||||
dput(dchild);
|
|
||||||
dput(dparent);
|
|
||||||
dparent = ERR_PTR(rc);
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
alias = d_materialise_unique(dchild, inode);
|
|
||||||
if (alias != NULL) {
|
|
||||||
dput(dchild);
|
|
||||||
if (IS_ERR(alias)) {
|
|
||||||
dput(dparent);
|
|
||||||
dparent = ERR_PTR(-EINVAL); /* XXX */
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
dchild = alias;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
cFYI(1, "parent %p, child %p", dparent, dchild);
|
|
||||||
|
|
||||||
dput(dparent);
|
|
||||||
dparent = dchild;
|
|
||||||
len = 0;
|
|
||||||
pstart = full_path + i + 1;
|
|
||||||
full_path[i] = sep;
|
|
||||||
}
|
|
||||||
out:
|
|
||||||
_FreeXid(xid);
|
_FreeXid(xid);
|
||||||
kfree(full_path);
|
kfree(full_path);
|
||||||
return dparent;
|
return dentry;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int cifs_set_super(struct super_block *sb, void *data)
|
static int cifs_set_super(struct super_block *sb, void *data)
|
||||||
|
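The rewritten cifs_get_root() above walks the prepared path one component at a time, handing each name to lookup_one_len() under the parent directory's i_mutex. The separator-scanning part of that loop, pulled out into plain C so the pointer arithmetic is easy to follow; the printf stands in for the per-component lookup and everything here is illustrative:

/* Illustrative only: split "\share\dir\file" into components the same way. */
#include <stdio.h>

static void walk_components(const char *full_path, char sep)
{
	const char *s = full_path, *p;

	for (;;) {
		while (*s == sep)               /* skip separators */
			s++;
		if (!*s)
			break;
		p = s++;
		while (*s && *s != sep)         /* find the next separator */
			s++;
		printf("component: %.*s\n", (int)(s - p), p);
	}
}

int main(void)
{
	walk_components("\\share\\dir\\file.txt", '\\');
	return 0;
}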
@@ -129,5 +129,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 extern const struct export_operations cifs_export_ops;
 #endif /* CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "1.73"
+#define CIFS_VERSION   "1.74"
 #endif				/* _CIFSFS_H */
@@ -3485,7 +3485,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid)
 		goto out;
 	}
 
-	snprintf(username, MAX_USERNAME_SIZE, "krb50x%x", fsuid);
+	snprintf(username, sizeof(username), "krb50x%x", fsuid);
 	vol_info->username = username;
 	vol_info->local_nls = cifs_sb->local_nls;
 	vol_info->linux_uid = fsuid;
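The connect.c change above passes sizeof(username) instead of a separate size macro, so the bound stays tied to the buffer even if the array is ever resized. A small standalone illustration; the buffer size and uid value are arbitrary:

/* Illustrative only: sizeof on the array keeps snprintf's bound in sync. */
#include <stdio.h>

int main(void)
{
	char username[32];              /* stand-in for the on-stack buffer */
	unsigned int fsuid = 0x3e8;

	snprintf(username, sizeof(username), "krb50x%x", fsuid);
	printf("%s\n", username);
	return 0;
}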
@@ -55,6 +55,7 @@ build_path_from_dentry(struct dentry *direntry)
 	char dirsep;
 	struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+	unsigned seq;
 
 	if (direntry == NULL)
 		return NULL;  /* not much we can do if dentry is freed and
@@ -68,22 +69,29 @@ build_path_from_dentry(struct dentry *direntry)
 		dfsplen = 0;
 cifs_bp_rename_retry:
 	namelen = dfsplen;
+	seq = read_seqbegin(&rename_lock);
+	rcu_read_lock();
 	for (temp = direntry; !IS_ROOT(temp);) {
 		namelen += (1 + temp->d_name.len);
 		temp = temp->d_parent;
 		if (temp == NULL) {
 			cERROR(1, "corrupt dentry");
+			rcu_read_unlock();
 			return NULL;
 		}
 	}
+	rcu_read_unlock();
 
 	full_path = kmalloc(namelen+1, GFP_KERNEL);
 	if (full_path == NULL)
 		return full_path;
 	full_path[namelen] = 0;	/* trailing null */
+	rcu_read_lock();
 	for (temp = direntry; !IS_ROOT(temp);) {
+		spin_lock(&temp->d_lock);
 		namelen -= 1 + temp->d_name.len;
 		if (namelen < 0) {
+			spin_unlock(&temp->d_lock);
 			break;
 		} else {
 			full_path[namelen] = dirsep;
@@ -91,14 +99,17 @@ cifs_bp_rename_retry:
 				temp->d_name.len);
 			cFYI(0, "name: %s", full_path + namelen);
 		}
+		spin_unlock(&temp->d_lock);
 		temp = temp->d_parent;
 		if (temp == NULL) {
 			cERROR(1, "corrupt dentry");
+			rcu_read_unlock();
 			kfree(full_path);
 			return NULL;
 		}
 	}
-	if (namelen != dfsplen) {
+	rcu_read_unlock();
+	if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) {
 		cERROR(1, "did not end path lookup where expected namelen is %d",
 			namelen);
 		/* presumably this is only possible if racing with a rename
@@ -1737,7 +1737,7 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
 		io_parms.pid = pid;
 		io_parms.tcon = pTcon;
 		io_parms.offset = *poffset;
-		io_parms.length = len;
+		io_parms.length = cur_len;
 		rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
 				 &read_data, &buf_type);
 		pSMBr = (struct smb_com_read_rsp *)read_data;
@@ -428,8 +428,7 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
 	    (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
 		flags |= NTLMSSP_NEGOTIATE_SIGN;
 		if (!ses->server->session_estab)
-			flags |= NTLMSSP_NEGOTIATE_KEY_XCH |
-				NTLMSSP_NEGOTIATE_EXTENDED_SEC;
+			flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
 	}
 
 	sec_blob->NegotiateFlags = cpu_to_le32(flags);
@@ -465,10 +464,11 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
 		NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
 		NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC;
 	if (ses->server->sec_mode &
-	    (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+	    (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
 		flags |= NTLMSSP_NEGOTIATE_SIGN;
-	if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED)
-		flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN;
+		if (!ses->server->session_estab)
+			flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
+	}
 
 	tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE);
 	sec_blob->NegotiateFlags = cpu_to_le32(flags);
@@ -37,7 +37,7 @@ static DEFINE_MUTEX(read_mutex);
 /* These macros may change in future, to provide better st_ino semantics. */
 #define OFFSET(x)	((x)->i_ino)
 
-static unsigned long cramino(struct cramfs_inode *cino, unsigned int offset)
+static unsigned long cramino(const struct cramfs_inode *cino, unsigned int offset)
 {
 	if (!cino->offset)
 		return offset + 1;
@@ -61,7 +61,7 @@ static unsigned long cramino(struct cramfs_inode *cino, unsigned int offset)
 }
 
 static struct inode *get_cramfs_inode(struct super_block *sb,
-	struct cramfs_inode *cramfs_inode, unsigned int offset)
+	const struct cramfs_inode *cramfs_inode, unsigned int offset)
 {
 	struct inode *inode;
 	static struct timespec zerotime;
@@ -317,7 +317,7 @@ static int cramfs_fill_super(struct super_block *sb, void *data, int silent)
 	/* Set it all up.. */
 	sb->s_op = &cramfs_ops;
 	root = get_cramfs_inode(sb, &super.root, 0);
-	if (!root)
+	if (IS_ERR(root))
 		goto out;
 	sb->s_root = d_alloc_root(root);
 	if (!sb->s_root) {
@@ -423,6 +423,7 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
 {
 	unsigned int offset = 0;
+	struct inode *inode = NULL;
 	int sorted;
 
 	mutex_lock(&read_mutex);
@@ -449,8 +450,8 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s
 
 	for (;;) {
 		if (!namelen) {
-			mutex_unlock(&read_mutex);
-			return ERR_PTR(-EIO);
+			inode = ERR_PTR(-EIO);
+			goto out;
 		}
 		if (name[namelen-1])
 			break;
@@ -462,17 +463,18 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s
 		if (retval > 0)
 			continue;
 		if (!retval) {
-			struct cramfs_inode entry = *de;
-			mutex_unlock(&read_mutex);
-			d_add(dentry, get_cramfs_inode(dir->i_sb, &entry, dir_off));
-			return NULL;
+			inode = get_cramfs_inode(dir->i_sb, de, dir_off);
+			break;
 		}
 		/* else (retval < 0) */
 		if (sorted)
 			break;
 	}
+out:
 	mutex_unlock(&read_mutex);
-	d_add(dentry, NULL);
+	if (IS_ERR(inode))
+		return ERR_CAST(inode);
+	d_add(dentry, inode);
 	return NULL;
 }
 
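The cramfs hunks above move the lookup path from returning NULL on failure to the ERR_PTR()/IS_ERR()/ERR_CAST() convention, so an errno can travel back through the same pointer return value and be unpacked at a single exit point. A userspace re-creation of that convention; it is simplified, and the kernel's real macros live in include/linux/err.h:

/* Illustrative only: encode a small negative errno inside a pointer value. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *err_ptr(long err)      { return (void *)err; }
static inline long  ptr_err(const void *p) { return (long)p; }
static inline int   is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* A lookup that reports failure with an encoded errno instead of NULL. */
static void *lookup_record(int key)
{
	if (key < 0)
		return err_ptr(-EINVAL);
	if (key != 42)
		return err_ptr(-ENOENT);
	return malloc(16);              /* pretend this is the found object */
}

int main(void)
{
	void *rec = lookup_record(7);

	if (is_err(rec))
		printf("lookup failed: errno %ld\n", -ptr_err(rec));
	else
		free(rec);
	return 0;
}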
fs/dcache.c | 53
@@ -1813,8 +1813,6 @@ seqretry:
 		tname = dentry->d_name.name;
 		i = dentry->d_inode;
 		prefetch(tname);
-		if (i)
-			prefetch(i);
 		/*
 		 * This seqcount check is required to ensure name and
 		 * len are loaded atomically, so as not to walk off the
@@ -2213,14 +2211,15 @@ static void dentry_unlock_parents_for_move(struct dentry *dentry,
 * The hash value has to match the hash queue that the dentry is on..
 */
 /*
- * d_move - move a dentry
+ * __d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
- * dcache entries should not be moved in this way.
+ * dcache entries should not be moved in this way.  Caller hold
+ * rename_lock.
 */
-void d_move(struct dentry * dentry, struct dentry * target)
+static void __d_move(struct dentry * dentry, struct dentry * target)
 {
 	if (!dentry->d_inode)
 		printk(KERN_WARNING "VFS: moving negative dcache entry\n");
@@ -2228,8 +2227,6 @@ void d_move(struct dentry * dentry, struct dentry * target)
 	BUG_ON(d_ancestor(dentry, target));
 	BUG_ON(d_ancestor(target, dentry));
 
-	write_seqlock(&rename_lock);
-
 	dentry_lock_for_move(dentry, target);
 
 	write_seqcount_begin(&dentry->d_seq);
@@ -2275,6 +2272,20 @@ void d_move(struct dentry * dentry, struct dentry * target)
 	spin_unlock(&target->d_lock);
 	fsnotify_d_move(dentry);
 	spin_unlock(&dentry->d_lock);
+}
+
+/*
+ * d_move - move a dentry
+ * @dentry: entry to move
+ * @target: new dentry
+ *
+ * Update the dcache to reflect the move of a file name. Negative
+ * dcache entries should not be moved in this way.
+ */
+void d_move(struct dentry *dentry, struct dentry *target)
+{
+	write_seqlock(&rename_lock);
+	__d_move(dentry, target);
 	write_sequnlock(&rename_lock);
 }
 EXPORT_SYMBOL(d_move);
@@ -2302,7 +2313,7 @@ struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
 * This helper attempts to cope with remotely renamed directories
 *
 * It assumes that the caller is already holding
- * dentry->d_parent->d_inode->i_mutex and the inode->i_lock
+ * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock
 *
 * Note: If ever the locking in lock_rename() changes, then please
 * remember to update this too...
@@ -2317,11 +2328,6 @@ static struct dentry *__d_unalias(struct inode *inode,
 	if (alias->d_parent == dentry->d_parent)
 		goto out_unalias;
 
-	/* Check for loops */
-	ret = ERR_PTR(-ELOOP);
-	if (d_ancestor(alias, dentry))
-		goto out_err;
-
 	/* See lock_rename() */
 	ret = ERR_PTR(-EBUSY);
 	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
@@ -2331,7 +2337,7 @@ static struct dentry *__d_unalias(struct inode *inode,
 		goto out_err;
 	m2 = &alias->d_parent->d_inode->i_mutex;
 out_unalias:
-	d_move(alias, dentry);
+	__d_move(alias, dentry);
 	ret = alias;
 out_err:
 	spin_unlock(&inode->i_lock);
@@ -2416,15 +2422,24 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
 	alias = __d_find_alias(inode, 0);
 	if (alias) {
 		actual = alias;
-		/* Is this an anonymous mountpoint that we could splice
-		 * into our tree? */
-		if (IS_ROOT(alias)) {
+		write_seqlock(&rename_lock);
+
+		if (d_ancestor(alias, dentry)) {
+			/* Check for loops */
+			actual = ERR_PTR(-ELOOP);
+		} else if (IS_ROOT(alias)) {
+			/* Is this an anonymous mountpoint that we
+			 * could splice into our tree? */
 			__d_materialise_dentry(dentry, alias);
+			write_sequnlock(&rename_lock);
 			__d_drop(alias);
 			goto found;
-		}
-		/* Nope, but we must(!) avoid directory aliasing */
-		actual = __d_unalias(inode, dentry, alias);
+		} else {
+			/* Nope, but we must(!) avoid directory
+			 * aliasing */
+			actual = __d_unalias(inode, dentry, alias);
+		}
+		write_sequnlock(&rename_lock);
 		if (IS_ERR(actual))
 			dput(alias);
 		goto out_nolock;
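The fs/dcache.c hunks split d_move() into a thin exported wrapper that takes rename_lock and an internal __d_move() that assumes the lock is already held, which lets d_materialise_unique() perform its loop check and the move under one write_seqlock(). A small pthread sketch of that wrapper/helper split; the names and the renamed state are invented for the example:

/* Illustrative only: public function takes the lock, __helper assumes it. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rename_mutex = PTHREAD_MUTEX_INITIALIZER;
static const char *current_name = "old";

/* Helper: caller must already hold rename_mutex. */
static void __set_name(const char *name)
{
	current_name = name;
}

/* Public entry point: takes the lock, then reuses the helper. */
static void set_name(const char *name)
{
	pthread_mutex_lock(&rename_mutex);
	__set_name(name);
	pthread_mutex_unlock(&rename_mutex);
}

/* A caller that already holds the lock for a larger operation can use
 * __set_name() directly without deadlocking on itself. */
static void check_and_rename(const char *name)
{
	pthread_mutex_lock(&rename_mutex);
	if (current_name != name)       /* some validation under the lock */
		__set_name(name);
	printf("now called %s\n", current_name);
	pthread_mutex_unlock(&rename_mutex);
}

int main(void)
{
	set_name("new");
	check_and_rename("newer");
	return 0;
}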
@@ -913,7 +913,7 @@ struct dentry *exofs_get_parent(struct dentry *child)
 	unsigned long ino = exofs_parent_ino(child);
 
 	if (!ino)
-		return NULL;
+		return ERR_PTR(-ESTALE);
 
 	return d_obtain_alias(exofs_iget(child->d_inode->i_sb, ino));
 }
@@ -976,16 +976,12 @@ void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
 
 	pagevec_init(&pvec, 0);
 	next = 0;
-	while (next <= (loff_t)-1 &&
-	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)
-	       ) {
+	do {
+		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
+			break;
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
-			pgoff_t page_index = page->index;
-
-			ASSERTCMP(page_index, >=, next);
-			next = page_index + 1;
-
+			next = page->index;
 			if (PageFsCache(page)) {
 				__fscache_wait_on_page_write(cookie, page);
 				__fscache_uncache_page(cookie, page);
@@ -993,7 +989,7 @@ void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
 		}
 		pagevec_release(&pvec);
 		cond_resched();
-	}
+	} while (++next);
 
 	_leave("");
 }
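The fscache hunk above reshapes the loop into: grab a batch starting at next, process it, remember the last index seen, and stop when a batch comes back empty. The same loop shape with a toy in-memory batch source standing in for pagevec_lookup(); everything here is illustrative:

/* Illustrative only: batch-and-resume iteration over a sparse index space. */
#include <stdio.h>

#define BATCH  4
#define NITEMS 10

static int lookup_batch(unsigned start, int *out, int max)
{
	int n = 0;

	while (start < NITEMS && n < max)
		out[n++] = start++;
	return n;                       /* 0 means nothing left */
}

int main(void)
{
	int batch[BATCH];
	unsigned next = 0;

	do {
		int n = lookup_batch(next, batch, BATCH);
		if (!n)
			break;
		for (int i = 0; i < n; i++) {
			next = batch[i];        /* remember how far we got */
			printf("processing item %d\n", batch[i]);
		}
	} while (++next);               /* resume one past the last item */
	return 0;
}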
@@ -1069,6 +1069,7 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
 		return 0;
 
 	gfs2_log_lock(sdp);
+	spin_lock(&sdp->sd_ail_lock);
 	head = bh = page_buffers(page);
 	do {
 		if (atomic_read(&bh->b_count))
@@ -1080,6 +1081,7 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
 			goto not_possible;
 		bh = bh->b_this_page;
 	} while(bh != head);
+	spin_unlock(&sdp->sd_ail_lock);
 	gfs2_log_unlock(sdp);
 
 	head = bh = page_buffers(page);
@@ -1112,6 +1114,7 @@ not_possible: /* Should never happen */
 	WARN_ON(buffer_dirty(bh));
 	WARN_ON(buffer_pinned(bh));
 cannot_release:
+	spin_unlock(&sdp->sd_ail_lock);
 	gfs2_log_unlock(sdp);
 	return 0;
 }
@@ -47,10 +47,10 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl)
 				bd_ail_gl_list);
 		bh = bd->bd_bh;
 		gfs2_remove_from_ail(bd);
-		spin_unlock(&sdp->sd_ail_lock);
-
 		bd->bd_bh = NULL;
 		bh->b_private = NULL;
+		spin_unlock(&sdp->sd_ail_lock);
+
 		bd->bd_blkno = bh->b_blocknr;
 		gfs2_log_lock(sdp);
 		gfs2_assert_withdraw(sdp, !buffer_busy(bh));
@@ -221,8 +221,10 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
 		}
 	}
 
-	if (ip == GFS2_I(gl->gl_sbd->sd_rindex))
+	if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) {
+		gfs2_log_flush(gl->gl_sbd, NULL);
 		gl->gl_sbd->sd_rindex_uptodate = 0;
+	}
 	if (ip && S_ISREG(ip->i_inode.i_mode))
 		truncate_inode_pages(ip->i_inode.i_mapping, 0);
 }
@@ -17,6 +17,7 @@
 #include <linux/buffer_head.h>
 #include <linux/rcupdate.h>
 #include <linux/rculist_bl.h>
+#include <linux/completion.h>
 
 #define DIO_WAIT	0x00000010
 #define DIO_METADATA	0x00000020
@@ -546,6 +547,7 @@ struct gfs2_sbd {
 	struct gfs2_glock *sd_trans_gl;
 	wait_queue_head_t sd_glock_wait;
 	atomic_t sd_glock_disposal;
+	struct completion sd_locking_init;
 
 	/* Inode Stuff */
 
@@ -903,6 +903,7 @@ void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
 		if (gfs2_ail1_empty(sdp))
 			break;
 	}
+	gfs2_log_flush(sdp, NULL);
 }
 
 static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
@@ -72,6 +72,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 
 	init_waitqueue_head(&sdp->sd_glock_wait);
 	atomic_set(&sdp->sd_glock_disposal, 0);
+	init_completion(&sdp->sd_locking_init);
 	spin_lock_init(&sdp->sd_statfs_spin);
 
 	spin_lock_init(&sdp->sd_rindex_spin);
@@ -1017,11 +1018,13 @@ hostdata_error:
 	fsname++;
 	if (lm->lm_mount == NULL) {
 		fs_info(sdp, "Now mounting FS...\n");
+		complete(&sdp->sd_locking_init);
 		return 0;
 	}
 	ret = lm->lm_mount(sdp, fsname);
 	if (ret == 0)
 		fs_info(sdp, "Joined cluster. Now mounting FS...\n");
+	complete(&sdp->sd_locking_init);
 	return ret;
 }
 
@@ -757,13 +757,17 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
 	struct timespec atime;
 	struct gfs2_dinode *di;
 	int ret = -EAGAIN;
+	int unlock_required = 0;
 
 	/* Skip timestamp update, if this is from a memalloc */
 	if (current->flags & PF_MEMALLOC)
 		goto do_flush;
-	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
-	if (ret)
-		goto do_flush;
+	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
+		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
+		if (ret)
+			goto do_flush;
+		unlock_required = 1;
+	}
 	ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
 	if (ret)
 		goto do_unlock;
@@ -780,6 +784,7 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
 	}
 	gfs2_trans_end(sdp);
 do_unlock:
-	gfs2_glock_dq_uninit(&gh);
+	if (unlock_required)
+		gfs2_glock_dq_uninit(&gh);
 do_flush:
 	if (wbc->sync_mode == WB_SYNC_ALL)
@@ -1427,7 +1432,20 @@ out:
 	return error;
 }
 
-/*
+/**
+ * gfs2_evict_inode - Remove an inode from cache
+ * @inode: The inode to evict
+ *
+ * There are three cases to consider:
+ * 1. i_nlink == 0, we are final opener (and must deallocate)
+ * 2. i_nlink == 0, we are not the final opener (and cannot deallocate)
+ * 3. i_nlink > 0
+ *
+ * If the fs is read only, then we have to treat all cases as per #3
+ * since we are unable to do any deallocation. The inode will be
+ * deallocated by the next read/write node to attempt an allocation
+ * in the same resource group
+ *
  * We have to (at the moment) hold the inodes main lock to cover
  * the gap between unlocking the shared lock on the iopen lock and
  * taking the exclusive lock. I'd rather do a shared -> exclusive
@@ -1470,6 +1488,8 @@ static void gfs2_evict_inode(struct inode *inode)
 	if (error)
 		goto out_truncate;
 
+	/* Case 1 starts here */
+
 	if (S_ISDIR(inode->i_mode) &&
 	    (ip->i_diskflags & GFS2_DIF_EXHASH)) {
 		error = gfs2_dir_exhash_dealloc(ip);
@@ -1493,13 +1513,16 @@ static void gfs2_evict_inode(struct inode *inode)
 		goto out_unlock;
 
 out_truncate:
+	/* Case 2 starts here */
 	error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
 	if (error)
 		goto out_unlock;
-	gfs2_final_release_pages(ip);
+	/* Needs to be done before glock release & also in a transaction */
+	truncate_inode_pages(&inode->i_data, 0);
 	gfs2_trans_end(sdp);
 
 out_unlock:
+	/* Error path for case 1 */
 	if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags))
 		gfs2_glock_dq(&ip->i_iopen_gh);
 	gfs2_holder_uninit(&ip->i_iopen_gh);
@@ -1507,6 +1530,7 @@ out_unlock:
 	if (error && error != GLR_TRYFAILED && error != -EROFS)
 		fs_warn(sdp, "gfs2_evict_inode: %d\n", error);
 out:
+	/* Case 3 starts here */
 	truncate_inode_pages(&inode->i_data, 0);
 	end_writeback(inode);
 
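The gfs2_write_inode() hunk above only takes the glock when the caller does not already hold it, and uses unlock_required to drop only what it took itself. A userspace sketch of that conditional-acquire pattern with an owner-tracking mutex; the names are invented and the owner check is kept simple enough for this single-threaded demo:

/* Illustrative only: lock it unless we already own it, and remember which. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
static pthread_t lk_owner;
static int lk_held;

static int locked_by_me(void)
{
	return lk_held && pthread_equal(lk_owner, pthread_self());
}

static void lock_me(void)
{
	pthread_mutex_lock(&lk);
	lk_owner = pthread_self();
	lk_held = 1;
}

static void unlock_me(void)
{
	lk_held = 0;
	pthread_mutex_unlock(&lk);
}

static void write_state(void)
{
	int unlock_required = 0;

	if (!locked_by_me()) {          /* caller may already hold the lock */
		lock_me();
		unlock_required = 1;
	}
	puts("state written under lock");
	if (unlock_required)            /* only drop what we took ourselves */
		unlock_me();
}

int main(void)
{
	write_state();                  /* acquires and releases internally */
	lock_me();
	write_state();                  /* reuses the caller's lock */
	unlock_me();
	return 0;
}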
@@ -338,6 +338,9 @@ static ssize_t lkfirst_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
 	rv = sscanf(buf, "%u", &first);
 	if (rv != 1 || first > 1)
 		return -EINVAL;
+	rv = wait_for_completion_killable(&sdp->sd_locking_init);
+	if (rv)
+		return rv;
 	spin_lock(&sdp->sd_jindex_spin);
 	rv = -EBUSY;
 	if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
@@ -414,7 +417,9 @@ static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
 	rv = sscanf(buf, "%d", &jid);
 	if (rv != 1)
 		return -EINVAL;
+	rv = wait_for_completion_killable(&sdp->sd_locking_init);
+	if (rv)
+		return rv;
 	spin_lock(&sdp->sd_jindex_spin);
 	rv = -EINVAL;
 	if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
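Taken together, the gfs2 hunks use sd_locking_init as a completion: init_completion() when the superblock is set up, complete() once the lock module has mounted, and wait_for_completion_killable() in the sysfs store handlers that must not run before that point. A pthread analog of that trio; it is illustrative only and, unlike the killable kernel variant, does not react to signals:

/* Illustrative only: a one-shot completion built from a mutex and condvar. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	int             done;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = 0;
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_broadcast(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static struct completion locking_init;

static void *mount_thread(void *arg)
{
	(void)arg;
	sleep(1);                       /* pretend to set up the lock module */
	complete(&locking_init);        /* let waiters proceed */
	return NULL;
}

int main(void)
{
	pthread_t t;

	init_completion(&locking_init);
	pthread_create(&t, NULL, mount_thread, NULL);
	wait_for_completion(&locking_init);   /* what the store handlers do */
	puts("locking initialised, store may proceed");
	pthread_join(t, NULL);
	return 0;
}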
@@ -139,7 +139,8 @@ static int file_removed(struct dentry *dentry, const char *file)
 static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry,
 				   struct nameidata *nd)
 {
-	struct dentry *proc_dentry, *new, *parent;
+	struct dentry *proc_dentry, *parent;
+	struct qstr *name = &dentry->d_name;
 	struct inode *inode;
 	int err, deleted;
 
@@ -149,23 +150,9 @@ static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry,
 	else if (deleted)
 		return ERR_PTR(-ENOENT);
 
-	err = -ENOMEM;
 	parent = HPPFS_I(ino)->proc_dentry;
 	mutex_lock(&parent->d_inode->i_mutex);
-	proc_dentry = d_lookup(parent, &dentry->d_name);
-	if (proc_dentry == NULL) {
-		proc_dentry = d_alloc(parent, &dentry->d_name);
-		if (proc_dentry == NULL) {
-			mutex_unlock(&parent->d_inode->i_mutex);
-			goto out;
-		}
-		new = (*parent->d_inode->i_op->lookup)(parent->d_inode,
-						       proc_dentry, NULL);
-		if (new) {
-			dput(proc_dentry);
-			proc_dentry = new;
-		}
-	}
+	proc_dentry = lookup_one_len(name->name, parent, name->len);
 	mutex_unlock(&parent->d_inode->i_mutex);
 
 	if (IS_ERR(proc_dentry))
@@ -174,13 +161,11 @@ static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry,
 	err = -ENOMEM;
 	inode = get_inode(ino->i_sb, proc_dentry);
 	if (!inode)
-		goto out_dput;
+		goto out;
 
 	d_add(dentry, inode);
 	return NULL;
 
- out_dput:
-	dput(proc_dentry);
 out:
 	return ERR_PTR(err);
 }
@@ -690,8 +675,10 @@ static struct inode *get_inode(struct super_block *sb, struct dentry *dentry)
 	struct inode *proc_ino = dentry->d_inode;
 	struct inode *inode = new_inode(sb);
 
-	if (!inode)
+	if (!inode) {
+		dput(dentry);
 		return ERR_PTR(-ENOMEM);
+	}
 
 	if (S_ISDIR(dentry->d_inode->i_mode)) {
 		inode->i_op = &hppfs_dir_iops;
@@ -704,7 +691,7 @@ static struct inode *get_inode(struct super_block *sb, struct dentry *dentry)
 		inode->i_fop = &hppfs_file_fops;
 	}
 
-	HPPFS_I(inode)->proc_dentry = dget(dentry);
+	HPPFS_I(inode)->proc_dentry = dentry;
 
 	inode->i_uid = proc_ino->i_uid;
 	inode->i_gid = proc_ino->i_gid;
@@ -737,7 +724,7 @@ static int hppfs_fill_super(struct super_block *sb, void *d, int silent)
 	sb->s_fs_info = proc_mnt;
 
 	err = -ENOMEM;
-	root_inode = get_inode(sb, proc_mnt->mnt_sb->s_root);
+	root_inode = get_inode(sb, dget(proc_mnt->mnt_sb->s_root));
 	if (!root_inode)
 		goto out_mntput;
 
@@ -822,7 +822,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf,
 		goto out;
 
 	attr->set_buf[size] = '\0';
-	val = simple_strtol(attr->set_buf, NULL, 0);
+	val = simple_strtoll(attr->set_buf, NULL, 0);
 	ret = attr->set(attr->data, val);
 	if (ret == 0)
 		ret = len; /* on success, claim we got the whole input */
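The libfs change parses the attribute with the 64-bit simple_strtoll() instead of simple_strtol(), so values wider than long on 32-bit builds are no longer truncated. A userspace illustration using the C library's strtol()/strtoll() pair; the difference in output only shows up on a build where long is 32 bits:

/* Illustrative only: parsing width matters once the value exceeds LONG_MAX. */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *s = "5000000000";   /* bigger than 2^32 - 1 */

	long      narrow = strtol(s, NULL, 0);
	long long wide   = strtoll(s, NULL, 0);

	printf("strtol : %ld (LONG_MAX here is %ld)\n", narrow, LONG_MAX);
	printf("strtoll: %lld\n", wide);
	return 0;
}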
@@ -433,6 +433,8 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
 			goto err_parent;
 		BUG_ON(nd->inode != parent->d_inode);
 	} else {
+		if (dentry->d_parent != parent)
+			goto err_parent;
 		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 		if (!__d_rcu_to_refcount(dentry, nd->seq))
 			goto err_child;
@@ -940,7 +942,6 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
 		 * Don't forget we might have a non-mountpoint managed dentry
 		 * that wants to block transit.
 		 */
-		*inode = path->dentry->d_inode;
 		if (unlikely(managed_dentry_might_block(path->dentry)))
 			return false;
 
@@ -953,6 +954,12 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
 		path->mnt = mounted;
 		path->dentry = mounted->mnt_root;
 		nd->seq = read_seqcount_begin(&path->dentry->d_seq);
+		/*
+		 * Update the inode too. We don't need to re-check the
+		 * dentry sequence number here after this d_inode read,
+		 * because a mount-point is always pinned.
+		 */
+		*inode = path->dentry->d_inode;
 	}
 	return true;
 }
@@ -398,7 +398,6 @@ filelayout_write_pagelist(struct nfs_write_data *data, int sync)
 	 * this offset and save the original offset.
 	 */
 	data->args.offset = filelayout_get_dserver_offset(lseg, offset);
-	data->mds_offset = offset;
 
 	/* Perform an asynchronous write */
 	status = nfs_initiate_write(data, ds->ds_clp->cl_rpcclient,
Some files were not shown because too many files have changed in this diff.