Merge branch 'core/percpu' into x86/paravirt

H. Peter Anvin 2009-02-04 16:58:26 -08:00
commit 327641da8e
68 changed files with 588 additions and 312 deletions


@@ -954,14 +954,14 @@ elevator_allow_merge_fn called whenever the block layer determines
 results in some sort of conflict internally,
 this hook allows it to do that.

-elevator_dispatch_fn		fills the dispatch queue with ready requests.
+elevator_dispatch_fn*		fills the dispatch queue with ready requests.
 I/O schedulers are free to postpone requests by
 not filling the dispatch queue unless @force
 is non-zero.  Once dispatched, I/O schedulers
 are not allowed to manipulate the requests -
 they belong to generic dispatch queue.

-elevator_add_req_fn		called to add a new request into the scheduler
+elevator_add_req_fn*		called to add a new request into the scheduler

 elevator_queue_empty_fn		returns true if the merge queue is empty.
 Drivers shouldn't use this, but rather check
@@ -991,7 +991,7 @@ elevator_activate_req_fn Called when device driver first sees a request.
 elevator_deactivate_req_fn	Called when device driver decides to delay
 a request by requeueing it.

-elevator_init_fn
+elevator_init_fn*
 elevator_exit_fn		Allocate and free any elevator specific storage
 for a queue.


@@ -2,14 +2,14 @@
 IP-Aliasing:
 ============

-IP-aliases are additional IP-addresses/masks hooked up to a base
-interface by adding a colon and a string when running ifconfig.
+IP-aliases are an obsolete way to manage multiple IP-addresses/masks
+per interface. Newer tools such as iproute2 support multiple
+address/prefixes per interface, but aliases are still supported
+for backwards compatibility.
+
+An alias is formed by adding a colon and a string when running ifconfig.
 This string is usually numeric, but this is not a must.

-IP-Aliases are avail if CONFIG_INET (`standard' IPv4 networking)
-is configured in the kernel.

 o Alias creation.
 Alias creation is done by 'magic' interface naming: eg. to create a
 200.1.1.1 alias for eth0 ...
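
For reference, the 'magic' naming carries over to the ioctl interface: setting an address on the name "eth0:0" is what creates the alias. A hedged user-space sketch, equivalent to "ifconfig eth0:0 200.1.1.1", with error handling kept minimal:

#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct ifreq ifr;
	struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	/* the colon in the interface name is what creates the alias */
	strncpy(ifr.ifr_name, "eth0:0", IFNAMSIZ - 1);
	sin->sin_family = AF_INET;
	inet_pton(AF_INET, "200.1.1.1", &sin->sin_addr);

	if (ioctl(fd, SIOCSIFADDR, &ifr) < 0) {
		perror("SIOCSIFADDR");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}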
@@ -38,16 +38,3 @@ o Relationship with main device
 If the base device is shut down the added aliases will be deleted
 too.
-
-Contact
--------
-Please finger or e-mail me:
-   Juan Jose Ciarlante <jjciarla@raiz.uncu.edu.ar>
-
-Updated by Erik Schoenfelder <schoenfr@gaertner.DE>
-
-; local variables:
-; mode: indented-text
-; mode: auto-fill
-; end:


@@ -2836,8 +2836,6 @@ S: Maintained
 MAC80211
 P:	Johannes Berg
 M:	johannes@sipsolutions.net
-P:	Michael Wu
-M:	flamingice@sourmilk.net
 L:	linux-wireless@vger.kernel.org
 W:	http://linuxwireless.org/
 T:	git kernel.org:/pub/scm/linux/kernel/git/linville/wireless-2.6.git


@@ -768,7 +768,8 @@ extern int sysenter_setup(void);
 extern struct desc_ptr early_gdt_descr;

 extern void cpu_set_gdt(int);
-extern void switch_to_new_gdt(void);
+extern void switch_to_new_gdt(int);
+extern void load_percpu_segment(int);
 extern void cpu_init(void);

 static inline unsigned long get_debugctlmsr(void)


@@ -253,17 +253,8 @@ static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
 __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;

-/* Current gdt points %fs at the "master" per-cpu area: after this,
- * it's on the real one. */
-void switch_to_new_gdt(void)
+void load_percpu_segment(int cpu)
 {
-	struct desc_ptr gdt_descr;
-	int cpu = smp_processor_id();
-
-	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
-	gdt_descr.size = GDT_SIZE - 1;
-	load_gdt(&gdt_descr);
-	/* Reload the per-cpu base */
 #ifdef CONFIG_X86_32
 	loadsegment(fs, __KERNEL_PERCPU);
 #else
@@ -272,6 +263,20 @@ void switch_to_new_gdt(void)
 #endif
 }

+/* Current gdt points %fs at the "master" per-cpu area: after this,
+ * it's on the real one. */
+void switch_to_new_gdt(int cpu)
+{
+	struct desc_ptr gdt_descr;
+
+	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
+	gdt_descr.size = GDT_SIZE - 1;
+	load_gdt(&gdt_descr);
+	/* Reload the per-cpu base */
+	load_percpu_segment(cpu);
+}
+
 static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

 static void __cpuinit default_init(struct cpuinfo_x86 *c)
@@ -993,7 +998,7 @@ void __cpuinit cpu_init(void)
 	 * and set up the GDT descriptor:
 	 */
-	switch_to_new_gdt();
+	switch_to_new_gdt(cpu);
 	loadsegment(fs, 0);

 	load_idt((const struct desc_ptr *)&idt_descr);
@@ -1098,7 +1103,7 @@ void __cpuinit cpu_init(void)
 	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
 	load_idt(&idt_descr);
-	switch_to_new_gdt();
+	switch_to_new_gdt(cpu);

 	/*
 	 * Set up and load the per-CPU TSS and LDT


@@ -122,7 +122,7 @@ void __init setup_per_cpu_areas(void)
 	 * area.  Reload any changed state for the boot CPU.
 	 */
 	if (cpu == boot_cpu_id)
-		switch_to_new_gdt();
+		switch_to_new_gdt(cpu);

 	DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
 }


@@ -1185,7 +1185,7 @@ out:
 void __init native_smp_prepare_boot_cpu(void)
 {
 	int me = smp_processor_id();
-	switch_to_new_gdt();
+	switch_to_new_gdt(me);
 	/* already set me in cpu_online_mask in boot_cpu_init() */
 	cpumask_set_cpu(me, cpu_callout_mask);
 	per_cpu(cpu_state, me) = CPU_ONLINE;


@@ -259,7 +259,7 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
 	 * the cpu's, all of which are still in the mask.
 	 */
 	__get_cpu_var(ptcstats).ptc_i++;
-	return 0;
+	return flush_mask;
 }

 /*


@@ -22,6 +22,7 @@ PHDRS {
 #ifdef CONFIG_SMP
 	percpu PT_LOAD FLAGS(7);	/* RWE */
 #endif
+	data.init2 PT_LOAD FLAGS(7);	/* RWE */
 	note PT_NOTE FLAGS(0);	/* ___ */
 }
 SECTIONS
@@ -215,7 +216,7 @@ SECTIONS
 	/*
 	 * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
 	 * output PHDR, so the next output section - __data_nosave - should
-	 * switch it back to data.init.  Also, pda should be at the head of
+	 * start another section data.init2.  Also, pda should be at the head of
 	 * percpu area.  Preallocate it and define the percpu offset symbol
 	 * so that it can be accessed as a percpu variable.
 	 */
@@ -232,7 +233,7 @@ SECTIONS
 	__nosave_begin = .;
 	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
 		*(.data.nosave)
-	} :data.init	/* switch back to data.init, see PERCPU_VADDR() above */
+	} :data.init2	/* use another section data.init2, see PERCPU_VADDR() above */
 	. = ALIGN(PAGE_SIZE);
 	__nosave_end = .;


@@ -1746,12 +1746,13 @@ static void __init voyager_smp_prepare_cpus(unsigned int max_cpus)
 static void __cpuinit voyager_smp_prepare_boot_cpu(void)
 {
-	switch_to_new_gdt();
+	int cpu = smp_processor_id();
+	switch_to_new_gdt(cpu);

-	cpu_set(smp_processor_id(), cpu_online_map);
-	cpu_set(smp_processor_id(), cpu_callout_map);
-	cpu_set(smp_processor_id(), cpu_possible_map);
-	cpu_set(smp_processor_id(), cpu_present_map);
+	cpu_set(cpu, cpu_online_map);
+	cpu_set(cpu, cpu_callout_map);
+	cpu_set(cpu, cpu_possible_map);
+	cpu_set(cpu, cpu_present_map);
 }

 static int __cpuinit voyager_cpu_up(unsigned int cpu)


@@ -917,6 +917,9 @@ asmlinkage void __init xen_start_kernel(void)
 	have_vcpu_info_placement = 0;
 #endif

+	/* setup percpu state */
+	load_percpu_segment(0);
+
 	xen_smp_init();

 	/* Get mfn list */


@@ -170,7 +170,8 @@ static void __init xen_smp_prepare_boot_cpu(void)
 	/* We've switched to the "real" per-cpu gdt, so make sure the
 	   old memory can be recycled */
-	make_lowmem_page_readwrite(&per_cpu_var(gdt_page));
+	make_lowmem_page_readwrite(__per_cpu_load +
+				   (unsigned long)&per_cpu_var(gdt_page));

 	xen_setup_vcpu_info_placement();
 }
@@ -235,6 +236,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	ctxt->user_regs.ss = __KERNEL_DS;
 #ifdef CONFIG_X86_32
 	ctxt->user_regs.fs = __KERNEL_PERCPU;
+#else
+	ctxt->gs_base_kernel = per_cpu_offset(cpu);
 #endif
 	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
 	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */


@@ -302,7 +302,7 @@ static void bio_end_empty_barrier(struct bio *bio, int err)
  * Description:
  *    Issue a flush for the block device in question. Caller can supply
  *    room for storing the error offset in case of a flush error, if they
- *    wish to.  Caller must run wait_for_completion() on its own.
+ *    wish to.
  */
 int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 {


@@ -64,11 +64,12 @@ static struct workqueue_struct *kblockd_workqueue;
 static void drive_stat_acct(struct request *rq, int new_io)
 {
+	struct gendisk *disk = rq->rq_disk;
 	struct hd_struct *part;
 	int rw = rq_data_dir(rq);
 	int cpu;

-	if (!blk_fs_request(rq) || !rq->rq_disk)
+	if (!blk_fs_request(rq) || !disk || !blk_queue_io_stat(disk->queue))
 		return;

 	cpu = part_stat_lock();
@@ -599,8 +600,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	q->request_fn		= rfn;
 	q->prep_rq_fn		= NULL;
 	q->unplug_fn		= generic_unplug_device;
-	q->queue_flags		= (1 << QUEUE_FLAG_CLUSTER |
-				   1 << QUEUE_FLAG_STACKABLE);
+	q->queue_flags		= QUEUE_FLAG_DEFAULT;
 	q->queue_lock		= lock;

 	blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
@@ -1125,6 +1125,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	if (bio_sync(bio))
 		req->cmd_flags |= REQ_RW_SYNC;
+	if (bio_unplug(bio))
+		req->cmd_flags |= REQ_UNPLUG;
 	if (bio_rw_meta(bio))
 		req->cmd_flags |= REQ_RW_META;
@@ -1141,6 +1143,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	int el_ret, nr_sectors;
 	const unsigned short prio = bio_prio(bio);
 	const int sync = bio_sync(bio);
+	const int unplug = bio_unplug(bio);
 	int rw_flags;

 	nr_sectors = bio_sectors(bio);
@@ -1244,7 +1247,7 @@ get_rq:
 		blk_plug_device(q);
 	add_request(q, req);
 out:
-	if (sync || blk_queue_nonrot(q))
+	if (unplug || blk_queue_nonrot(q))
 		__generic_unplug_device(q);
 	spin_unlock_irq(q->queue_lock);
 	return 0;
@@ -1448,6 +1451,11 @@ static inline void __generic_make_request(struct bio *bio)
 			err = -EOPNOTSUPP;
 			goto end_io;
 		}
+		if (bio_barrier(bio) && bio_has_data(bio) &&
+		    (q->next_ordered == QUEUE_ORDERED_NONE)) {
+			err = -EOPNOTSUPP;
+			goto end_io;
+		}

 		ret = q->make_request_fn(q, bio);
 	} while (ret);
@@ -1655,6 +1663,55 @@ void blkdev_dequeue_request(struct request *req)
 }
 EXPORT_SYMBOL(blkdev_dequeue_request);

+static void blk_account_io_completion(struct request *req, unsigned int bytes)
+{
+	struct gendisk *disk = req->rq_disk;
+
+	if (!disk || !blk_queue_io_stat(disk->queue))
+		return;
+
+	if (blk_fs_request(req)) {
+		const int rw = rq_data_dir(req);
+		struct hd_struct *part;
+		int cpu;
+
+		cpu = part_stat_lock();
+		part = disk_map_sector_rcu(req->rq_disk, req->sector);
+		part_stat_add(cpu, part, sectors[rw], bytes >> 9);
+		part_stat_unlock();
+	}
+}
+
+static void blk_account_io_done(struct request *req)
+{
+	struct gendisk *disk = req->rq_disk;
+
+	if (!disk || !blk_queue_io_stat(disk->queue))
+		return;
+
+	/*
+	 * Account IO completion.  bar_rq isn't accounted as a normal
+	 * IO on queueing nor completion.  Accounting the containing
+	 * request is enough.
+	 */
+	if (blk_fs_request(req) && req != &req->q->bar_rq) {
+		unsigned long duration = jiffies - req->start_time;
+		const int rw = rq_data_dir(req);
+		struct hd_struct *part;
+		int cpu;
+
+		cpu = part_stat_lock();
+		part = disk_map_sector_rcu(disk, req->sector);
+
+		part_stat_inc(cpu, part, ios[rw]);
+		part_stat_add(cpu, part, ticks[rw], duration);
+		part_round_stats(cpu, part);
+		part_dec_in_flight(part);
+
+		part_stat_unlock();
+	}
+}
+
 /**
  * __end_that_request_first - end I/O on a request
  * @req:      the request being processed
@@ -1690,16 +1747,7 @@ static int __end_that_request_first(struct request *req, int error,
 				(unsigned long long)req->sector);
 	}

-	if (blk_fs_request(req) && req->rq_disk) {
-		const int rw = rq_data_dir(req);
-		struct hd_struct *part;
-		int cpu;
-
-		cpu = part_stat_lock();
-		part = disk_map_sector_rcu(req->rq_disk, req->sector);
-		part_stat_add(cpu, part, sectors[rw], nr_bytes >> 9);
-		part_stat_unlock();
-	}
+	blk_account_io_completion(req, nr_bytes);

 	total_bytes = bio_nbytes = 0;
 	while ((bio = req->bio) != NULL) {
@@ -1779,8 +1827,6 @@ static int __end_that_request_first(struct request *req, int error,
  */
 static void end_that_request_last(struct request *req, int error)
 {
-	struct gendisk *disk = req->rq_disk;
-
 	if (blk_rq_tagged(req))
 		blk_queue_end_tag(req->q, req);
@@ -1792,27 +1838,7 @@ static void end_that_request_last(struct request *req, int error)

 	blk_delete_timer(req);

-	/*
-	 * Account IO completion.  bar_rq isn't accounted as a normal
-	 * IO on queueing nor completion.  Accounting the containing
-	 * request is enough.
-	 */
-	if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
-		unsigned long duration = jiffies - req->start_time;
-		const int rw = rq_data_dir(req);
-		struct hd_struct *part;
-		int cpu;
-
-		cpu = part_stat_lock();
-		part = disk_map_sector_rcu(disk, req->sector);
-
-		part_stat_inc(cpu, part, ios[rw]);
-		part_stat_add(cpu, part, ticks[rw], duration);
-		part_round_stats(cpu, part);
-		part_dec_in_flight(part);
-
-		part_stat_unlock();
-	}
+	blk_account_io_done(req);

 	if (req->end_io)
 		req->end_io(req, error);


@@ -309,24 +309,24 @@ static struct kobj_type integrity_ktype = {
 /**
  * blk_integrity_register - Register a gendisk as being integrity-capable
  * @disk:	struct gendisk pointer to make integrity-aware
- * @template:	integrity profile
+ * @template:	optional integrity profile to register
  *
  * Description: When a device needs to advertise itself as being able
  * to send/receive integrity metadata it must use this function to
  * register the capability with the block layer.  The template is a
  * blk_integrity struct with values appropriate for the underlying
- * hardware.  See Documentation/block/data-integrity.txt.
+ * hardware.  If template is NULL the new profile is allocated but
+ * not filled out.  See Documentation/block/data-integrity.txt.
  */
 int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
 {
 	struct blk_integrity *bi;

 	BUG_ON(disk == NULL);
-	BUG_ON(template == NULL);

 	if (disk->integrity == NULL) {
 		bi = kmem_cache_alloc(integrity_cachep,
 				      GFP_KERNEL | __GFP_ZERO);
 		if (!bi)
 			return -1;
@@ -346,13 +346,16 @@ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
 	bi = disk->integrity;

 	/* Use the provided profile as template */
-	bi->name = template->name;
-	bi->generate_fn = template->generate_fn;
-	bi->verify_fn = template->verify_fn;
-	bi->tuple_size = template->tuple_size;
-	bi->set_tag_fn = template->set_tag_fn;
-	bi->get_tag_fn = template->get_tag_fn;
-	bi->tag_size = template->tag_size;
+	if (template != NULL) {
+		bi->name = template->name;
+		bi->generate_fn = template->generate_fn;
+		bi->verify_fn = template->verify_fn;
+		bi->tuple_size = template->tuple_size;
+		bi->set_tag_fn = template->set_tag_fn;
+		bi->get_tag_fn = template->get_tag_fn;
+		bi->tag_size = template->tag_size;
+	} else
+		bi->name = "unsupported";

 	return 0;
 }


@@ -130,6 +130,27 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
 	return queue_var_show(max_hw_sectors_kb, (page));
 }

+static ssize_t queue_nonrot_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(!blk_queue_nonrot(q), page);
+}
+
+static ssize_t queue_nonrot_store(struct request_queue *q, const char *page,
+				  size_t count)
+{
+	unsigned long nm;
+	ssize_t ret = queue_var_store(&nm, page, count);
+
+	spin_lock_irq(q->queue_lock);
+	if (nm)
+		queue_flag_clear(QUEUE_FLAG_NONROT, q);
+	else
+		queue_flag_set(QUEUE_FLAG_NONROT, q);
+	spin_unlock_irq(q->queue_lock);
+
+	return ret;
+}
+
 static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
 {
 	return queue_var_show(blk_queue_nomerges(q), page);
@@ -146,8 +167,8 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
 		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 	else
 		queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
 	spin_unlock_irq(q->queue_lock);

 	return ret;
 }
@@ -176,6 +197,27 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
 	return ret;
 }

+static ssize_t queue_iostats_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(blk_queue_io_stat(q), page);
+}
+
+static ssize_t queue_iostats_store(struct request_queue *q, const char *page,
+				   size_t count)
+{
+	unsigned long stats;
+	ssize_t ret = queue_var_store(&stats, page, count);
+
+	spin_lock_irq(q->queue_lock);
+	if (stats)
+		queue_flag_set(QUEUE_FLAG_IO_STAT, q);
+	else
+		queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
+	spin_unlock_irq(q->queue_lock);
+
+	return ret;
+}
+
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_requests_show,
@@ -210,6 +252,12 @@ static struct queue_sysfs_entry queue_hw_sector_size_entry = {
 	.show = queue_hw_sector_size_show,
 };

+static struct queue_sysfs_entry queue_nonrot_entry = {
+	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_nonrot_show,
+	.store = queue_nonrot_store,
+};
+
 static struct queue_sysfs_entry queue_nomerges_entry = {
 	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_nomerges_show,
@@ -222,6 +270,12 @@ static struct queue_sysfs_entry queue_rq_affinity_entry = {
 	.store = queue_rq_affinity_store,
 };

+static struct queue_sysfs_entry queue_iostats_entry = {
+	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_iostats_show,
+	.store = queue_iostats_store,
+};
+
 static struct attribute *default_attrs[] = {
 	&queue_requests_entry.attr,
 	&queue_ra_entry.attr,
@@ -229,8 +283,10 @@ static struct attribute *default_attrs[] = {
 	&queue_max_sectors_entry.attr,
 	&queue_iosched_entry.attr,
 	&queue_hw_sector_size_entry.attr,
+	&queue_nonrot_entry.attr,
 	&queue_nomerges_entry.attr,
 	&queue_rq_affinity_entry.attr,
+	&queue_iostats_entry.attr,
 	NULL,
 };
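
The two attributes added above surface as /sys/block/<dev>/queue/rotational and /sys/block/<dev>/queue/iostats. A hedged user-space sketch of toggling them; the device name "sda" is an assumption for illustration:

#include <stdio.h>

static int write_queue_attr(const char *dev, const char *attr, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/%s/queue/%s", dev, attr);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	/* Mark the disk non-rotational (e.g. an SSD) ... */
	write_queue_attr("sda", "rotational", "0");
	/* ... and turn off per-partition I/O accounting. */
	write_queue_attr("sda", "iostats", "0");
	return 0;
}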


@@ -187,59 +187,12 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 static struct dentry *blk_tree_root;
 static DEFINE_MUTEX(blk_tree_mutex);
-static unsigned int root_users;
-
-static inline void blk_remove_root(void)
-{
-	if (blk_tree_root) {
-		debugfs_remove(blk_tree_root);
-		blk_tree_root = NULL;
-	}
-}
-
-static void blk_remove_tree(struct dentry *dir)
-{
-	mutex_lock(&blk_tree_mutex);
-	debugfs_remove(dir);
-	if (--root_users == 0)
-		blk_remove_root();
-	mutex_unlock(&blk_tree_mutex);
-}
-
-static struct dentry *blk_create_tree(const char *blk_name)
-{
-	struct dentry *dir = NULL;
-	int created = 0;
-
-	mutex_lock(&blk_tree_mutex);
-
-	if (!blk_tree_root) {
-		blk_tree_root = debugfs_create_dir("block", NULL);
-		if (!blk_tree_root)
-			goto err;
-		created = 1;
-	}
-
-	dir = debugfs_create_dir(blk_name, blk_tree_root);
-	if (dir)
-		root_users++;
-	else {
-		/* Delete root only if we created it */
-		if (created)
-			blk_remove_root();
-	}
-
-err:
-	mutex_unlock(&blk_tree_mutex);
-	return dir;
-}

 static void blk_trace_cleanup(struct blk_trace *bt)
 {
-	relay_close(bt->rchan);
 	debugfs_remove(bt->msg_file);
 	debugfs_remove(bt->dropped_file);
-	blk_remove_tree(bt->dir);
+	relay_close(bt->rchan);
 	free_percpu(bt->sequence);
 	free_percpu(bt->msg_data);
 	kfree(bt);
@@ -346,7 +299,18 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
 static int blk_remove_buf_file_callback(struct dentry *dentry)
 {
+	struct dentry *parent = dentry->d_parent;
 	debugfs_remove(dentry);
+
+	/*
+	 * this will fail for all but the last file, but that is ok. what we
+	 * care about is the top level buts->name directory going away, when
+	 * the last trace file is gone. Then we don't have to rmdir() that
+	 * manually on trace stop, so it nicely solves the issue with
+	 * force killing of running traces.
+	 */
+	debugfs_remove(parent);
+
 	return 0;
 }
@@ -404,7 +368,15 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 		goto err;

 	ret = -ENOENT;
-	dir = blk_create_tree(buts->name);
+
+	if (!blk_tree_root) {
+		blk_tree_root = debugfs_create_dir("block", NULL);
+		if (!blk_tree_root)
+			return -ENOMEM;
+	}
+
+	dir = debugfs_create_dir(buts->name, blk_tree_root);
+
 	if (!dir)
 		goto err;
@@ -458,8 +430,6 @@ probe_err:
 	atomic_dec(&blk_probes_ref);
 	mutex_unlock(&blk_probe_mutex);
 err:
-	if (dir)
-		blk_remove_tree(dir);
 	if (bt) {
 		if (bt->msg_file)
 			debugfs_remove(bt->msg_file);


@@ -84,6 +84,11 @@ struct cfq_data {
 	 */
 	struct cfq_rb_root service_tree;
 	unsigned int busy_queues;
+	/*
+	 * Used to track any pending rt requests so we can pre-empt current
+	 * non-RT cfqq in service when this value is non-zero.
+	 */
+	unsigned int busy_rt_queues;

 	int rq_in_driver;
 	int sync_flight;
@@ -562,6 +567,8 @@ static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	BUG_ON(cfq_cfqq_on_rr(cfqq));
 	cfq_mark_cfqq_on_rr(cfqq);
 	cfqd->busy_queues++;
+	if (cfq_class_rt(cfqq))
+		cfqd->busy_rt_queues++;

 	cfq_resort_rr_list(cfqd, cfqq);
 }
@@ -581,6 +588,8 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)

 	BUG_ON(!cfqd->busy_queues);
 	cfqd->busy_queues--;
+	if (cfq_class_rt(cfqq))
+		cfqd->busy_rt_queues--;
 }

 /*
@@ -1004,6 +1013,20 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 	if (cfq_slice_used(cfqq))
 		goto expire;

+	/*
+	 * If we have a RT cfqq waiting, then we pre-empt the current non-rt
+	 * cfqq.
+	 */
+	if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues) {
+		/*
+		 * We simulate this as cfqq timed out so that it gets to bank
+		 * the remaining of its time slice.
+		 */
+		cfq_log_cfqq(cfqd, cfqq, "preempt");
+		cfq_slice_expired(cfqd, 1);
+		goto new_queue;
+	}
+
 	/*
 	 * The active queue has requests and isn't expired, allow it to
 	 * dispatch.
@@ -1067,6 +1090,13 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		if (RB_EMPTY_ROOT(&cfqq->sort_list))
 			break;

+		/*
+		 * If there is a non-empty RT cfqq waiting for current
+		 * cfqq's timeslice to complete, pre-empt this cfqq
+		 */
+		if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues)
+			break;
+
 	} while (dispatched < max_dispatch);

 	/*
@@ -1801,6 +1831,12 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 	if (rq_is_meta(rq) && !cfqq->meta_pending)
 		return 1;

+	/*
+	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
+	 */
+	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
+		return 1;
+
 	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
 		return 0;
@@ -1870,7 +1906,8 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		/*
 		 * not the active queue - expire current slice if it is
 		 * idle and has expired it's mean thinktime or this new queue
-		 * has some old slice time left and is of higher priority
+		 * has some old slice time left and is of higher priority or
+		 * this new queue is RT and the current one is BE
 		 */
 		cfq_preempt_queue(cfqd, cfqq);
 		cfq_mark_cfqq_must_dispatch(cfqq);
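
The pre-emption logic above keys off cfq_class_rt(), i.e. queues whose owning task set the IOPRIO_CLASS_RT I/O priority class. A hedged sketch of how a process opts into that class; the constants mirror include/linux/ioprio.h, and since glibc ships no wrapper the raw syscall is used:

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_CLASS_RT		1
#define IOPRIO_WHO_PROCESS	1
#define IOPRIO_PRIO_VALUE(class, data) \
	(((class) << IOPRIO_CLASS_SHIFT) | (data))

int main(void)
{
	/* RT class, priority level 4, for the calling process (pid 0). */
	if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
		    IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 4)) < 0) {
		perror("ioprio_set");
		return 1;
	}
	/* I/O issued from here on lands in an RT cfqq. */
	return 0;
}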


@@ -31,7 +31,7 @@

 char e1000_driver_name[] = "e1000";
 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-#define DRV_VERSION "7.3.20-k3-NAPI"
+#define DRV_VERSION "7.3.21-k3-NAPI"
 const char e1000_driver_version[] = DRV_VERSION;
 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
@@ -3712,7 +3712,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 rctl, icr = er32(ICR);

-	if (unlikely(!icr))
+	if (unlikely((!icr) || test_bit(__E1000_RESETTING, &adapter->flags)))
 		return IRQ_NONE;  /* Not our interrupt */

 	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is


@@ -234,6 +234,8 @@ static int gfar_mdio_probe(struct of_device *ofdev,
 	if (NULL == new_bus)
 		return -ENOMEM;

+	device_init_wakeup(&ofdev->dev, 1);
+
 	new_bus->name = "Gianfar MII Bus",
 	new_bus->read = &gfar_mdio_read,
 	new_bus->write = &gfar_mdio_write,


@@ -210,7 +210,7 @@
 #define MAX_CMD_DESCRIPTORS_HOST	1024
 #define MAX_RCV_DESCRIPTORS_1G		2048
 #define MAX_RCV_DESCRIPTORS_10G		4096
-#define MAX_JUMBO_RCV_DESCRIPTORS	512
+#define MAX_JUMBO_RCV_DESCRIPTORS	1024
 #define MAX_LRO_RCV_DESCRIPTORS		8
 #define MAX_RCVSTATUS_DESCRIPTORS	MAX_RCV_DESCRIPTORS
 #define MAX_JUMBO_RCV_DESC	MAX_JUMBO_RCV_DESCRIPTORS


@@ -947,8 +947,10 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
 	}
 	for (i = 0; i < n; i++) {
 		if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
-		    netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0)
+		    netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
+			kfree(buf);
 			return -EIO;
+		}

 		buf[i].addr = addr;
 		buf[i].data = val;


@@ -438,7 +438,6 @@ static void r6040_down(struct net_device *dev)
 {
 	struct r6040_private *lp = netdev_priv(dev);
 	void __iomem *ioaddr = lp->base;
-	struct pci_dev *pdev = lp->pdev;
 	int limit = 2048;
 	u16 *adrp;
 	u16 cmd;


@@ -1003,9 +1003,9 @@ static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 		break;
 	case SKFP_CLR_STATS:	/* Zero out the driver statistics */
 		if (!capable(CAP_NET_ADMIN)) {
-			memset(&lp->MacStat, 0, sizeof(lp->MacStat));
-		} else {
 			status = -EPERM;
+		} else {
+			memset(&lp->MacStat, 0, sizeof(lp->MacStat));
 		}
 		break;
 	default:


@@ -953,7 +953,7 @@ smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes)
 		do {
 			udelay(1);
 			val = smsc911x_reg_read(pdata, RX_DP_CTRL);
-		} while (timeout-- && (val & RX_DP_CTRL_RX_FFWD_));
+		} while (--timeout && (val & RX_DP_CTRL_RX_FFWD_));

 		if (unlikely(timeout == 0))
 			SMSC_WARNING(HW, "Timed out waiting for "


@@ -1378,6 +1378,7 @@ static int smsc9420_open(struct net_device *dev)

 	/* test the IRQ connection to the ISR */
 	smsc_dbg(IFUP, "Testing ISR using IRQ %d", dev->irq);
+	pd->software_irq_signal = false;

 	spin_lock_irqsave(&pd->int_lock, flags);
 	/* configure interrupt deassertion timer and enable interrupts */
@@ -1393,8 +1394,6 @@ static int smsc9420_open(struct net_device *dev)
 	smsc9420_pci_flush_write(pd);

 	timeout = 1000;
-	pd->software_irq_signal = false;
-	smp_wmb();
 	while (timeout--) {
 		if (pd->software_irq_signal)
 			break;


@@ -9,6 +9,11 @@
 	Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
 	for more information on this driver.

+	DC21143 manual "21143 PCI/CardBus 10/100Mb/s Ethernet LAN Controller
+	Hardware Reference Manual" is currently available at :
+	http://developer.intel.com/design/network/manuals/278074.htm
+
 	Please submit bugs to http://bugzilla.kernel.org/ .
 */
@@ -32,7 +37,11 @@ void t21142_media_task(struct work_struct *work)
 	int csr12 = ioread32(ioaddr + CSR12);
 	int next_tick = 60*HZ;
 	int new_csr6 = 0;
+	int csr14 = ioread32(ioaddr + CSR14);

+	/* CSR12[LS10,LS100] are not reliable during autonegotiation */
+	if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
+		csr12 |= 6;
 	if (tulip_debug > 2)
 		printk(KERN_INFO"%s: 21143 negotiation status %8.8x, %s.\n",
 		       dev->name, csr12, medianame[dev->if_port]);
@@ -76,7 +85,7 @@ void t21142_media_task(struct work_struct *work)
 			new_csr6 = 0x83860000;
 			dev->if_port = 3;
 			iowrite32(0, ioaddr + CSR13);
-			iowrite32(0x0003FF7F, ioaddr + CSR14);
+			iowrite32(0x0003FFFF, ioaddr + CSR14);
 			iowrite16(8, ioaddr + CSR15);
 			iowrite32(1, ioaddr + CSR13);
 		}
@@ -132,10 +141,14 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
 	struct tulip_private *tp = netdev_priv(dev);
 	void __iomem *ioaddr = tp->base_addr;
 	int csr12 = ioread32(ioaddr + CSR12);
+	int csr14 = ioread32(ioaddr + CSR14);

+	/* CSR12[LS10,LS100] are not reliable during autonegotiation */
+	if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
+		csr12 |= 6;
 	if (tulip_debug > 1)
 		printk(KERN_INFO"%s: 21143 link status interrupt %8.8x, CSR5 %x, "
-		       "%8.8x.\n", dev->name, csr12, csr5, ioread32(ioaddr + CSR14));
+		       "%8.8x.\n", dev->name, csr12, csr5, csr14);

 	/* If NWay finished and we have a negotiated partner capability. */
 	if (tp->nway && !tp->nwayset && (csr12 & 0x7000) == 0x5000) {
@@ -143,7 +156,9 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
 		int negotiated = tp->sym_advertise & (csr12 >> 16);
 		tp->lpar = csr12 >> 16;
 		tp->nwayset = 1;
-		if (negotiated & 0x0100)	dev->if_port = 5;
+		/* If partner cannot negotiate, it is 10Mbps Half Duplex */
+		if (!(csr12 & 0x8000))		dev->if_port = 0;
+		else if (negotiated & 0x0100)	dev->if_port = 5;
 		else if (negotiated & 0x0080)	dev->if_port = 3;
 		else if (negotiated & 0x0040)	dev->if_port = 4;
 		else if (negotiated & 0x0020)	dev->if_port = 0;
@@ -214,7 +229,7 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
 			tp->timer.expires = RUN_AT(3*HZ);
 			add_timer(&tp->timer);
 		} else if (dev->if_port == 5)
-			iowrite32(ioread32(ioaddr + CSR14) & ~0x080, ioaddr + CSR14);
+			iowrite32(csr14 & ~0x080, ioaddr + CSR14);
 	} else if (dev->if_port == 0 || dev->if_port == 4) {
 		if ((csr12 & 4) == 0)
 			printk(KERN_INFO"%s: 21143 10baseT link beat good.\n",


@@ -1536,6 +1536,11 @@ static void adjust_link(struct net_device *dev)
 static int init_phy(struct net_device *dev)
 {
 	struct ucc_geth_private *priv = netdev_priv(dev);
+	struct device_node *np = priv->node;
+	struct device_node *phy, *mdio;
+	const phandle *ph;
+	char bus_name[MII_BUS_ID_SIZE];
+	const unsigned int *id;
 	struct phy_device *phydev;
 	char phy_id[BUS_ID_SIZE];
@@ -1543,8 +1548,18 @@ static int init_phy(struct net_device *dev)
 	priv->oldspeed = 0;
 	priv->oldduplex = -1;

-	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, priv->ug_info->mdio_bus,
-		 priv->ug_info->phy_address);
+	ph = of_get_property(np, "phy-handle", NULL);
+	phy = of_find_node_by_phandle(*ph);
+	mdio = of_get_parent(phy);
+
+	id = of_get_property(phy, "reg", NULL);
+
+	of_node_put(phy);
+	of_node_put(mdio);
+
+	uec_mdio_bus_name(bus_name, mdio);
+	snprintf(phy_id, sizeof(phy_id), "%s:%02x",
+		 bus_name, *id);

 	phydev = phy_connect(dev, phy_id, &adjust_link, 0, priv->phy_interface);
@@ -3748,6 +3763,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma

 	ugeth->ug_info = ug_info;
 	ugeth->dev = dev;
+	ugeth->node = np;

 	return 0;
 }


@@ -1186,6 +1186,8 @@ struct ucc_geth_private {
 	int oldspeed;
 	int oldduplex;
 	int oldlink;
+
+	struct device_node *node;
 };

 void uec_set_ethtool_ops(struct net_device *netdev);


@@ -156,7 +156,7 @@ static int uec_mdio_probe(struct of_device *ofdev, const struct of_device_id *ma
 	if (err)
 		goto reg_map_fail;

-	snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start);
+	uec_mdio_bus_name(new_bus->id, np);

 	new_bus->irq = kmalloc(32 * sizeof(int), GFP_KERNEL);
@@ -283,3 +283,13 @@ void uec_mdio_exit(void)
 {
 	of_unregister_platform_driver(&uec_mdio_driver);
 }
+
+void uec_mdio_bus_name(char *name, struct device_node *np)
+{
+	const u32 *reg;
+
+	reg = of_get_property(np, "reg", NULL);
+
+	snprintf(name, MII_BUS_ID_SIZE, "%s@%x", np->name, reg ? *reg : 0);
+}


@@ -97,4 +97,5 @@ int uec_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
 int uec_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
 int __init uec_mdio_init(void);
 void uec_mdio_exit(void);
+void uec_mdio_bus_name(char *name, struct device_node *np);
 #endif				/* __UEC_MII_H */


@@ -287,7 +287,7 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi)
 		skb_put(skb, MAX_PACKET_LEN);

 		hdr = skb_vnet_hdr(skb);
-		sg_init_one(sg, hdr, sizeof(*hdr));
+		sg_set_buf(sg, hdr, sizeof(*hdr));

 		if (vi->big_packets) {
 			for (i = 0; i < MAX_SKB_FRAGS; i++) {
@@ -488,9 +488,9 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)

 	/* Encode metadata header at front. */
 	if (vi->mergeable_rx_bufs)
-		sg_init_one(sg, mhdr, sizeof(*mhdr));
+		sg_set_buf(sg, mhdr, sizeof(*mhdr));
 	else
-		sg_init_one(sg, hdr, sizeof(*hdr));
+		sg_set_buf(sg, hdr, sizeof(*hdr));

 	num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;


@@ -234,20 +234,6 @@ struct dentry *debugfs_create_i2400m_reset(
 				   &fops_i2400m_reset);
 }

-/*
- * Debug levels control; see debug.h
- */
-struct d_level D_LEVEL[] = {
-	D_SUBMODULE_DEFINE(control),
-	D_SUBMODULE_DEFINE(driver),
-	D_SUBMODULE_DEFINE(debugfs),
-	D_SUBMODULE_DEFINE(fw),
-	D_SUBMODULE_DEFINE(netdev),
-	D_SUBMODULE_DEFINE(rfkill),
-	D_SUBMODULE_DEFINE(rx),
-	D_SUBMODULE_DEFINE(tx),
-};
-size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);

 #define __debugfs_register(prefix, name, parent)			\
 do {									\


@@ -707,6 +707,22 @@ void i2400m_release(struct i2400m *i2400m)
 EXPORT_SYMBOL_GPL(i2400m_release);

+/*
+ * Debug levels control; see debug.h
+ */
+struct d_level D_LEVEL[] = {
+	D_SUBMODULE_DEFINE(control),
+	D_SUBMODULE_DEFINE(driver),
+	D_SUBMODULE_DEFINE(debugfs),
+	D_SUBMODULE_DEFINE(fw),
+	D_SUBMODULE_DEFINE(netdev),
+	D_SUBMODULE_DEFINE(rfkill),
+	D_SUBMODULE_DEFINE(rx),
+	D_SUBMODULE_DEFINE(tx),
+};
+size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
+
 static
 int __init i2400m_driver_init(void)
 {


@@ -1028,6 +1028,8 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
  * it's done by reseting the chip.  To accomplish this we must
  * first cleanup any pending DMA, then restart stuff after a la
  * ath5k_init.
+ *
+ * Called with sc->lock.
  */
 static int
 ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
@@ -2814,11 +2816,17 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
 {
 	struct ath5k_softc *sc = hw->priv;
 	struct ieee80211_conf *conf = &hw->conf;
+	int ret;
+
+	mutex_lock(&sc->lock);

 	sc->bintval = conf->beacon_int;
 	sc->power_level = conf->power_level;

-	return ath5k_chan_set(sc, conf->channel);
+	ret = ath5k_chan_set(sc, conf->channel);
+
+	mutex_unlock(&sc->lock);
+	return ret;
 }

 static int


@@ -1719,6 +1719,10 @@ static int iwl_read_ucode(struct iwl_priv *priv)
 	priv->ucode_data_backup.len = data_size;
 	iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);

+	if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
+	    !priv->ucode_data_backup.v_addr)
+		goto err_pci_alloc;
+
 	/* Initialization instructions and data */
 	if (init_size && init_data_size) {
 		priv->ucode_init.len = init_size;


@@ -285,7 +285,10 @@ static void rtl8225_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
 	ofdm_power = priv->channels[channel - 1].hw_value >> 4;

 	cck_power = min(cck_power, (u8)11);
-	ofdm_power = min(ofdm_power, (u8)35);
+	if (ofdm_power > (u8)15)
+		ofdm_power = 25;
+	else
+		ofdm_power += 10;

 	rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK,
 			 rtl8225_tx_gain_cck_ofdm[cck_power / 6] >> 1);
@@ -536,7 +539,10 @@ static void rtl8225z2_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
 	cck_power += priv->txpwr_base & 0xF;
 	cck_power = min(cck_power, (u8)35);

-	ofdm_power = min(ofdm_power, (u8)15);
+	if (ofdm_power > (u8)15)
+		ofdm_power = 25;
+	else
+		ofdm_power += 10;
 	ofdm_power += priv->txpwr_base >> 4;
 	ofdm_power = min(ofdm_power, (u8)35);


@@ -161,6 +161,11 @@ static void jsm_tty_stop_rx(struct uart_port *port)
 	channel->ch_bd->bd_ops->disable_receiver(channel);
 }

+static void jsm_tty_enable_ms(struct uart_port *port)
+{
+	/* Nothing needed */
+}
+
 static void jsm_tty_break(struct uart_port *port, int break_state)
 {
 	unsigned long lock_flags;
@@ -345,6 +350,7 @@ static struct uart_ops jsm_ops = {
 	.start_tx	= jsm_tty_start_tx,
 	.send_xchar	= jsm_tty_send_xchar,
 	.stop_rx	= jsm_tty_stop_rx,
+	.enable_ms	= jsm_tty_enable_ms,
 	.break_ctl	= jsm_tty_break,
 	.startup	= jsm_tty_open,
 	.shutdown	= jsm_tty_close,


@@ -140,7 +140,6 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,

 	iv = bip_vec_idx(bip, bip->bip_vcnt);
 	BUG_ON(iv == NULL);
-	BUG_ON(iv->bv_page != NULL);

 	iv->bv_page = page;
 	iv->bv_len = len;
@@ -465,7 +464,7 @@ static int bio_integrity_verify(struct bio *bio)

 		if (ret) {
 			kunmap_atomic(kaddr, KM_USER0);
-			break;
+			return ret;
 		}

 		sectors = bv->bv_len / bi->sector_size;
@@ -493,18 +492,13 @@ static void bio_integrity_verify_fn(struct work_struct *work)
 	struct bio_integrity_payload *bip =
 		container_of(work, struct bio_integrity_payload, bip_work);
 	struct bio *bio = bip->bip_bio;
-	int error = bip->bip_error;
+	int error;

-	if (bio_integrity_verify(bio)) {
-		clear_bit(BIO_UPTODATE, &bio->bi_flags);
-		error = -EIO;
-	}
+	error = bio_integrity_verify(bio);

 	/* Restore original bio completion handler */
 	bio->bi_end_io = bip->bip_end_io;
+	bio_endio(bio, error);
-
-	if (bio->bi_end_io)
-		bio->bi_end_io(bio, error);
 }

 /**
@@ -525,7 +519,17 @@ void bio_integrity_endio(struct bio *bio, int error)

 	BUG_ON(bip->bip_bio != bio);

-	bip->bip_error = error;
+	/* In case of an I/O error there is no point in verifying the
+	 * integrity metadata.  Restore original bio end_io handler
+	 * and run it.
+	 */
+	if (error) {
+		bio->bi_end_io = bip->bip_end_io;
+		bio_endio(bio, error);
+		return;
+	}
+
 	INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
 	queue_work(kintegrityd_wq, &bip->bip_work);
 }


@@ -538,6 +538,7 @@ static int dev_ifsioc(unsigned int fd, unsigned int cmd, unsigned long arg)
 	 * cannot be fixed without breaking all existing apps.
 	 */
 	case TUNSETIFF:
+	case TUNGETIFF:
 	case SIOCGIFFLAGS:
 	case SIOCGIFMETRIC:
 	case SIOCGIFMTU:
@@ -1982,6 +1983,11 @@ COMPATIBLE_IOCTL(TUNSETNOCSUM)
 COMPATIBLE_IOCTL(TUNSETDEBUG)
 COMPATIBLE_IOCTL(TUNSETPERSIST)
 COMPATIBLE_IOCTL(TUNSETOWNER)
+COMPATIBLE_IOCTL(TUNSETLINK)
+COMPATIBLE_IOCTL(TUNSETGROUP)
+COMPATIBLE_IOCTL(TUNGETFEATURES)
+COMPATIBLE_IOCTL(TUNSETOFFLOAD)
+COMPATIBLE_IOCTL(TUNSETTXFILTER)
 /* Big V */
 COMPATIBLE_IOCTL(VT_SETMODE)
 COMPATIBLE_IOCTL(VT_GETMODE)
@@ -2573,6 +2579,7 @@ HANDLE_IOCTL(SIOCGIFPFLAGS, dev_ifsioc)
 HANDLE_IOCTL(SIOCGIFTXQLEN, dev_ifsioc)
 HANDLE_IOCTL(SIOCSIFTXQLEN, dev_ifsioc)
 HANDLE_IOCTL(TUNSETIFF, dev_ifsioc)
+HANDLE_IOCTL(TUNGETIFF, dev_ifsioc)
 HANDLE_IOCTL(SIOCETHTOOL, ethtool_ioctl)
 HANDLE_IOCTL(SIOCBONDENSLAVE, bond_ioctl)
 HANDLE_IOCTL(SIOCBONDRELEASE, bond_ioctl)


@@ -1358,7 +1358,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
 	struct fake_dirent *fde;

 	blocksize =  dir->i_sb->s_blocksize;
-	dxtrace(printk("Creating index\n"));
+	dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino));
 	retval = ext3_journal_get_write_access(handle, bh);
 	if (retval) {
 		ext3_std_error(dir->i_sb, retval);
@@ -1367,6 +1367,19 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
 	}
 	root = (struct dx_root *) bh->b_data;

+	/* The 0th block becomes the root, move the dirents out */
+	fde = &root->dotdot;
+	de = (struct ext3_dir_entry_2 *)((char *)fde +
+		ext3_rec_len_from_disk(fde->rec_len));
+	if ((char *) de >= (((char *) root) + blocksize)) {
+		ext3_error(dir->i_sb, __func__,
+			   "invalid rec_len for '..' in inode %lu",
+			   dir->i_ino);
+		brelse(bh);
+		return -EIO;
+	}
+	len = ((char *) root) + blocksize - (char *) de;
+
 	bh2 = ext3_append (handle, dir, &block, &retval);
 	if (!(bh2)) {
 		brelse(bh);
@@ -1375,11 +1388,6 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
 	EXT3_I(dir)->i_flags |= EXT3_INDEX_FL;
 	data1 = bh2->b_data;

-	/* The 0th block becomes the root, move the dirents out */
-	fde = &root->dotdot;
-	de = (struct ext3_dir_entry_2 *)((char *)fde +
-		ext3_rec_len_from_disk(fde->rec_len));
-	len = ((char *) root) + blocksize - (char *) de;
 	memcpy (data1, de, len);
 	de = (struct ext3_dir_entry_2 *) data1;
 	top = data1 + len;


@@ -684,15 +684,15 @@ ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb)
 		gdp = ext4_get_group_desc(sb, i, NULL);
 		if (!gdp)
 			continue;
-		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
+		desc_count += ext4_free_blks_count(sb, gdp);
 		brelse(bitmap_bh);
 		bitmap_bh = ext4_read_block_bitmap(sb, i);
 		if (bitmap_bh == NULL)
 			continue;

 		x = ext4_count_free(bitmap_bh, sb->s_blocksize);
-		printk(KERN_DEBUG "group %lu: stored = %d, counted = %u\n",
-			i, le16_to_cpu(gdp->bg_free_blocks_count), x);
+		printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
+			i, ext4_free_blks_count(sb, gdp), x);
 		bitmap_count += x;
 	}
 	brelse(bitmap_bh);


@@ -1206,8 +1206,11 @@ static inline void ext4_r_blocks_count_set(struct ext4_super_block *es,

 static inline loff_t ext4_isize(struct ext4_inode *raw_inode)
 {
-	return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) |
-		le32_to_cpu(raw_inode->i_size_lo);
+	if (S_ISREG(le16_to_cpu(raw_inode->i_mode)))
+		return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) |
+			le32_to_cpu(raw_inode->i_size_lo);
+	else
+		return (loff_t) le32_to_cpu(raw_inode->i_size_lo);
 }

 static inline void ext4_isize_set(struct ext4_inode *raw_inode, loff_t i_size)


@@ -3048,7 +3048,7 @@ retry:
 			WARN_ON(ret <= 0);
 			printk(KERN_ERR "%s: ext4_ext_get_blocks "
 				    "returned error inode#%lu, block=%u, "
-				    "max_blocks=%lu", __func__,
+				    "max_blocks=%u", __func__,
 				    inode->i_ino, block, max_blocks);
 #endif
 			ext4_mark_inode_dirty(handle, inode);


@@ -360,9 +360,9 @@ static int ext4_block_to_path(struct inode *inode,
 		final = ptrs;
 	} else {
 		ext4_warning(inode->i_sb, "ext4_block_to_path",
-				"block %lu > max",
+				"block %lu > max in inode %lu",
 				i_block + direct_blocks +
-				indirect_blocks + double_blocks);
+				indirect_blocks + double_blocks, inode->i_ino);
 	}
 	if (boundary)
 		*boundary = final - 1 - (i_block & (ptrs - 1));
@@ -2821,9 +2821,6 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
 		filemap_write_and_wait(mapping);
 	}

-	BUG_ON(!EXT4_JOURNAL(inode) &&
-	       EXT4_I(inode)->i_state & EXT4_STATE_JDATA);
-
 	if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
 		/*
 		 * This is a REALLY heavyweight approach, but the use of
@@ -3622,7 +3619,7 @@ static void ext4_free_data(handle_t *handle, struct inode *inode,
 	 * block pointed to itself, it would have been detached when
 	 * the block was cleared. Check for this instead of OOPSing.
 	 */
-	if (bh2jh(this_bh))
+	if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
 		ext4_handle_dirty_metadata(handle, inode, this_bh);
 	else
 		ext4_error(inode->i_sb, __func__,


@@ -3025,7 +3025,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
 		goto out_err;

 	ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
-			gdp->bg_free_blocks_count);
+			ext4_free_blks_count(sb, gdp));

 	err = ext4_journal_get_write_access(handle, gdp_bh);
 	if (err)


@ -1368,7 +1368,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
struct fake_dirent *fde; struct fake_dirent *fde;
blocksize = dir->i_sb->s_blocksize; blocksize = dir->i_sb->s_blocksize;
dxtrace(printk(KERN_DEBUG "Creating index\n")); dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino));
retval = ext4_journal_get_write_access(handle, bh); retval = ext4_journal_get_write_access(handle, bh);
if (retval) { if (retval) {
ext4_std_error(dir->i_sb, retval); ext4_std_error(dir->i_sb, retval);
@ -1377,6 +1377,20 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
} }
root = (struct dx_root *) bh->b_data; root = (struct dx_root *) bh->b_data;
/* The 0th block becomes the root, move the dirents out */
fde = &root->dotdot;
de = (struct ext4_dir_entry_2 *)((char *)fde +
ext4_rec_len_from_disk(fde->rec_len));
if ((char *) de >= (((char *) root) + blocksize)) {
ext4_error(dir->i_sb, __func__,
"invalid rec_len for '..' in inode %lu",
dir->i_ino);
brelse(bh);
return -EIO;
}
len = ((char *) root) + blocksize - (char *) de;
/* Allocate new block for the 0th block's dirents */
bh2 = ext4_append(handle, dir, &block, &retval); bh2 = ext4_append(handle, dir, &block, &retval);
if (!(bh2)) { if (!(bh2)) {
brelse(bh); brelse(bh);
@ -1385,11 +1399,6 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
EXT4_I(dir)->i_flags |= EXT4_INDEX_FL; EXT4_I(dir)->i_flags |= EXT4_INDEX_FL;
data1 = bh2->b_data; data1 = bh2->b_data;
/* The 0th block becomes the root, move the dirents out */
fde = &root->dotdot;
de = (struct ext4_dir_entry_2 *)((char *)fde +
ext4_rec_len_from_disk(fde->rec_len));
len = ((char *) root) + blocksize - (char *) de;
memcpy (data1, de, len); memcpy (data1, de, len);
de = (struct ext4_dir_entry_2 *) data1; de = (struct ext4_dir_entry_2 *) data1;
top = data1 + len; top = data1 + len;
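
The relocated block above validates the on-disk rec_len of '..' before any
new block is allocated, so a corrupted value can no longer send the dirent
pointer past the end of the root block. A hedged sketch of the same bounds
check with simplified types (function and variable names are illustrative):

#include <stdio.h>

static int dirents_after_dotdot(char *block, unsigned int blocksize,
				unsigned int dotdot_off, unsigned int rec_len)
{
	char *de = block + dotdot_off + rec_len;

	/* Mirrors the hunk: a rec_len pointing at or past the end
	 * of the block is corruption, not something to memcpy. */
	if (de >= block + blocksize) {
		fprintf(stderr, "invalid rec_len for '..'\n");
		return -1;	/* the kernel returns -EIO here */
	}
	return (int)(block + blocksize - de);	/* bytes to move out */
}

int main(void)
{
	char block[1024];

	printf("%d\n", dirents_after_dotdot(block, sizeof(block), 12, 12));
	printf("%d\n", dirents_after_dotdot(block, sizeof(block), 12, 4096));
	return 0;
}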

View File

@ -861,12 +861,13 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
gdp = (struct ext4_group_desc *)((char *)primary->b_data + gdp = (struct ext4_group_desc *)((char *)primary->b_data +
gdb_off * EXT4_DESC_SIZE(sb)); gdb_off * EXT4_DESC_SIZE(sb));
memset(gdp, 0, EXT4_DESC_SIZE(sb));
ext4_block_bitmap_set(sb, gdp, input->block_bitmap); /* LV FIXME */ ext4_block_bitmap_set(sb, gdp, input->block_bitmap); /* LV FIXME */
ext4_inode_bitmap_set(sb, gdp, input->inode_bitmap); /* LV FIXME */ ext4_inode_bitmap_set(sb, gdp, input->inode_bitmap); /* LV FIXME */
ext4_inode_table_set(sb, gdp, input->inode_table); /* LV FIXME */ ext4_inode_table_set(sb, gdp, input->inode_table); /* LV FIXME */
ext4_free_blks_set(sb, gdp, input->free_blocks_count); ext4_free_blks_set(sb, gdp, input->free_blocks_count);
ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb)); ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED); gdp->bg_flags = cpu_to_le16(EXT4_BG_INODE_ZEROED);
gdp->bg_checksum = ext4_group_desc_csum(sbi, input->group, gdp); gdp->bg_checksum = ext4_group_desc_csum(sbi, input->group, gdp);
/* /*

View File

@ -37,10 +37,10 @@
#include <linux/proc_fs.h> #include <linux/proc_fs.h>
#include <linux/debugfs.h> #include <linux/debugfs.h>
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/math64.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/div64.h>
EXPORT_SYMBOL(jbd2_journal_start); EXPORT_SYMBOL(jbd2_journal_start);
EXPORT_SYMBOL(jbd2_journal_restart); EXPORT_SYMBOL(jbd2_journal_restart);
@ -846,8 +846,8 @@ static int jbd2_seq_info_show(struct seq_file *seq, void *v)
jiffies_to_msecs(s->stats->u.run.rs_flushing / s->stats->ts_tid)); jiffies_to_msecs(s->stats->u.run.rs_flushing / s->stats->ts_tid));
seq_printf(seq, " %ums logging transaction\n", seq_printf(seq, " %ums logging transaction\n",
jiffies_to_msecs(s->stats->u.run.rs_logging / s->stats->ts_tid)); jiffies_to_msecs(s->stats->u.run.rs_logging / s->stats->ts_tid));
seq_printf(seq, " %luus average transaction commit time\n", seq_printf(seq, " %lluus average transaction commit time\n",
do_div(s->journal->j_average_commit_time, 1000)); div_u64(s->journal->j_average_commit_time, 1000));
seq_printf(seq, " %lu handles per transaction\n", seq_printf(seq, " %lu handles per transaction\n",
s->stats->u.run.rs_handle_count / s->stats->ts_tid); s->stats->u.run.rs_handle_count / s->stats->ts_tid);
seq_printf(seq, " %lu blocks per transaction\n", seq_printf(seq, " %lu blocks per transaction\n",
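
The do_div()-to-div_u64() switch above fixes two bugs at once: do_div()
divides its first argument in place and evaluates to the remainder, so the
old line printed the remainder and silently clobbered j_average_commit_time.
A hedged userspace sketch with simplified stand-ins for the two helpers (the
real ones live in asm/div64.h and linux/math64.h; the macro uses a GNU C
statement expression, like the kernel's):

#include <stdio.h>
#include <stdint.h>

/* Simplified do_div(): divide n in place, yield the remainder. */
#define do_div(n, base) ({				\
	uint32_t __rem = (uint32_t)((n) % (base));	\
	(n) /= (base);					\
	__rem;						\
})

/* Simplified div_u64(): return the quotient, leave n untouched. */
static inline uint64_t div_u64(uint64_t n, uint32_t base)
{
	return n / base;
}

int main(void)
{
	uint64_t commit_time = 12345678;	/* nanoseconds, say */
	uint64_t scratch = commit_time;

	/* Old pattern: prints 678 (the remainder), destroys scratch. */
	printf("do_div yields %u\n", (unsigned int)do_div(scratch, 1000));

	/* New pattern: prints 12345 (the quotient), commit_time intact. */
	printf("div_u64 yields %llu\n",
	       (unsigned long long)div_u64(commit_time, 1000));
	return 0;
}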

View File

@ -445,10 +445,9 @@
* section in the linker script will go there too. @phdr should have * section in the linker script will go there too. @phdr should have
* a leading colon. * a leading colon.
* *
* This macro defines three symbols, __per_cpu_load, __per_cpu_start * Note that this macro defines __per_cpu_load as an absolute symbol.
* and __per_cpu_end. The first one is the vaddr of loaded percpu * If there is no need to put the percpu section at a predetermined
* init data. __per_cpu_start equals @vaddr and __per_cpu_end is the * address, use PERCPU().
* end offset.
*/ */
#define PERCPU_VADDR(vaddr, phdr) \ #define PERCPU_VADDR(vaddr, phdr) \
VMLINUX_SYMBOL(__per_cpu_load) = .; \ VMLINUX_SYMBOL(__per_cpu_load) = .; \
@ -470,7 +469,20 @@
* Aligns to @align and outputs the output section for the percpu area. This * Aligns to @align and outputs the output section for the percpu area. This
* macro doesn't manipulate @vaddr or @phdr and __per_cpu_load and * macro doesn't manipulate @vaddr or @phdr and __per_cpu_load and
* __per_cpu_start will be identical. * __per_cpu_start will be identical.
*
* This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
* that __per_cpu_load is defined as a relative symbol against
* .data.percpu, which is required for relocatable x86_32
* configurations.
*/ */
#define PERCPU(align) \ #define PERCPU(align) \
. = ALIGN(align); \ . = ALIGN(align); \
PERCPU_VADDR( , ) .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__per_cpu_load) = .; \
VMLINUX_SYMBOL(__per_cpu_start) = .; \
*(.data.percpu.first) \
*(.data.percpu.page_aligned) \
*(.data.percpu) \
*(.data.percpu.shared_aligned) \
VMLINUX_SYMBOL(__per_cpu_end) = .; \
}
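
For context, the symbols the percpu macros emit are consumed from C as
ordinary linker-provided addresses. A hedged sketch; in the kernel the
symbols come from the linker script, here a static array mocks them so the
snippet links standalone:

#include <stdio.h>

static char percpu_template[4096];

/* Kernel code declares these extern; the PERCPU()/PERCPU_VADDR()
 * macros define them around .data.percpu in the linker script. */
static char *__per_cpu_start = percpu_template;
static char *__per_cpu_end = percpu_template + sizeof(percpu_template);

int main(void)
{
	/* Size of the static percpu template copied for each CPU. */
	printf("percpu area: %ld bytes\n",
	       (long)(__per_cpu_end - __per_cpu_start));
	return 0;
}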

View File

@ -41,6 +41,7 @@ header-y += baycom.h
header-y += bfs_fs.h header-y += bfs_fs.h
header-y += blkpg.h header-y += blkpg.h
header-y += bpqether.h header-y += bpqether.h
header-y += bsg.h
header-y += can.h header-y += can.h
header-y += cdk.h header-y += cdk.h
header-y += chio.h header-y += chio.h

View File

@ -144,7 +144,7 @@ struct bio {
* bit 1 -- rw-ahead when set * bit 1 -- rw-ahead when set
* bit 2 -- barrier * bit 2 -- barrier
* Insert a serialization point in the IO queue, forcing previously * Insert a serialization point in the IO queue, forcing previously
* submitted IO to be completed before this oen is issued. * submitted IO to be completed before this one is issued.
* bit 3 -- synchronous I/O hint: the block layer will unplug immediately * bit 3 -- synchronous I/O hint: the block layer will unplug immediately
* Note that this does NOT indicate that the IO itself is sync, just * Note that this does NOT indicate that the IO itself is sync, just
* that the block layer will not postpone issue of this IO by plugging. * that the block layer will not postpone issue of this IO by plugging.
@ -163,12 +163,33 @@ struct bio {
#define BIO_RW 0 /* Must match RW in req flags (blkdev.h) */ #define BIO_RW 0 /* Must match RW in req flags (blkdev.h) */
#define BIO_RW_AHEAD 1 /* Must match FAILFAST in req flags */ #define BIO_RW_AHEAD 1 /* Must match FAILFAST in req flags */
#define BIO_RW_BARRIER 2 #define BIO_RW_BARRIER 2
#define BIO_RW_SYNC 3 #define BIO_RW_SYNCIO 3
#define BIO_RW_META 4 #define BIO_RW_UNPLUG 4
#define BIO_RW_DISCARD 5 #define BIO_RW_META 5
#define BIO_RW_FAILFAST_DEV 6 #define BIO_RW_DISCARD 6
#define BIO_RW_FAILFAST_TRANSPORT 7 #define BIO_RW_FAILFAST_DEV 7
#define BIO_RW_FAILFAST_DRIVER 8 #define BIO_RW_FAILFAST_TRANSPORT 8
#define BIO_RW_FAILFAST_DRIVER 9
#define BIO_RW_SYNC (BIO_RW_SYNCIO | BIO_RW_UNPLUG)
#define bio_rw_flagged(bio, flag) ((bio)->bi_rw & (1 << (flag)))
/*
* Old defines, these should eventually be replaced by direct usage of
* bio_rw_flagged()
*/
#define bio_barrier(bio) bio_rw_flagged(bio, BIO_RW_BARRIER)
#define bio_sync(bio) bio_rw_flagged(bio, BIO_RW_SYNCIO)
#define bio_unplug(bio) bio_rw_flagged(bio, BIO_RW_UNPLUG)
#define bio_failfast_dev(bio) bio_rw_flagged(bio, BIO_RW_FAILFAST_DEV)
#define bio_failfast_transport(bio) \
bio_rw_flagged(bio, BIO_RW_FAILFAST_TRANSPORT)
#define bio_failfast_driver(bio) \
bio_rw_flagged(bio, BIO_RW_FAILFAST_DRIVER)
#define bio_rw_ahead(bio) bio_rw_flagged(bio, BIO_RW_AHEAD)
#define bio_rw_meta(bio) bio_rw_flagged(bio, BIO_RW_META)
#define bio_discard(bio) bio_rw_flagged(bio, BIO_RW_DISCARD)
/* /*
* upper 16 bits of bi_rw define the io priority of this bio * upper 16 bits of bi_rw define the io priority of this bio
@ -193,15 +214,6 @@ struct bio {
#define bio_offset(bio) bio_iovec((bio))->bv_offset #define bio_offset(bio) bio_iovec((bio))->bv_offset
#define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx) #define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx)
#define bio_sectors(bio) ((bio)->bi_size >> 9) #define bio_sectors(bio) ((bio)->bi_size >> 9)
#define bio_barrier(bio) ((bio)->bi_rw & (1 << BIO_RW_BARRIER))
#define bio_sync(bio) ((bio)->bi_rw & (1 << BIO_RW_SYNC))
#define bio_failfast_dev(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DEV))
#define bio_failfast_transport(bio) \
((bio)->bi_rw & (1 << BIO_RW_FAILFAST_TRANSPORT))
#define bio_failfast_driver(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DRIVER))
#define bio_rw_ahead(bio) ((bio)->bi_rw & (1 << BIO_RW_AHEAD))
#define bio_rw_meta(bio) ((bio)->bi_rw & (1 << BIO_RW_META))
#define bio_discard(bio) ((bio)->bi_rw & (1 << BIO_RW_DISCARD))
#define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio)) #define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))
static inline unsigned int bio_cur_sectors(struct bio *bio) static inline unsigned int bio_cur_sectors(struct bio *bio)
@ -312,7 +324,6 @@ struct bio_integrity_payload {
void *bip_buf; /* generated integrity data */ void *bip_buf; /* generated integrity data */
bio_end_io_t *bip_end_io; /* saved I/O completion fn */ bio_end_io_t *bip_end_io; /* saved I/O completion fn */
int bip_error; /* saved I/O error */
unsigned int bip_size; unsigned int bip_size;
unsigned short bip_pool; /* pool the ivec came from */ unsigned short bip_pool; /* pool the ivec came from */
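
The bio.h hunk above funnels all the old per-flag macros through one
bio_rw_flagged() accessor. A self-contained sketch of the pattern, with a
stub standing in for struct bio (only bi_rw matters here):

#include <stdio.h>

struct bio_stub {
	unsigned long bi_rw;	/* bottom bits carry request flags */
};

#define BIO_RW_BARRIER	2
#define BIO_RW_SYNCIO	3
#define BIO_RW_UNPLUG	4

#define bio_rw_flagged(bio, flag) ((bio)->bi_rw & (1 << (flag)))

int main(void)
{
	struct bio_stub bio = {
		.bi_rw = (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG),
	};

	/* One generic test replaces bio_sync(), bio_unplug(), ... */
	printf("sync=%d unplug=%d barrier=%d\n",
	       !!bio_rw_flagged(&bio, BIO_RW_SYNCIO),
	       !!bio_rw_flagged(&bio, BIO_RW_UNPLUG),
	       !!bio_rw_flagged(&bio, BIO_RW_BARRIER));
	return 0;
}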

View File

@ -108,6 +108,7 @@ enum rq_flag_bits {
__REQ_RW_META, /* metadata io request */ __REQ_RW_META, /* metadata io request */
__REQ_COPY_USER, /* contains copies of user pages */ __REQ_COPY_USER, /* contains copies of user pages */
__REQ_INTEGRITY, /* integrity metadata has been remapped */ __REQ_INTEGRITY, /* integrity metadata has been remapped */
__REQ_UNPLUG, /* unplug queue on submission */
__REQ_NR_BITS, /* stops here */ __REQ_NR_BITS, /* stops here */
}; };
@ -134,6 +135,7 @@ enum rq_flag_bits {
#define REQ_RW_META (1 << __REQ_RW_META) #define REQ_RW_META (1 << __REQ_RW_META)
#define REQ_COPY_USER (1 << __REQ_COPY_USER) #define REQ_COPY_USER (1 << __REQ_COPY_USER)
#define REQ_INTEGRITY (1 << __REQ_INTEGRITY) #define REQ_INTEGRITY (1 << __REQ_INTEGRITY)
#define REQ_UNPLUG (1 << __REQ_UNPLUG)
#define BLK_MAX_CDB 16 #define BLK_MAX_CDB 16
@ -449,6 +451,11 @@ struct request_queue
#define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */ #define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */
#define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */ #define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_CLUSTER) | \
(1 << QUEUE_FLAG_STACKABLE))
static inline int queue_is_locked(struct request_queue *q) static inline int queue_is_locked(struct request_queue *q)
{ {
@ -565,6 +572,7 @@ enum {
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_flushing(q) ((q)->ordseq) #define blk_queue_flushing(q) ((q)->ordseq)
#define blk_queue_stackable(q) \ #define blk_queue_stackable(q) \
test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)

View File

@ -308,7 +308,8 @@ void buffer_assertion_failure(struct buffer_head *bh);
int val = (expr); \ int val = (expr); \
if (!val) { \ if (!val) { \
printk(KERN_ERR \ printk(KERN_ERR \
"EXT3-fs unexpected failure: %s;\n",# expr); \ "JBD2 unexpected failure: %s: %s;\n", \
__func__, #expr); \
printk(KERN_ERR why "\n"); \ printk(KERN_ERR why "\n"); \
} \ } \
val; \ val; \
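
The assertion macro being retitled above is a GNU statement expression: it
reports the failure and still yields the tested value, so it can sit directly
inside an if(). A hedged userspace sketch of the same shape (names are
illustrative, and like the original it requires GNU C):

#include <stdio.h>

#define check_assert(expr, why) ({				\
	int __val = (expr);					\
	if (!__val) {						\
		fprintf(stderr, "unexpected failure: %s: %s\n",	\
			__func__, #expr);			\
		fprintf(stderr, "%s\n", why);			\
	}							\
	__val;							\
})

static int demo(int x)
{
	if (!check_assert(x > 0, "x must be positive"))
		return -1;
	return x;
}

int main(void)
{
	printf("%d\n", demo(5));	/* 5 */
	printf("%d\n", demo(-1));	/* -1, after the diagnostics */
	return 0;
}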

View File

@ -182,7 +182,7 @@ static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
size = 2048; size = 2048;
if (nr_pcpus >= 32) if (nr_pcpus >= 32)
size = 4096; size = 4096;
if (sizeof(rwlock_t) != 0) { if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
if (size * sizeof(spinlock_t) > PAGE_SIZE) if (size * sizeof(spinlock_t) > PAGE_SIZE)
hashinfo->ehash_locks = vmalloc(size * sizeof(spinlock_t)); hashinfo->ehash_locks = vmalloc(size * sizeof(spinlock_t));
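
The sizeof() test this hunk retargets from rwlock_t to spinlock_t is a
compile-time guard: when the lock type is an empty struct (zero bytes as a
GNU C extension, e.g. uniprocessor builds without lock debugging), the
condition folds to false and the whole allocation block is discarded. A
hedged sketch of the idiom (the lock types here are stand-ins):

#include <stdio.h>
#include <stdlib.h>

struct lock_smp { int raw; };	/* stand-in for a populated spinlock */
struct lock_up { };		/* empty struct: sizeof == 0 with GCC */

#define LOCK_T struct lock_up	/* flip to lock_smp and recompile */

int main(void)
{
	void *locks = NULL;

	if (sizeof(LOCK_T) != 0) {	/* constant-folded per config */
		locks = malloc(4096 * sizeof(LOCK_T));
		printf("allocated %zu lock bytes\n", 4096 * sizeof(LOCK_T));
	}

	free(locks);
	return 0;
}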

View File

@ -658,6 +658,9 @@ again: remove_next = 1 + (end > next->vm_end);
validate_mm(mm); validate_mm(mm);
} }
/* Flags that can be inherited from an existing mapping when merging */
#define VM_MERGEABLE_FLAGS (VM_CAN_NONLINEAR)
/* /*
* If the vma has a ->close operation then the driver probably needs to release * If the vma has a ->close operation then the driver probably needs to release
* per-vma resources, so we don't attempt to merge those. * per-vma resources, so we don't attempt to merge those.
@ -665,7 +668,7 @@ again: remove_next = 1 + (end > next->vm_end);
static inline int is_mergeable_vma(struct vm_area_struct *vma, static inline int is_mergeable_vma(struct vm_area_struct *vma,
struct file *file, unsigned long vm_flags) struct file *file, unsigned long vm_flags)
{ {
if (vma->vm_flags != vm_flags) if ((vma->vm_flags ^ vm_flags) & ~VM_MERGEABLE_FLAGS)
return 0; return 0;
if (vma->vm_file != file) if (vma->vm_file != file)
return 0; return 0;
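
The rewritten test compares the two flag words with XOR and masks off the
bits that are allowed to differ, so a mapping carrying VM_CAN_NONLINEAR can
now merge with one that lacks it. A small sketch of the bit trick with
illustrative flag values (not the real VM_* constants):

#include <stdio.h>

#define FLAG_READ	0x1
#define FLAG_WRITE	0x2
#define FLAG_NONLINEAR	0x8	/* stand-in for VM_CAN_NONLINEAR */

#define MERGEABLE_MASK	FLAG_NONLINEAR	/* bits ignored when merging */

static int mergeable(unsigned long a, unsigned long b)
{
	/* XOR leaves a 1 wherever the words differ; clearing the
	 * ignorable bits means only real differences block a merge. */
	return !((a ^ b) & ~MERGEABLE_MASK);
}

int main(void)
{
	printf("%d\n", mergeable(FLAG_READ | FLAG_WRITE,
				 FLAG_READ | FLAG_WRITE | FLAG_NONLINEAR));
	printf("%d\n", mergeable(FLAG_READ, FLAG_WRITE));	/* 0 */
	return 0;
}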

View File

@ -2212,10 +2212,10 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
return 0; return 0;
next_skb: next_skb:
block_limit = skb_headlen(st->cur_skb); block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
if (abs_offset < block_limit) { if (abs_offset < block_limit) {
*data = st->cur_skb->data + abs_offset; *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
return block_limit - abs_offset; return block_limit - abs_offset;
} }
@ -2250,13 +2250,14 @@ next_skb:
st->frag_data = NULL; st->frag_data = NULL;
} }
if (st->cur_skb->next) { if (st->root_skb == st->cur_skb &&
st->cur_skb = st->cur_skb->next; skb_shinfo(st->root_skb)->frag_list) {
st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
st->frag_idx = 0; st->frag_idx = 0;
goto next_skb; goto next_skb;
} else if (st->root_skb == st->cur_skb && } else if (st->cur_skb->next) {
skb_shinfo(st->root_skb)->frag_list) { st->cur_skb = st->cur_skb->next;
st->cur_skb = skb_shinfo(st->root_skb)->frag_list; st->frag_idx = 0;
goto next_skb; goto next_skb;
} }

View File

@ -1268,6 +1268,9 @@ __be32 __init root_nfs_parse_addr(char *name)
static int __init ip_auto_config(void) static int __init ip_auto_config(void)
{ {
__be32 addr; __be32 addr;
#ifdef IPCONFIG_DYNAMIC
int retries = CONF_OPEN_RETRIES;
#endif
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops); proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops);
@ -1304,9 +1307,6 @@ static int __init ip_auto_config(void)
#endif #endif
ic_first_dev->next) { ic_first_dev->next) {
#ifdef IPCONFIG_DYNAMIC #ifdef IPCONFIG_DYNAMIC
int retries = CONF_OPEN_RETRIES;
if (ic_dynamic() < 0) { if (ic_dynamic() < 0) {
ic_close_devs(); ic_close_devs();

View File

@ -524,7 +524,8 @@ static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
struct tcp_splice_state *tss = rd_desc->arg.data; struct tcp_splice_state *tss = rd_desc->arg.data;
int ret; int ret;
ret = skb_splice_bits(skb, offset, tss->pipe, rd_desc->count, tss->flags); ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
tss->flags);
if (ret > 0) if (ret > 0)
rd_desc->count -= ret; rd_desc->count -= ret;
return ret; return ret;

View File

@ -120,8 +120,11 @@ EXPORT_SYMBOL(sysctl_udp_wmem_min);
atomic_t udp_memory_allocated; atomic_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated); EXPORT_SYMBOL(udp_memory_allocated);
#define PORTS_PER_CHAIN (65536 / UDP_HTABLE_SIZE)
static int udp_lib_lport_inuse(struct net *net, __u16 num, static int udp_lib_lport_inuse(struct net *net, __u16 num,
const struct udp_hslot *hslot, const struct udp_hslot *hslot,
unsigned long *bitmap,
struct sock *sk, struct sock *sk,
int (*saddr_comp)(const struct sock *sk1, int (*saddr_comp)(const struct sock *sk1,
const struct sock *sk2)) const struct sock *sk2))
@ -132,12 +135,17 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
sk_nulls_for_each(sk2, node, &hslot->head) sk_nulls_for_each(sk2, node, &hslot->head)
if (net_eq(sock_net(sk2), net) && if (net_eq(sock_net(sk2), net) &&
sk2 != sk && sk2 != sk &&
sk2->sk_hash == num && (bitmap || sk2->sk_hash == num) &&
(!sk2->sk_reuse || !sk->sk_reuse) && (!sk2->sk_reuse || !sk->sk_reuse) &&
(!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
|| sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
(*saddr_comp)(sk, sk2)) (*saddr_comp)(sk, sk2)) {
return 1; if (bitmap)
__set_bit(sk2->sk_hash / UDP_HTABLE_SIZE,
bitmap);
else
return 1;
}
return 0; return 0;
} }
@ -160,32 +168,47 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
if (!snum) { if (!snum) {
int low, high, remaining; int low, high, remaining;
unsigned rand; unsigned rand;
unsigned short first; unsigned short first, last;
DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
inet_get_local_port_range(&low, &high); inet_get_local_port_range(&low, &high);
remaining = (high - low) + 1; remaining = (high - low) + 1;
rand = net_random(); rand = net_random();
snum = first = rand % remaining + low; first = (((u64)rand * remaining) >> 32) + low;
rand |= 1; /*
for (;;) { * force rand to be an odd multiple of UDP_HTABLE_SIZE
hslot = &udptable->hash[udp_hashfn(net, snum)]; */
rand = (rand | 1) * UDP_HTABLE_SIZE;
for (last = first + UDP_HTABLE_SIZE; first != last; first++) {
hslot = &udptable->hash[udp_hashfn(net, first)];
bitmap_zero(bitmap, PORTS_PER_CHAIN);
spin_lock_bh(&hslot->lock); spin_lock_bh(&hslot->lock);
if (!udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp)) udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
break; saddr_comp);
spin_unlock_bh(&hslot->lock);
snum = first;
/*
* Iterate on all possible values of snum for this hash.
* Using steps of an odd multiple of UDP_HTABLE_SIZE
* gives us randomization and full range coverage.
*/
do { do {
snum = snum + rand; if (low <= snum && snum <= high &&
} while (snum < low || snum > high); !test_bit(snum / UDP_HTABLE_SIZE, bitmap))
if (snum == first) goto found;
goto fail; snum += rand;
} while (snum != first);
spin_unlock_bh(&hslot->lock);
} }
goto fail;
} else { } else {
hslot = &udptable->hash[udp_hashfn(net, snum)]; hslot = &udptable->hash[udp_hashfn(net, snum)];
spin_lock_bh(&hslot->lock); spin_lock_bh(&hslot->lock);
if (udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp)) if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, saddr_comp))
goto fail_unlock; goto fail_unlock;
} }
found:
inet_sk(sk)->num = snum; inet_sk(sk)->num = snum;
sk->sk_hash = snum; sk->sk_hash = snum;
if (sk_unhashed(sk)) { if (sk_unhashed(sk)) {
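
The rewritten port search above leans on a small number-theory fact: a
16-bit counter stepped by an odd multiple of UDP_HTABLE_SIZE returns to its
start after exactly PORTS_PER_CHAIN additions, visiting every port of one
hash chain once, since gcd(odd * 128, 65536) == 128 (udp_hashfn() also mixes
in the netns, which only shifts the chain, ignored here). A standalone
sketch that checks the coverage claim:

#include <stdio.h>

#define UDP_HTABLE_SIZE	128
#define PORTS_PER_CHAIN	(65536 / UDP_HTABLE_SIZE)

int main(void)
{
	unsigned short snum, first = 40000;
	unsigned int step = (12345u | 1) * UDP_HTABLE_SIZE;
	char seen[PORTS_PER_CHAIN] = { 0 };
	int visited = 0, covered = 0, i;

	snum = first;
	do {
		seen[snum / UDP_HTABLE_SIZE] = 1; /* chain-local slot */
		visited++;
		snum += step;	/* wraps mod 65536 in unsigned short */
	} while (snum != first);

	for (i = 0; i < PORTS_PER_CHAIN; i++)
		covered += seen[i];
	printf("%d steps covered %d of %d slots\n",
	       visited, covered, PORTS_PER_CHAIN);
	return 0;
}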

View File

@ -4250,7 +4250,7 @@ static struct addrconf_sysctl_table
.procname = "mc_forwarding", .procname = "mc_forwarding",
.data = &ipv6_devconf.mc_forwarding, .data = &ipv6_devconf.mc_forwarding,
.maxlen = sizeof(int), .maxlen = sizeof(int),
.mode = 0644, .mode = 0444,
.proc_handler = proc_dointvec, .proc_handler = proc_dointvec,
}, },
#endif #endif

View File

@ -443,10 +443,10 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
if (xfrm_decode_session_reverse(skb, &fl2, AF_INET6)) if (xfrm_decode_session_reverse(skb, &fl2, AF_INET6))
goto relookup_failed; goto relookup_failed;
if (ip6_dst_lookup(sk, &dst2, &fl)) if (ip6_dst_lookup(sk, &dst2, &fl2))
goto relookup_failed; goto relookup_failed;
err = xfrm_lookup(net, &dst2, &fl, sk, XFRM_LOOKUP_ICMP); err = xfrm_lookup(net, &dst2, &fl2, sk, XFRM_LOOKUP_ICMP);
switch (err) { switch (err) {
case 0: case 0:
dst_release(dst); dst_release(dst);

View File

@ -255,6 +255,7 @@ int ip6_mc_input(struct sk_buff *skb)
* IPv6 multicast router mode is now supported ;) * IPv6 multicast router mode is now supported ;)
*/ */
if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding && if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding &&
!(ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) &&
likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) { likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
/* /*
* Okay, we try to forward - split and duplicate * Okay, we try to forward - split and duplicate
@ -316,7 +317,6 @@ int ip6_mc_input(struct sk_buff *skb)
} }
if (skb2) { if (skb2) {
skb2->dev = skb2->dst->dev;
ip6_mr_input(skb2); ip6_mr_input(skb2);
} }
} }

View File

@ -365,7 +365,9 @@ static int pim6_rcv(struct sk_buff *skb)
pim = (struct pimreghdr *)skb_transport_header(skb); pim = (struct pimreghdr *)skb_transport_header(skb);
if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) || if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
(pim->flags & PIM_NULL_REGISTER) || (pim->flags & PIM_NULL_REGISTER) ||
(ip_compute_csum((void *)pim, sizeof(*pim)) != 0 && (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
sizeof(*pim), IPPROTO_PIM,
csum_partial((void *)pim, sizeof(*pim), 0)) &&
csum_fold(skb_checksum(skb, 0, skb->len, 0)))) csum_fold(skb_checksum(skb, 0, skb->len, 0))))
goto drop; goto drop;
@ -392,7 +394,7 @@ static int pim6_rcv(struct sk_buff *skb)
skb_pull(skb, (u8 *)encap - skb->data); skb_pull(skb, (u8 *)encap - skb->data);
skb_reset_network_header(skb); skb_reset_network_header(skb);
skb->dev = reg_dev; skb->dev = reg_dev;
skb->protocol = htons(ETH_P_IP); skb->protocol = htons(ETH_P_IPV6);
skb->ip_summed = 0; skb->ip_summed = 0;
skb->pkt_type = PACKET_HOST; skb->pkt_type = PACKET_HOST;
dst_release(skb->dst); dst_release(skb->dst);
@ -481,6 +483,7 @@ static int mif6_delete(struct net *net, int vifi)
{ {
struct mif_device *v; struct mif_device *v;
struct net_device *dev; struct net_device *dev;
struct inet6_dev *in6_dev;
if (vifi < 0 || vifi >= net->ipv6.maxvif) if (vifi < 0 || vifi >= net->ipv6.maxvif)
return -EADDRNOTAVAIL; return -EADDRNOTAVAIL;
@ -513,6 +516,10 @@ static int mif6_delete(struct net *net, int vifi)
dev_set_allmulti(dev, -1); dev_set_allmulti(dev, -1);
in6_dev = __in6_dev_get(dev);
if (in6_dev)
in6_dev->cnf.mc_forwarding--;
if (v->flags & MIFF_REGISTER) if (v->flags & MIFF_REGISTER)
unregister_netdevice(dev); unregister_netdevice(dev);
@ -622,6 +629,7 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock)
int vifi = vifc->mif6c_mifi; int vifi = vifc->mif6c_mifi;
struct mif_device *v = &net->ipv6.vif6_table[vifi]; struct mif_device *v = &net->ipv6.vif6_table[vifi];
struct net_device *dev; struct net_device *dev;
struct inet6_dev *in6_dev;
int err; int err;
/* Is vif busy ? */ /* Is vif busy ? */
@ -662,6 +670,10 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock)
return -EINVAL; return -EINVAL;
} }
in6_dev = __in6_dev_get(dev);
if (in6_dev)
in6_dev->cnf.mc_forwarding++;
/* /*
* Fill in the VIF structures * Fill in the VIF structures
*/ */
@ -838,8 +850,6 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi,
skb->dst = dst_clone(pkt->dst); skb->dst = dst_clone(pkt->dst);
skb->ip_summed = CHECKSUM_UNNECESSARY; skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_pull(skb, sizeof(struct ipv6hdr));
} }
if (net->ipv6.mroute6_sk == NULL) { if (net->ipv6.mroute6_sk == NULL) {
@ -1222,8 +1232,10 @@ static int ip6mr_sk_init(struct sock *sk)
rtnl_lock(); rtnl_lock();
write_lock_bh(&mrt_lock); write_lock_bh(&mrt_lock);
if (likely(net->ipv6.mroute6_sk == NULL)) if (likely(net->ipv6.mroute6_sk == NULL)) {
net->ipv6.mroute6_sk = sk; net->ipv6.mroute6_sk = sk;
net->ipv6.devconf_all->mc_forwarding++;
}
else else
err = -EADDRINUSE; err = -EADDRINUSE;
write_unlock_bh(&mrt_lock); write_unlock_bh(&mrt_lock);
@ -1242,6 +1254,7 @@ int ip6mr_sk_done(struct sock *sk)
if (sk == net->ipv6.mroute6_sk) { if (sk == net->ipv6.mroute6_sk) {
write_lock_bh(&mrt_lock); write_lock_bh(&mrt_lock);
net->ipv6.mroute6_sk = NULL; net->ipv6.mroute6_sk = NULL;
net->ipv6.devconf_all->mc_forwarding--;
write_unlock_bh(&mrt_lock); write_unlock_bh(&mrt_lock);
mroute_clean_tables(net); mroute_clean_tables(net);

View File

@ -794,7 +794,7 @@ void ip6_route_input(struct sk_buff *skb)
.proto = iph->nexthdr, .proto = iph->nexthdr,
}; };
if (rt6_need_strict(&iph->daddr)) if (rt6_need_strict(&iph->daddr) && skb->dev->type != ARPHRD_PIMREG)
flags |= RT6_LOOKUP_F_IFACE; flags |= RT6_LOOKUP_F_IFACE;
skb->dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_input); skb->dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_input);

View File

@ -28,17 +28,6 @@
#include "debug-levels.h" #include "debug-levels.h"
/* Debug framework control of debug levels */
struct d_level D_LEVEL[] = {
D_SUBMODULE_DEFINE(debugfs),
D_SUBMODULE_DEFINE(id_table),
D_SUBMODULE_DEFINE(op_msg),
D_SUBMODULE_DEFINE(op_reset),
D_SUBMODULE_DEFINE(op_rfkill),
D_SUBMODULE_DEFINE(stack),
};
size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
#define __debugfs_register(prefix, name, parent) \ #define __debugfs_register(prefix, name, parent) \
do { \ do { \
result = d_level_register_debugfs(prefix, name, parent); \ result = d_level_register_debugfs(prefix, name, parent); \

View File

@ -516,6 +516,19 @@ void wimax_dev_rm(struct wimax_dev *wimax_dev)
} }
EXPORT_SYMBOL_GPL(wimax_dev_rm); EXPORT_SYMBOL_GPL(wimax_dev_rm);
/* Debug framework control of debug levels */
struct d_level D_LEVEL[] = {
D_SUBMODULE_DEFINE(debugfs),
D_SUBMODULE_DEFINE(id_table),
D_SUBMODULE_DEFINE(op_msg),
D_SUBMODULE_DEFINE(op_reset),
D_SUBMODULE_DEFINE(op_rfkill),
D_SUBMODULE_DEFINE(stack),
};
size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
struct genl_family wimax_gnl_family = { struct genl_family wimax_gnl_family = {
.id = GENL_ID_GENERATE, .id = GENL_ID_GENERATE,
.name = "WiMAX", .name = "WiMAX",

View File

@ -498,6 +498,7 @@ static struct ieee80211_regdomain *country_ie_2_rd(
* calculate the number of reg rules we will need. We will need one * calculate the number of reg rules we will need. We will need one
* for each channel subband */ * for each channel subband */
while (country_ie_len >= 3) { while (country_ie_len >= 3) {
int end_channel = 0;
struct ieee80211_country_ie_triplet *triplet = struct ieee80211_country_ie_triplet *triplet =
(struct ieee80211_country_ie_triplet *) country_ie; (struct ieee80211_country_ie_triplet *) country_ie;
int cur_sub_max_channel = 0, cur_channel = 0; int cur_sub_max_channel = 0, cur_channel = 0;
@ -509,9 +510,25 @@ static struct ieee80211_regdomain *country_ie_2_rd(
continue; continue;
} }
/* 2 GHz */
if (triplet->chans.first_channel <= 14)
end_channel = triplet->chans.first_channel +
triplet->chans.num_channels;
else
/*
* 5 GHz -- For example in country IEs if the first
* channel given is 36 and the number of channels is 4
* then the individual channel numbers defined for the
* 5 GHz PHY by these parameters are: 36, 40, 44, and 48
* and not 36, 37, 38, 39.
*
* See: http://tinyurl.com/11d-clarification
*/
end_channel = triplet->chans.first_channel +
(4 * (triplet->chans.num_channels - 1));
cur_channel = triplet->chans.first_channel; cur_channel = triplet->chans.first_channel;
cur_sub_max_channel = ieee80211_channel_to_frequency( cur_sub_max_channel = end_channel;
cur_channel + triplet->chans.num_channels);
/* Basic sanity check */ /* Basic sanity check */
if (cur_sub_max_channel < cur_channel) if (cur_sub_max_channel < cur_channel)
@ -590,15 +607,6 @@ static struct ieee80211_regdomain *country_ie_2_rd(
end_channel = triplet->chans.first_channel + end_channel = triplet->chans.first_channel +
triplet->chans.num_channels; triplet->chans.num_channels;
else else
/*
* 5 GHz -- For example in country IEs if the first
* channel given is 36 and the number of channels is 4
* then the individual channel numbers defined for the
* 5 GHz PHY by these parameters are: 36, 40, 44, and 48
* and not 36, 37, 38, 39.
*
* See: http://tinyurl.com/11d-clarification
*/
end_channel = triplet->chans.first_channel + end_channel = triplet->chans.first_channel +
(4 * (triplet->chans.num_channels - 1)); (4 * (triplet->chans.num_channels - 1));
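
The channel arithmetic the relocated comment describes is easy to check in
isolation: below channel 14 the count is linear, above it channel numbers
advance in steps of four. A short sketch of the same split (names
illustrative):

#include <stdio.h>

static int end_channel(int first, int num)
{
	if (first <= 14)	/* 2 GHz: consecutive channel numbers */
		return first + num;
	/* 5 GHz: first=36, num=4 gives 36, 40, 44, 48 -- not 36..39. */
	return first + 4 * (num - 1);
}

int main(void)
{
	printf("5 GHz end channel: %d\n", end_channel(36, 4));	/* 48 */
	printf("2 GHz end channel: %d\n", end_channel(1, 13));	/* 14 */
	return 0;
}
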
@ -1276,7 +1284,7 @@ static void reg_country_ie_process_debug(
if (intersected_rd) { if (intersected_rd) {
printk(KERN_DEBUG "cfg80211: We intersect both of these " printk(KERN_DEBUG "cfg80211: We intersect both of these "
"and get:\n"); "and get:\n");
print_regdomain_info(rd); print_regdomain_info(intersected_rd);
return; return;
} }
printk(KERN_DEBUG "cfg80211: Intersection between both failed\n"); printk(KERN_DEBUG "cfg80211: Intersection between both failed\n");