Merge branch 'master' into for-next

Merge with 49717cb ("kthread: Document ways of reducing OS jitter due
to per-CPU kthreads") to be able to apply fixup patch on top of it.

Signed-off-by: Jiri Kosina <jkosina@suse.cz>

commit 864bfb25b5
CREDITS (6 lines changed)
@@ -953,11 +953,11 @@ S: Blacksburg, Virginia 24061
S: USA

N: Randy Dunlap
-E: rdunlap@xenotime.net
-W: http://www.xenotime.net/linux/linux.html
-W: http://www.linux-usb.org
+E: rdunlap@infradead.org
+W: http://www.infradead.org/~rdunlap/
D: Linux-USB subsystem, USB core/UHCI/printer/storage drivers
D: x86 SMP, ACPI, bootflag hacking
+D: documentation, builds
S: (ask for current address)
S: USA

@@ -217,9 +217,14 @@ over a rather long period of time, but improvements are always welcome!
	whether the increased speed is worth it.

8.	Although synchronize_rcu() is slower than is call_rcu(), it
-	usually results in simpler code.  So, unless update performance
-	is critically important or the updaters cannot block,
-	synchronize_rcu() should be used in preference to call_rcu().
+	usually results in simpler code.  So, unless update performance is
+	critically important, the updaters cannot block, or the latency of
+	synchronize_rcu() is visible from userspace, synchronize_rcu()
+	should be used in preference to call_rcu().  Furthermore,
+	kfree_rcu() usually results in even simpler code than does
+	synchronize_rcu() without synchronize_rcu()'s multi-millisecond
+	latency.  So please take advantage of kfree_rcu()'s "fire and
+	forget" memory-freeing capabilities where it applies.

	An especially important property of the synchronize_rcu()
	primitive is that it automatically self-limits: if grace periods

@@ -268,7 +273,8 @@ over a rather long period of time, but improvements are always welcome!
	e.	Periodically invoke synchronize_rcu(), permitting a limited
		number of updates per grace period.

-	The same cautions apply to call_rcu_bh() and call_rcu_sched().
+	The same cautions apply to call_rcu_bh(), call_rcu_sched(),
+	call_srcu(), and kfree_rcu().

9.	All RCU list-traversal primitives, which include
	rcu_dereference(), list_for_each_entry_rcu(), and
@@ -296,9 +302,9 @@ over a rather long period of time, but improvements are always welcome!
	all currently executing rcu_read_lock()-protected RCU read-side
	critical sections complete.  It does -not- necessarily guarantee
	that all currently running interrupts, NMIs, preempt_disable()
-	code, or idle loops will complete.  Therefore, if you do not have
-	rcu_read_lock()-protected read-side critical sections, do -not-
-	use synchronize_rcu().
+	code, or idle loops will complete.  Therefore, if your
+	read-side critical sections are protected by something other
+	than rcu_read_lock(), do -not- use synchronize_rcu().
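
	For example, in this hypothetical sketch (not taken from this
	document), the reader runs under rcu_read_lock_bh(), so only the
	_bh flavor of the update-side primitive is guaranteed to wait
	for it:

		rcu_read_lock_bh();		/* reader */
		p = rcu_dereference_bh(gp);
		/* ... use p ... */
		rcu_read_unlock_bh();

		/* updater: must use the matching flavor */
		synchronize_rcu_bh();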

	Similarly, disabling preemption is not an acceptable substitute
	for rcu_read_lock().  Code that attempts to use preemption

@@ -401,9 +407,9 @@ over a rather long period of time, but improvements are always welcome!
	read-side critical sections.  It is the responsibility of the
	RCU update-side primitives to deal with this.

-17.	Use CONFIG_PROVE_RCU, CONFIG_DEBUG_OBJECTS_RCU_HEAD, and
-	the __rcu sparse checks to validate your RCU code.  These
-	can help find problems as follows:
+17.	Use CONFIG_PROVE_RCU, CONFIG_DEBUG_OBJECTS_RCU_HEAD, and the
+	__rcu sparse checks (enabled by CONFIG_SPARSE_RCU_POINTER) to
+	validate your RCU code.  These can help find problems as follows:

	CONFIG_PROVE_RCU: check that accesses to RCU-protected data
		structures are carried out under the proper RCU
@@ -64,6 +64,11 @@ checking of rcu_dereference() primitives:
		but retain the compiler constraints that prevent duplicating
		or coalescing.  This is useful when testing the
		value of the pointer itself, for example, against NULL.
+	rcu_access_index(idx):
+		Return the value of the index and omit all barriers, but
+		retain the compiler constraints that prevent duplicating
+		or coalescing.  This is useful when testing the
+		value of the index itself, for example, against -1.
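
	For instance, a hypothetical index check (the variable name is
	illustrative only, not taken from this document):

		/* Compare an RCU-protected index against -1 without
		 * claiming to dereference anything, so no lockdep
		 * expression is required. */
		if (rcu_access_index(cached_idx) == -1)
			return -ENOENT;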

The rcu_dereference_check() check expression can be any boolean
expression, but would normally include a lockdep expression.  However,
@@ -79,7 +79,20 @@ complete.  Pseudo-code using rcu_barrier() is as follows:
2. Execute rcu_barrier().
3. Allow the module to be unloaded.

-The rcutorture module makes use of rcu_barrier in its exit function
+There are also rcu_barrier_bh(), rcu_barrier_sched(), and srcu_barrier()
+functions for the other flavors of RCU, and you of course must match
+the flavor of rcu_barrier() with that of call_rcu().  If your module
+uses multiple flavors of call_rcu(), then it must also use multiple
+flavors of rcu_barrier() when unloading that module.  For example, if
+it uses call_rcu_bh(), call_srcu() on srcu_struct_1, and call_srcu() on
+srcu_struct_2, then the following three lines of code will be required
+when unloading:
+
+ 1 rcu_barrier_bh();
+ 2 srcu_barrier(&srcu_struct_1);
+ 3 srcu_barrier(&srcu_struct_2);
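
As a sketch, these three lines might sit in a module exit function like
the following (the module name is hypothetical, and the srcu_struct
variables are those from the example above):

 1 static void __exit mymod_exit(void)
 2 {
 3         /* All call_rcu_bh()/call_srcu() posting has stopped. */
 4         rcu_barrier_bh();
 5         srcu_barrier(&srcu_struct_1);
 6         srcu_barrier(&srcu_struct_2);
 7 }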

+The rcutorture module makes use of rcu_barrier() in its exit function
as follows:

 1 static void
@@ -92,14 +92,14 @@ If the CONFIG_RCU_CPU_STALL_INFO kernel configuration parameter is set,
more information is printed with the stall-warning message, for example:

	INFO: rcu_preempt detected stall on CPU
-	0: (63959 ticks this GP) idle=241/3fffffffffffffff/0
+	0: (63959 ticks this GP) idle=241/3fffffffffffffff/0 softirq=82/543
	   (t=65000 jiffies)

In kernels with CONFIG_RCU_FAST_NO_HZ, even more information is
printed:

	INFO: rcu_preempt detected stall on CPU
-	0: (64628 ticks this GP) idle=dd5/3fffffffffffffff/0 drain=0 . timer not pending
+	0: (64628 ticks this GP) idle=dd5/3fffffffffffffff/0 softirq=82/543 last_accelerate: a345/d342 nonlazy_posted: 25 .D
	   (t=65000 jiffies)

The "(64628 ticks this GP)" indicates that this CPU has taken more

@@ -116,13 +116,28 @@ number between the two "/"s is the value of the nesting, which will
be a small positive number if in the idle loop and a very large positive
number (as shown above) otherwise.

-For CONFIG_RCU_FAST_NO_HZ kernels, the "drain=0" indicates that the CPU is
-not in the process of trying to force itself into dyntick-idle state, the
-"." indicates that the CPU has not given up forcing RCU into dyntick-idle
-mode (it would be "H" otherwise), and the "timer not pending" indicates
-that the CPU has not recently forced RCU into dyntick-idle mode (it
-would otherwise indicate the number of microseconds remaining in this
-forced state).
+The "softirq=" portion of the message tracks the number of RCU softirq
+handlers that the stalled CPU has executed.  The number before the "/"
+is the number that had executed since boot at the time that this CPU
+last noted the beginning of a grace period, which might be the current
+(stalled) grace period, or it might be some earlier grace period (for
+example, if the CPU might have been in dyntick-idle mode for an extended
+time period).  The number after the "/" is the number that have executed
+since boot until the current time.  If this latter number stays constant
+across repeated stall-warning messages, it is possible that RCU's softirq
+handlers are no longer able to execute on this CPU.  This can happen if
+the stalled CPU is spinning with interrupts disabled, or, in -rt
+kernels, if a high-priority process is starving RCU's softirq handler.
+
+For CONFIG_RCU_FAST_NO_HZ kernels, the "last_accelerate:" prints the
+low-order 16 bits (in hex) of the jiffies counter when this CPU last
+invoked rcu_try_advance_all_cbs() from rcu_needs_cpu() or last invoked
+rcu_accelerate_cbs() from rcu_prepare_for_idle().  The "nonlazy_posted:"
+prints the number of non-lazy callbacks posted since the last call to
+rcu_needs_cpu().  Finally, an "L" indicates that there are currently
+no non-lazy callbacks ("." is printed otherwise, as shown above) and
+"D" indicates that dyntick-idle processing is enabled ("." is printed
+otherwise, for example, if disabled via the "nohz=" kernel boot parameter).


Multiple Warnings From One Stall
@@ -265,9 +265,9 @@ rcu_dereference()
	rcu_read_lock();
	p = rcu_dereference(head.next);
	rcu_read_unlock();
-	x = p->address;
+	x = p->address;	/* BUG!!! */
	rcu_read_lock();
-	y = p->data;
+	y = p->data;	/* BUG!!! */
	rcu_read_unlock();
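
For comparison, a corrected sketch simply keeps every use of p inside
one enclosing read-side critical section:

	rcu_read_lock();
	p = rcu_dereference(head.next);
	x = p->address;		/* OK: p is still protected here */
	y = p->data;		/* OK */
	rcu_read_unlock();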

Holding a reference from one RCU read-side critical section
@@ -60,8 +60,7 @@ own source tree.  For example:
"dontdiff" is a list of files which are generated by the kernel during
the build process, and should be ignored in any diff(1)-generated
patch.  The "dontdiff" file is included in the kernel tree in
-2.6.12 and later.  For earlier kernel versions, you can get it
-from <http://www.xenotime.net/linux/doc/dontdiff>.
+2.6.12 and later.

Make sure your patch does not include any extra files which do not
belong in a patch submission.  Make sure to review your patch -after-
@@ -30,6 +30,7 @@ The target is named "raid" and it accepts the following parameters:
  raid10	Various RAID10 inspired algorithms chosen by additional params
		- RAID10: Striped Mirrors (aka 'Striping on top of mirrors')
		- RAID1E: Integrated Adjacent Stripe Mirroring
+		- RAID1E: Integrated Offset Stripe Mirroring
		- and other similar RAID10 variants

  Reference: Chapter 4 of

@@ -64,15 +65,15 @@ The target is named "raid" and it accepts the following parameters:
	synchronisation state for each region.

	[raid10_copies   <# copies>]
-	[raid10_format   near]
+	[raid10_format   <near|far|offset>]
	These two options are used to alter the default layout of
	a RAID10 configuration.  The number of copies can be
-	specified, but the default is 2.  There are other variations
-	to how the copies are laid down - the default and only current
-	option is "near".  Near copies are what most people think of
-	with respect to mirroring.  If these options are left
-	unspecified, or 'raid10_copies 2' and/or 'raid10_format near'
-	are given, then the layouts for 2, 3 and 4 devices are:
+	specified, but the default is 2.  There are also three
+	variations to how the copies are laid down - the default
+	is "near".  Near copies are what most people think of with
+	respect to mirroring.  If these options are left unspecified,
+	or 'raid10_copies 2' and/or 'raid10_format near' are given,
+	then the layouts for 2, 3 and 4 devices are:
		2 drives         3 drives          4 drives
		--------         ----------        --------------
		A1  A1           A1  A1  A2        A1  A1  A2  A2

@@ -85,6 +86,33 @@ The target is named "raid" and it accepts the following parameters:
	3-device layout is what might be called a 'RAID1E - Integrated
	Adjacent Stripe Mirroring'.

+	If 'raid10_copies 2' and 'raid10_format far', then the layouts
+	for 2, 3 and 4 devices are:
+		2 drives             3 drives             4 drives
+		--------             --------------       --------------------
+		A1  A2               A1   A2   A3         A1   A2   A3   A4
+		A3  A4               A4   A5   A6         A5   A6   A7   A8
+		A5  A6               A7   A8   A9         A9   A10  A11  A12
+		..  ..               ..   ..   ..         ..   ..   ..   ..
+		A2  A1               A3   A1   A2         A2   A1   A4   A3
+		A4  A3               A6   A4   A5         A6   A5   A8   A7
+		A6  A5               A9   A7   A8         A10  A9   A12  A11
+		..  ..               ..   ..   ..         ..   ..   ..   ..
+
+	If 'raid10_copies 2' and 'raid10_format offset', then the
+	layouts for 2, 3 and 4 devices are:
+		2 drives       3 drives           4 drives
+		--------       ------------       -----------------
+		A1  A2         A1  A2  A3         A1  A2  A3  A4
+		A2  A1         A3  A1  A2         A2  A1  A4  A3
+		A3  A4         A4  A5  A6         A5  A6  A7  A8
+		A4  A3         A6  A4  A5         A6  A5  A8  A7
+		A5  A6         A7  A8  A9         A9  A10 A11 A12
+		A6  A5         A9  A7  A8         A10 A9  A12 A11
+		..  ..         ..  ..  ..         ..  ..  ..  ..
+	Here we see layouts closely akin to 'RAID1E - Integrated
+	Offset Stripe Mirroring'.
+
 <#raid_devs>: The number of devices composing the array.
	Each device consists of two entries.  The first is the device
	containing the metadata (if any); the second is the one containing the

@@ -142,3 +170,5 @@ Version History
1.3.0	Added support for RAID 10
1.3.1	Allow device replacement/rebuild for RAID 10
1.3.2	Fix/improve redundancy checking for RAID10
+1.4.0	Non-functional change.  Removes arg from mapping function.
+1.4.1	Add RAID10 "far" and "offset" algorithm support.
@@ -15,7 +15,7 @@ Supported chips:
    Addresses scanned: -
    Datasheet: www.analog.com/static/imported-files/data_sheets/ADM1276.pdf

-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>


Description
@@ -4,9 +4,14 @@ Kernel driver adt7410
Supported chips:
  * Analog Devices ADT7410
    Prefix: 'adt7410'
-    Addresses scanned: I2C 0x48 - 0x4B
+    Addresses scanned: None
    Datasheet: Publicly available at the Analog Devices website
               http://www.analog.com/static/imported-files/data_sheets/ADT7410.pdf
+  * Analog Devices ADT7420
+    Prefix: 'adt7420'
+    Addresses scanned: None
+    Datasheet: Publicly available at the Analog Devices website
+               http://www.analog.com/static/imported-files/data_sheets/ADT7420.pdf

Author: Hartmut Knaack <knaack.h@gmx.de>

@@ -27,6 +32,10 @@ value per second or even just get one sample on demand for power saving.
Besides, it can completely power down its ADC, if power management is
required.

+The ADT7420 is register compatible, the only differences being the package,
+a slightly narrower operating temperature range (-40°C to +150°C), and a
+better accuracy (0.25°C instead of 0.50°C).
+
Configuration Notes
-------------------
@@ -49,7 +49,7 @@ Supported chips:
    Addresses scanned: I2C 0x18 - 0x1f

Author:
-	Guenter Roeck <guenter.roeck@ericsson.com>
+	Guenter Roeck <linux@roeck-us.net>


Description

@@ -8,7 +8,7 @@ Supported devices:
  Documentation:
	http://www.lineagepower.com/oem/pdf/CPLI2C.pdf

-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>


Description

@@ -19,7 +19,7 @@ Supported chips:
    Datasheet:
	http://www.national.com/pf/LM/LM5066.html

-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>


Description

@@ -5,13 +5,13 @@ Supported chips:
  * Linear Technology LTC2978
    Prefix: 'ltc2978'
    Addresses scanned: -
-    Datasheet: http://cds.linear.com/docs/Datasheet/2978fa.pdf
+    Datasheet: http://www.linear.com/product/ltc2978
  * Linear Technology LTC3880
    Prefix: 'ltc3880'
    Addresses scanned: -
-    Datasheet: http://cds.linear.com/docs/Datasheet/3880f.pdf
+    Datasheet: http://www.linear.com/product/ltc3880

-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>


Description

@@ -8,7 +8,7 @@ Supported chips:
    Datasheet:
	http://cds.linear.com/docs/Datasheet/42612fb.pdf

-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>


Description

@@ -7,7 +7,7 @@ Supported chips:
    Addresses scanned: -
    Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX16064.pdf

-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>


Description

@@ -24,7 +24,7 @@ Supported chips:
	http://datasheets.maxim-ic.com/en/ds/MAX16070-MAX16071.pdf


-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>


Description

@@ -27,7 +27,7 @@ Supported chips:
    Addresses scanned: -
    Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34461.pdf

-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>


Description

@@ -7,7 +7,7 @@ Supported chips:
    Addresses scanned: -
    Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8688.pdf

-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>


Description

@@ -34,7 +34,7 @@ Supported chips:
    Addresses scanned: -
    Datasheet: n.a.

-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>


Description

@@ -29,7 +29,7 @@ Supported chips:
	http://www.summitmicro.com/prod_select/summary/SMM766/SMM766_2086.pdf
	http://www.summitmicro.com/prod_select/summary/SMM766B/SMM766B_2122.pdf

-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>


Module Parameters

@@ -11,7 +11,7 @@ Supported chips:
	http://focus.ti.com/lit/ds/symlink/ucd9090.pdf
	http://focus.ti.com/lit/ds/symlink/ucd90910.pdf

-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>


Description

@@ -15,7 +15,7 @@ Supported chips:
	http://focus.ti.com/lit/ds/symlink/ucd9246.pdf
	http://focus.ti.com/lit/ds/symlink/ucd9248.pdf

-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>


Description

@@ -54,7 +54,7 @@ http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146401
http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146256


-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>


Description
@@ -2459,9 +2459,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
			In kernels built with CONFIG_RCU_NOCB_CPU=y, set
			the specified list of CPUs to be no-callback CPUs.
			Invocation of these CPUs' RCU callbacks will
-			be offloaded to "rcuoN" kthreads created for
-			that purpose.  This reduces OS jitter on the
+			be offloaded to "rcuox/N" kthreads created for
+			that purpose, where "x" is "b" for RCU-bh, "p"
+			for RCU-preempt, and "s" for RCU-sched, and "N"
+			is the CPU number.  This reduces OS jitter on the
			offloaded CPUs, which can be useful for HPC and
			real-time workloads.  It can also improve energy
			efficiency for asymmetric multiprocessors.

@@ -2485,6 +2488,17 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
			leaf rcu_node structure.  Useful for very large
			systems.

+	rcutree.jiffies_till_first_fqs= [KNL,BOOT]
+			Set delay from grace-period initialization to
+			first attempt to force quiescent states.
+			Units are jiffies, minimum value is zero,
+			and maximum value is HZ.
+
+	rcutree.jiffies_till_next_fqs= [KNL,BOOT]
+			Set delay between subsequent attempts to force
+			quiescent states.  Units are jiffies, minimum
+			value is one, and maximum value is HZ.
+
	rcutree.qhimark= [KNL,BOOT]
			Set threshold of queued
			RCU callbacks over which batch limiting is disabled.

@@ -2499,16 +2513,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
	rcutree.rcu_cpu_stall_timeout= [KNL,BOOT]
			Set timeout for RCU CPU stall warning messages.

-	rcutree.jiffies_till_first_fqs= [KNL,BOOT]
-			Set delay from grace-period initialization to
-			first attempt to force quiescent states.
-			Units are jiffies, minimum value is zero,
-			and maximum value is HZ.
+	rcutree.rcu_idle_gp_delay= [KNL,BOOT]
+			Set wakeup interval for idle CPUs that have
+			RCU callbacks (RCU_FAST_NO_HZ=y).

-	rcutree.jiffies_till_next_fqs= [KNL,BOOT]
-			Set delay between subsequent attempts to force
-			quiescent states.  Units are jiffies, minimum
-			value is one, and maximum value is HZ.
+	rcutree.rcu_idle_lazy_gp_delay= [KNL,BOOT]
+			Set wakeup interval for idle CPUs that have
+			only "lazy" RCU callbacks (RCU_FAST_NO_HZ=y).
+			Lazy RCU callbacks are those which RCU can
+			prove do nothing more than free memory.

	rcutorture.fqs_duration= [KNL,BOOT]
			Set duration of force_quiescent_state bursts.
Documentation/kernel-per-CPU-kthreads.txt (new file, 202 lines)

@@ -0,0 +1,202 @@
REDUCING OS JITTER DUE TO PER-CPU KTHREADS

This document lists per-CPU kthreads in the Linux kernel and presents
options to control their OS jitter.  Note that non-per-CPU kthreads are
not listed here.  To reduce OS jitter from non-per-CPU kthreads, bind
them to a "housekeeping" CPU dedicated to such work.


REFERENCES

o	Documentation/IRQ-affinity.txt:  Binding interrupts to sets of CPUs.

o	Documentation/cgroups:  Using cgroups to bind tasks to sets of CPUs.

o	man taskset:  Using the taskset command to bind tasks to sets
	of CPUs.

o	man sched_setaffinity:  Using the sched_setaffinity() system
	call to bind tasks to sets of CPUs.

o	/sys/devices/system/cpu/cpuN/online:  Control CPU N's hotplug state,
	writing "0" to offline and "1" to online.

o	In order to locate kernel-generated OS jitter on CPU N:

		cd /sys/kernel/debug/tracing
		echo 1 > max_graph_depth # Increase the "1" for more detail
		echo function_graph > current_tracer
		# run workload
		cat per_cpu/cpuN/trace


KTHREADS

Name: ehca_comp/%u
Purpose: Periodically process Infiniband-related work.
To reduce its OS jitter, do any of the following:
1.	Don't use eHCA Infiniband hardware, instead choosing hardware
	that does not require per-CPU kthreads.  This will prevent these
	kthreads from being created in the first place.  (This will
	work for most people, as this hardware, though important, is
	relatively old and is produced in relatively low unit volumes.)
2.	Do all eHCA-Infiniband-related work on other CPUs, including
	interrupts.
3.	Rework the eHCA driver so that its per-CPU kthreads are
	provisioned only on selected CPUs.


Name: irq/%d-%s
Purpose: Handle threaded interrupts.
To reduce its OS jitter, do the following:
1.	Use irq affinity to force the irq threads to execute on
	some other CPU.

Name: kcmtpd_ctr_%d
Purpose: Handle Bluetooth work.
To reduce its OS jitter, do one of the following:
1.	Don't use Bluetooth, in which case these kthreads won't be
	created in the first place.
2.	Use irq affinity to force Bluetooth-related interrupts to
	occur on some other CPU and furthermore initiate all
	Bluetooth activity on some other CPU.

Name: ksoftirqd/%u
Purpose: Execute softirq handlers when threaded or when under heavy load.
To reduce its OS jitter, each softirq vector must be handled
separately as follows:
TIMER_SOFTIRQ:  Do all of the following:
1.	To the extent possible, keep the CPU out of the kernel when it
	is non-idle, for example, by avoiding system calls and by forcing
	both kernel threads and interrupts to execute elsewhere.
2.	Build with CONFIG_HOTPLUG_CPU=y.  After boot completes, force
	the CPU offline, then bring it back online.  This forces
	recurring timers to migrate elsewhere.  If you are concerned
	with multiple CPUs, force them all offline before bringing the
	first one back online.  Once you have onlined the CPUs in question,
	do not offline any other CPUs, because doing so could force the
	timer back onto one of the CPUs in question.
NET_TX_SOFTIRQ and NET_RX_SOFTIRQ:  Do all of the following:
1.	Force networking interrupts onto other CPUs.
2.	Initiate any network I/O on other CPUs.
3.	Once your application has started, prevent CPU-hotplug operations
	from being initiated from tasks that might run on the CPU to
	be de-jittered.  (It is OK to force this CPU offline and then
	bring it back online before you start your application.)
BLOCK_SOFTIRQ:  Do all of the following:
1.	Force block-device interrupts onto some other CPU.
2.	Initiate any block I/O on other CPUs.
3.	Once your application has started, prevent CPU-hotplug operations
	from being initiated from tasks that might run on the CPU to
	be de-jittered.  (It is OK to force this CPU offline and then
	bring it back online before you start your application.)
BLOCK_IOPOLL_SOFTIRQ:  Do all of the following:
1.	Force block-device interrupts onto some other CPU.
2.	Initiate any block I/O and block-I/O polling on other CPUs.
3.	Once your application has started, prevent CPU-hotplug operations
	from being initiated from tasks that might run on the CPU to
	be de-jittered.  (It is OK to force this CPU offline and then
	bring it back online before you start your application.)
TASKLET_SOFTIRQ: Do one or more of the following:
1.	Avoid use of drivers that use tasklets.  (Such drivers will contain
	calls to things like tasklet_schedule().)
2.	Convert all drivers that you must use from tasklets to workqueues.
3.	Force interrupts for drivers using tasklets onto other CPUs,
	and also do I/O involving these drivers on other CPUs.
SCHED_SOFTIRQ: Do all of the following:
1.	Avoid sending scheduler IPIs to the CPU to be de-jittered,
	for example, ensure that at most one runnable kthread is present
	on that CPU.  If a thread that expects to run on the de-jittered
	CPU awakens, the scheduler will send an IPI that can result in
	a subsequent SCHED_SOFTIRQ.
2.	Build with CONFIG_RCU_NOCB_CPU=y, CONFIG_RCU_NOCB_CPU_ALL=y,
	CONFIG_NO_HZ_FULL=y, and, in addition, ensure that the CPU
	to be de-jittered is marked as an adaptive-ticks CPU using the
	"nohz_full=" boot parameter.  This reduces the number of
	scheduler-clock interrupts that the de-jittered CPU receives,
	minimizing its chances of being selected to do the load balancing
	work that runs in SCHED_SOFTIRQ context.
3.	To the extent possible, keep the CPU out of the kernel when it
	is non-idle, for example, by avoiding system calls and by
	forcing both kernel threads and interrupts to execute elsewhere.
	This further reduces the number of scheduler-clock interrupts
	received by the de-jittered CPU.
HRTIMER_SOFTIRQ:  Do all of the following:
1.	To the extent possible, keep the CPU out of the kernel when it
	is non-idle.  For example, avoid system calls and force both
	kernel threads and interrupts to execute elsewhere.
2.	Build with CONFIG_HOTPLUG_CPU=y.  Once boot completes, force the
	CPU offline, then bring it back online.  This forces recurring
	timers to migrate elsewhere.  If you are concerned with multiple
	CPUs, force them all offline before bringing the first one
	back online.  Once you have onlined the CPUs in question, do not
	offline any other CPUs, because doing so could force the timer
	back onto one of the CPUs in question.
RCU_SOFTIRQ:  Do at least one of the following:
1.	Offload callbacks and keep the CPU in either dyntick-idle or
	adaptive-ticks state by doing all of the following:
	a.	Build with CONFIG_RCU_NOCB_CPU=y, CONFIG_RCU_NOCB_CPU_ALL=y,
		CONFIG_NO_HZ_FULL=y, and, in addition, ensure that the CPU
		to be de-jittered is marked as an adaptive-ticks CPU using
		the "nohz_full=" boot parameter.  Bind the rcuo kthreads
		to housekeeping CPUs, which can tolerate OS jitter.
	b.	To the extent possible, keep the CPU out of the kernel
		when it is non-idle, for example, by avoiding system
		calls and by forcing both kernel threads and interrupts
		to execute elsewhere.
2.	Enable RCU to do its processing remotely via dyntick-idle by
	doing all of the following:
	a.	Build with CONFIG_NO_HZ=y and CONFIG_RCU_FAST_NO_HZ=y.
	b.	Ensure that the CPU goes idle frequently, allowing other
		CPUs to detect that it has passed through an RCU quiescent
		state.  If the kernel is built with CONFIG_NO_HZ_FULL=y,
		userspace execution also allows other CPUs to detect that
		the CPU in question has passed through a quiescent state.
	c.	To the extent possible, keep the CPU out of the kernel
		when it is non-idle, for example, by avoiding system
		calls and by forcing both kernel threads and interrupts
		to execute elsewhere.

Name: rcuc/%u
Purpose: Execute RCU callbacks in CONFIG_RCU_BOOST=y kernels.
To reduce its OS jitter, do at least one of the following:
1.	Build the kernel with CONFIG_PREEMPT=n.  This prevents these
	kthreads from being created in the first place, and also obviates
	the need for RCU priority boosting.  This approach is feasible
	for workloads that do not require high degrees of responsiveness.
2.	Build the kernel with CONFIG_RCU_BOOST=n.  This prevents these
	kthreads from being created in the first place.  This approach
	is feasible only if your workload never requires RCU priority
	boosting, for example, if you ensure frequent idle time on all
	CPUs that might execute within the kernel.
3.	Build with CONFIG_RCU_NOCB_CPU=y and CONFIG_RCU_NOCB_CPU_ALL=y,
	which offloads all RCU callbacks to kthreads that can be moved
	off of CPUs susceptible to OS jitter.  This approach prevents the
	rcuc/%u kthreads from having any work to do, so that they are
	never awakened.
4.	Ensure that the CPU never enters the kernel, and, in particular,
	avoid initiating any CPU hotplug operations on this CPU.  This is
	another way of preventing any callbacks from being queued on the
	CPU, again preventing the rcuc/%u kthreads from having any work
	to do.

Name: rcuob/%d, rcuop/%d, and rcuos/%d
Purpose: Offload RCU callbacks from the corresponding CPU.
To reduce its OS jitter, do at least one of the following:
1.	Use affinity, cgroups, or other mechanism to force these kthreads
	to execute on some other CPU.
2.	Build with CONFIG_RCU_NOCB_CPU=n, which will prevent these
	kthreads from being created in the first place.  However, please
	note that this will not eliminate OS jitter, but will instead
	shift it to RCU_SOFTIRQ.

Name: watchdog/%u
Purpose: Detect software lockups on each CPU.
To reduce its OS jitter, do at least one of the following:
1.	Build with CONFIG_LOCKUP_DETECTOR=n, which will prevent these
	kthreads from being created in the first place.
2.	Echo a zero to /proc/sys/kernel/watchdog to disable the
	watchdog timer.
3.	Echo a large number to /proc/sys/kernel/watchdog_thresh in
	order to reduce the frequency of OS jitter due to the watchdog
	timer down to a level that is acceptable for your workload.
@@ -1,6 +1,5 @@
-*=============*
-* OPP Library *
-*=============*
+Operating Performance Points (OPP) Library
+==========================================

(C) 2009-2010 Nishanth Menon <nm@ti.com>, Texas Instruments Incorporated

@@ -16,15 +15,31 @@ Contents

1. Introduction
===============
+1.1 What is an Operating Performance Point (OPP)?

Complex SoCs of today consist of multiple sub-modules working in conjunction.
In an operational system executing varied use cases, not all modules in the SoC
need to function at their highest performing frequency all the time. To
facilitate this, sub-modules in a SoC are grouped into domains, allowing some
-domains to run at lower voltage and frequency while other domains are loaded
-more. The set of discrete tuples consisting of frequency and voltage pairs that
+domains to run at lower voltage and frequency while other domains run at
+voltage/frequency pairs that are higher.
+
+The set of discrete tuples consisting of frequency and voltage pairs that
the device will support per domain are called Operating Performance Points or
OPPs.

+As an example:
+Let us consider an MPU device which supports the following:
+{300MHz at minimum voltage of 1V}, {800MHz at minimum voltage of 1.2V},
+{1GHz at minimum voltage of 1.3V}
+
+We can represent these as three OPPs as the following {Hz, uV} tuples:
+{300000000, 1000000}
+{800000000, 1200000}
+{1000000000, 1300000}
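
As an illustration, a SoC setup file could register the three example
OPPs with the opp_add() helper declared in include/linux/opp.h; this is
a sketch only, and "mpu_dev" is a hypothetical device pointer:

	#include <linux/opp.h>

	static int __init mpu_register_opps(struct device *mpu_dev)
	{
		/* {Hz, uV} tuples from the example above */
		opp_add(mpu_dev, 300000000, 1000000);
		opp_add(mpu_dev, 800000000, 1200000);
		opp_add(mpu_dev, 1000000000, 1300000);
		return 0;
	}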

+1.2 Operating Performance Points Library

OPP library provides a set of helper functions to organize and query the OPP
information. The library is located in drivers/base/power/opp.c and the header
is located in include/linux/opp.h. OPP library can be enabled by enabling
@@ -170,5 +170,5 @@ Reminder: sizeof() result is of type size_t.
Thank you for your cooperation and attention.


-By Randy Dunlap <rdunlap@xenotime.net> and
+By Randy Dunlap <rdunlap@infradead.org> and
Andrew Murray <amurray@mpc-data.co.uk>
MAINTAINERS (18 lines changed)
@@ -114,12 +114,6 @@ Maintainers List (try to look for most precise areas first)

-----------------------------------

-3C505 NETWORK DRIVER
-M:	Philip Blundell <philb@gnu.org>
-L:	netdev@vger.kernel.org
-S:	Maintained
-F:	drivers/net/ethernet/i825xx/3c505*
-
3C59X NETWORK DRIVER
M:	Steffen Klassert <klassert@mathematik.tu-chemnitz.de>
L:	netdev@vger.kernel.org

@@ -2361,12 +2355,6 @@ W: http://www.arm.linux.org.uk/
S:	Maintained
F:	drivers/video/cyber2000fb.*

-CYCLADES 2X SYNC CARD DRIVER
-M:	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
-W:	http://oops.ghostprotocols.net:81/blog
-S:	Maintained
-F:	drivers/net/wan/cycx*
-
CYCLADES ASYNC MUX DRIVER
W:	http://www.cyclades.com/
S:	Orphan

@@ -3067,12 +3055,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git
F:	drivers/video/s1d13xxxfb.c
F:	include/video/s1d13xxxfb.h

-ETHEREXPRESS-16 NETWORK DRIVER
-M:	Philip Blundell <philb@gnu.org>
-L:	netdev@vger.kernel.org
-S:	Maintained
-F:	drivers/net/ethernet/i825xx/eexpress.*
-
ETHERNET BRIDGE
M:	Stephen Hemminger <stephen@networkplumber.org>
L:	bridge@lists.linux-foundation.org
Makefile (2 lines changed)
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 9
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
NAME = Unicycling Gorilla

# *DOCUMENTATION*
@@ -4,6 +4,7 @@
 * initial bootloader stuff..
 */

+#include <asm/pal.h>

	.set	noreorder
	.globl	__start
@@ -120,7 +120,7 @@ ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
endif

-ccflags-y := -fpic -fno-builtin -I$(obj)
+ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
asflags-y := -Wa,-march=all -DZIMAGE

# Supply kernel BSS size to the decompressor via a linker symbol.
@@ -5,15 +5,15 @@

typedef struct {
#ifdef CONFIG_CPU_HAS_ASID
-	u64 id;
+	atomic64_t	id;
#endif
-	unsigned int vmalloc_seq;
+	unsigned int	vmalloc_seq;
} mm_context_t;

#ifdef CONFIG_CPU_HAS_ASID
#define ASID_BITS	8
#define ASID_MASK	((~0ULL) << ASID_BITS)
-#define ASID(mm)	((mm)->context.id & ~ASID_MASK)
+#define ASID(mm)	((mm)->context.id.counter & ~ASID_MASK)
#else
#define ASID(mm)	(0)
#endif

@@ -26,7 +26,7 @@ typedef struct {
 * modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com>
 */
typedef struct {
-	unsigned long	end_brk;
+	unsigned long		end_brk;
} mm_context_t;

#endif
@@ -25,7 +25,7 @@ void __check_vmalloc_seq(struct mm_struct *mm);
#ifdef CONFIG_CPU_HAS_ASID

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
-#define init_new_context(tsk,mm)	({ mm->context.id = 0; })
+#define init_new_context(tsk,mm)	({ atomic64_set(&mm->context.id, 0); 0; })

#else	/* !CONFIG_CPU_HAS_ASID */

@@ -34,10 +34,13 @@
#define TLB_V6_D_ASID	(1 << 17)
#define TLB_V6_I_ASID	(1 << 18)

+#define TLB_V6_BP	(1 << 19)
+
/* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
-#define TLB_V7_UIS_PAGE	(1 << 19)
-#define TLB_V7_UIS_FULL (1 << 20)
-#define TLB_V7_UIS_ASID (1 << 21)
+#define TLB_V7_UIS_PAGE	(1 << 20)
+#define TLB_V7_UIS_FULL (1 << 21)
+#define TLB_V7_UIS_ASID (1 << 22)
+#define TLB_V7_UIS_BP	(1 << 23)

#define TLB_BARRIER	(1 << 28)
#define TLB_L2CLEAN_FR	(1 << 29)		/* Feroceon */

@@ -150,7 +153,8 @@
#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
			 TLB_V6_I_FULL | TLB_V6_D_FULL | \
			 TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
-			 TLB_V6_I_ASID | TLB_V6_D_ASID)
+			 TLB_V6_I_ASID | TLB_V6_D_ASID | \
+			 TLB_V6_BP)

#ifdef CONFIG_CPU_TLB_V6
# define v6wbi_possible_flags	v6wbi_tlb_flags

@@ -166,9 +170,11 @@
#endif

#define v7wbi_tlb_flags_smp	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
-				 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
+				 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \
+				 TLB_V7_UIS_ASID | TLB_V7_UIS_BP)
#define v7wbi_tlb_flags_up	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
-				 TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)
+				 TLB_V6_U_FULL | TLB_V6_U_PAGE | \
+				 TLB_V6_U_ASID | TLB_V6_BP)

#ifdef CONFIG_CPU_TLB_V7

@@ -430,6 +436,20 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
	}
}

+static inline void local_flush_bp_all(void)
+{
+	const int zero = 0;
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	if (tlb_flag(TLB_V7_UIS_BP))
+		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));	/* BPIALLIS */
+	else if (tlb_flag(TLB_V6_BP))
+		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));	/* BPIALL */
+
+	if (tlb_flag(TLB_BARRIER))
+		isb();
+}
+
/*
 *	flush_pmd_entry
 *

@@ -480,6 +500,7 @@ static inline void clean_pmd_entry(void *pmd)
#define flush_tlb_kernel_page	local_flush_tlb_kernel_page
#define flush_tlb_range		local_flush_tlb_range
#define flush_tlb_kernel_range	local_flush_tlb_kernel_range
+#define flush_bp_all		local_flush_bp_all
#else
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);

@@ -487,6 +508,7 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
extern void flush_tlb_kernel_page(unsigned long kaddr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_bp_all(void);
#endif

/*
@@ -404,7 +404,7 @@
#define __NR_setns			(__NR_SYSCALL_BASE+375)
#define __NR_process_vm_readv		(__NR_SYSCALL_BASE+376)
#define __NR_process_vm_writev		(__NR_SYSCALL_BASE+377)
-					/* 378 for kcmp */
+#define __NR_kcmp			(__NR_SYSCALL_BASE+378)
#define __NR_finit_module		(__NR_SYSCALL_BASE+379)

/*
@@ -110,7 +110,7 @@ int main(void)
  BLANK();
#endif
#ifdef CONFIG_CPU_HAS_ASID
-  DEFINE(MM_CONTEXT_ID,		offsetof(struct mm_struct, context.id));
+  DEFINE(MM_CONTEXT_ID,		offsetof(struct mm_struct, context.id.counter));
  BLANK();
#endif
  DEFINE(VMA_VM_MM,		offsetof(struct vm_area_struct, vm_mm));
@@ -387,7 +387,7 @@
/* 375 */	CALL(sys_setns)
		CALL(sys_process_vm_readv)
		CALL(sys_process_vm_writev)
-		CALL(sys_ni_syscall)	/* reserved for sys_kcmp */
+		CALL(sys_kcmp)
		CALL(sys_finit_module)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
@@ -184,13 +184,22 @@ __create_page_tables:
	orr	r3, r3, #3			@ PGD block type
	mov	r6, #4				@ PTRS_PER_PGD
	mov	r7, #1 << (55 - 32)		@ L_PGD_SWAPPER
-1:	str	r3, [r0], #4			@ set bottom PGD entry bits
+1:
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	str	r7, [r0], #4			@ set top PGD entry bits
+	str	r3, [r0], #4			@ set bottom PGD entry bits
+#else
+	str	r3, [r0], #4			@ set bottom PGD entry bits
	str	r7, [r0], #4			@ set top PGD entry bits
+#endif
	add	r3, r3, #0x1000			@ next PMD table
	subs	r6, r6, #1
	bne	1b

	add	r4, r4, #0x1000			@ point to the PMD tables
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	add	r4, r4, #4			@ we only write the bottom word
+#endif
#endif

	ldr	r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags

@@ -258,6 +267,11 @@ __create_page_tables:
	addne	r6, r6, #1 << SECTION_SHIFT
	strne	r6, [r3]

+#if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
+	sub	r4, r4, #4			@ Fixup page table pointer
+						@ for 64-bit descriptors
+#endif
+
#ifdef CONFIG_DEBUG_LL
#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
	/*

@@ -276,12 +290,16 @@ __create_page_tables:
	orr	r3, r7, r3, lsl #SECTION_SHIFT
#ifdef CONFIG_ARM_LPAE
	mov	r7, #1 << (54 - 32)		@ XN
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	str	r7, [r0], #4
+	str	r3, [r0], #4
+#else
+	str	r3, [r0], #4
+	str	r7, [r0], #4
+#endif
#else
	orr	r3, r3, #PMD_SECT_XN
-#endif
	str	r3, [r0], #4
-#ifdef CONFIG_ARM_LPAE
-	str	r7, [r0], #4
#endif

#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
@@ -1023,7 +1023,7 @@ out_mdbgen:
static int __cpuinit dbg_reset_notify(struct notifier_block *self,
				      unsigned long action, void *cpu)
{
-	if (action == CPU_ONLINE)
+	if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
		smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);

	return NOTIFY_OK;
@@ -400,7 +400,7 @@ __hw_perf_event_init(struct perf_event *event)
	}

	if (event->group_leader != event) {
-		if (validate_group(event) != 0);
+		if (validate_group(event) != 0)
			return -EINVAL;
	}

@@ -484,7 +484,7 @@ const struct dev_pm_ops armpmu_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
};

-static void __init armpmu_init(struct arm_pmu *armpmu)
+static void armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
	mutex_init(&armpmu->reserve_mutex);
@@ -774,7 +774,7 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
/*
 * PMXEVTYPER: Event selection reg
 */
-#define	ARMV7_EVTYPE_MASK	0xc00000ff	/* Mask for writable bits */
+#define	ARMV7_EVTYPE_MASK	0xc80000ff	/* Mask for writable bits */
#define	ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */

/*
@@ -285,6 +285,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
+	local_flush_bp_all();
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

@@ -64,6 +64,11 @@ static inline void ipi_flush_tlb_kernel_range(void *arg)
	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

+static inline void ipi_flush_bp_all(void *ignored)
+{
+	local_flush_bp_all();
+}
+
void flush_tlb_all(void)
{
	if (tlb_ops_need_broadcast())

@@ -127,3 +132,10 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
		local_flush_tlb_kernel_range(start, end);
}

+void flush_bp_all(void)
+{
+	if (tlb_ops_need_broadcast())
+		on_each_cpu(ipi_flush_bp_all, NULL, 1);
+	else
+		local_flush_bp_all();
+}
@@ -22,6 +22,7 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>

+#include <asm/smp_plat.h>
#include <asm/smp_twd.h>
#include <asm/localtimer.h>

@@ -373,6 +374,9 @@ void __init twd_local_timer_of_register(void)
	struct device_node *np;
	int err;

+	if (!is_smp() || !setup_max_cpus)
+		return;
+
	np = of_find_matching_node(NULL, twd_of_match);
	if (!np)
		return;
@@ -68,6 +68,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
	ret = __cpu_suspend(arg, fn);
	if (ret == 0) {
		cpu_switch_mm(mm->pgd, mm);
+		local_flush_bp_all();
		local_flush_tlb_all();
	}

@@ -19,9 +19,9 @@
1:	subs	r2, r2, #4		@ 1 do we have enough
	blt	5f			@ 1 bytes to align with?
	cmp	r3, #2			@ 1
-	strltb	r1, [r0], #1		@ 1
-	strleb	r1, [r0], #1		@ 1
-	strb	r1, [r0], #1		@ 1
+	strltb	r1, [ip], #1		@ 1
+	strleb	r1, [ip], #1		@ 1
+	strb	r1, [ip], #1		@ 1
	add	r2, r2, r3		@ 1 (r2 = r2 - (4 - r3))
/*
 * The pointer is now aligned and the length is adjusted.  Try doing the

@@ -29,10 +29,14 @@
 */

ENTRY(memset)
-	ands	r3, r0, #3		@ 1 unaligned?
+/*
+ * Preserve the contents of r0 for the return value.
+ */
+	mov	ip, r0
+	ands	r3, ip, #3		@ 1 unaligned?
	bne	1b			@ 1
/*
- * we know that the pointer in r0 is aligned to a word boundary.
+ * we know that the pointer in ip is aligned to a word boundary.
 */
	orr	r1, r1, r1, lsl #8
	orr	r1, r1, r1, lsl #16

@@ -43,29 +47,28 @@ ENTRY(memset)
#if ! CALGN(1)+0

/*
- * We need an extra register for this loop - save the return address and
- * use the LR
+ * We need 2 extra registers for this loop - use r8 and the LR
 */
-	str	lr, [sp, #-4]!
-	mov	ip, r1
+	stmfd	sp!, {r8, lr}
+	mov	r8, r1
	mov	lr, r1

2:	subs	r2, r2, #64
-	stmgeia	r0!, {r1, r3, ip, lr}	@ 64 bytes at a time.
-	stmgeia	r0!, {r1, r3, ip, lr}
-	stmgeia	r0!, {r1, r3, ip, lr}
-	stmgeia	r0!, {r1, r3, ip, lr}
+	stmgeia	ip!, {r1, r3, r8, lr}	@ 64 bytes at a time.
+	stmgeia	ip!, {r1, r3, r8, lr}
+	stmgeia	ip!, {r1, r3, r8, lr}
+	stmgeia	ip!, {r1, r3, r8, lr}
	bgt	2b
-	ldmeqfd	sp!, {pc}		@ Now <64 bytes to go.
+	ldmeqfd	sp!, {r8, pc}		@ Now <64 bytes to go.
/*
 * No need to correct the count; we're only testing bits from now on
 */
	tst	r2, #32
-	stmneia	r0!, {r1, r3, ip, lr}
-	stmneia	r0!, {r1, r3, ip, lr}
+	stmneia	ip!, {r1, r3, r8, lr}
+	stmneia	ip!, {r1, r3, r8, lr}
	tst	r2, #16
-	stmneia	r0!, {r1, r3, ip, lr}
-	ldr	lr, [sp], #4
+	stmneia	ip!, {r1, r3, r8, lr}
+	ldmfd	sp!, {r8, lr}

#else

@@ -74,54 +77,54 @@ ENTRY(memset)
 * whole cache lines at once.
 */

-	stmfd	sp!, {r4-r7, lr}
+	stmfd	sp!, {r4-r8, lr}
	mov	r4, r1
	mov	r5, r1
	mov	r6, r1
	mov	r7, r1
-	mov	ip, r1
+	mov	r8, r1
	mov	lr, r1

	cmp	r2, #96
-	tstgt	r0, #31
+	tstgt	ip, #31
	ble	3f

-	and	ip, r0, #31
-	rsb	ip, ip, #32
-	sub	r2, r2, ip
-	movs	ip, ip, lsl #(32 - 4)
-	stmcsia	r0!, {r4, r5, r6, r7}
-	stmmiia	r0!, {r4, r5}
-	tst	ip, #(1 << 30)
-	mov	ip, r1
-	strne	r1, [r0], #4
+	and	r8, ip, #31
+	rsb	r8, r8, #32
+	sub	r2, r2, r8
+	movs	r8, r8, lsl #(32 - 4)
+	stmcsia	ip!, {r4, r5, r6, r7}
+	stmmiia	ip!, {r4, r5}
+	tst	r8, #(1 << 30)
+	mov	r8, r1
+	strne	r1, [ip], #4

3:	subs	r2, r2, #64
-	stmgeia	r0!, {r1, r3-r7, ip, lr}
-	stmgeia	r0!, {r1, r3-r7, ip, lr}
+	stmgeia	ip!, {r1, r3-r8, lr}
+	stmgeia	ip!, {r1, r3-r8, lr}
	bgt	3b
-	ldmeqfd	sp!, {r4-r7, pc}
+	ldmeqfd	sp!, {r4-r8, pc}

	tst	r2, #32
-	stmneia	r0!, {r1, r3-r7, ip, lr}
+	stmneia	ip!, {r1, r3-r8, lr}
	tst	r2, #16
-	stmneia	r0!, {r4-r7}
-	ldmfd	sp!, {r4-r7, lr}
+	stmneia	ip!, {r4-r7}
+	ldmfd	sp!, {r4-r8, lr}

#endif

4:	tst	r2, #8
-	stmneia	r0!, {r1, r3}
+	stmneia	ip!, {r1, r3}
	tst	r2, #4
-	strne	r1, [r0], #4
+	strne	r1, [ip], #4
/*
 * When we get here, we've got less than 4 bytes to zero.  We
 * may have an unaligned pointer as well.
 */
5:	tst	r2, #2
-	strneb	r1, [r0], #1
-	strneb	r1, [r0], #1
+	strneb	r1, [ip], #1
+	strneb	r1, [ip], #1
	tst	r2, #1
-	strneb	r1, [r0], #1
+	strneb	r1, [ip], #1
	mov	pc, lr
ENDPROC(memset)
@@ -168,7 +168,7 @@ void __init netx_init_irq(void)
{
	int irq;

-	vic_init(io_p2v(NETX_PA_VIC), 0, ~0, 0);
+	vic_init(io_p2v(NETX_PA_VIC), NETX_IRQ_VIC_START, ~0, 0);

	for (irq = NETX_IRQ_HIF_CHAINED(0); irq <= NETX_IRQ_HIF_LAST; irq++) {
		irq_set_chip_and_handler(irq, &netx_hif_chip,
@@ -17,42 +17,42 @@
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

-#define NETX_IRQ_VIC_START	0
-#define NETX_IRQ_SOFTINT	0
-#define NETX_IRQ_TIMER0		1
-#define NETX_IRQ_TIMER1		2
-#define NETX_IRQ_TIMER2		3
-#define NETX_IRQ_SYSTIME_NS	4
-#define NETX_IRQ_SYSTIME_S	5
-#define NETX_IRQ_GPIO_15	6
-#define NETX_IRQ_WATCHDOG	7
-#define NETX_IRQ_UART0		8
-#define NETX_IRQ_UART1		9
-#define NETX_IRQ_UART2		10
-#define NETX_IRQ_USB		11
-#define NETX_IRQ_SPI		12
-#define NETX_IRQ_I2C		13
-#define NETX_IRQ_LCD		14
-#define NETX_IRQ_HIF		15
-#define NETX_IRQ_GPIO_0_14	16
-#define NETX_IRQ_XPEC0		17
-#define NETX_IRQ_XPEC1		18
-#define NETX_IRQ_XPEC2		19
-#define NETX_IRQ_XPEC3		20
-#define NETX_IRQ_XPEC(no)	(17 + (no))
-#define NETX_IRQ_MSYNC0		21
-#define NETX_IRQ_MSYNC1		22
-#define NETX_IRQ_MSYNC2		23
-#define NETX_IRQ_MSYNC3		24
-#define NETX_IRQ_IRQ_PHY	25
-#define NETX_IRQ_ISO_AREA	26
+#define NETX_IRQ_VIC_START	64
+#define NETX_IRQ_SOFTINT	(NETX_IRQ_VIC_START + 0)
+#define NETX_IRQ_TIMER0		(NETX_IRQ_VIC_START + 1)
+#define NETX_IRQ_TIMER1		(NETX_IRQ_VIC_START + 2)
+#define NETX_IRQ_TIMER2		(NETX_IRQ_VIC_START + 3)
+#define NETX_IRQ_SYSTIME_NS	(NETX_IRQ_VIC_START + 4)
+#define NETX_IRQ_SYSTIME_S	(NETX_IRQ_VIC_START + 5)
+#define NETX_IRQ_GPIO_15	(NETX_IRQ_VIC_START + 6)
+#define NETX_IRQ_WATCHDOG	(NETX_IRQ_VIC_START + 7)
+#define NETX_IRQ_UART0		(NETX_IRQ_VIC_START + 8)
+#define NETX_IRQ_UART1		(NETX_IRQ_VIC_START + 9)
+#define NETX_IRQ_UART2		(NETX_IRQ_VIC_START + 10)
+#define NETX_IRQ_USB		(NETX_IRQ_VIC_START + 11)
+#define NETX_IRQ_SPI		(NETX_IRQ_VIC_START + 12)
+#define NETX_IRQ_I2C		(NETX_IRQ_VIC_START + 13)
+#define NETX_IRQ_LCD		(NETX_IRQ_VIC_START + 14)
+#define NETX_IRQ_HIF		(NETX_IRQ_VIC_START + 15)
+#define NETX_IRQ_GPIO_0_14	(NETX_IRQ_VIC_START + 16)
+#define NETX_IRQ_XPEC0		(NETX_IRQ_VIC_START + 17)
+#define NETX_IRQ_XPEC1		(NETX_IRQ_VIC_START + 18)
+#define NETX_IRQ_XPEC2		(NETX_IRQ_VIC_START + 19)
+#define NETX_IRQ_XPEC3		(NETX_IRQ_VIC_START + 20)
+#define NETX_IRQ_XPEC(no)	(NETX_IRQ_VIC_START + 17 + (no))
+#define NETX_IRQ_MSYNC0		(NETX_IRQ_VIC_START + 21)
+#define NETX_IRQ_MSYNC1		(NETX_IRQ_VIC_START + 22)
+#define NETX_IRQ_MSYNC2		(NETX_IRQ_VIC_START + 23)
+#define NETX_IRQ_MSYNC3		(NETX_IRQ_VIC_START + 24)
+#define NETX_IRQ_IRQ_PHY	(NETX_IRQ_VIC_START + 25)
+#define NETX_IRQ_ISO_AREA	(NETX_IRQ_VIC_START + 26)
/* int 27 is reserved */
/* int 28 is reserved */
-#define NETX_IRQ_TIMER3		29
-#define NETX_IRQ_TIMER4		30
+#define NETX_IRQ_TIMER3		(NETX_IRQ_VIC_START + 29)
+#define NETX_IRQ_TIMER4		(NETX_IRQ_VIC_START + 30)
/* int 31 is reserved */

-#define NETX_IRQS		32
+#define NETX_IRQS		(NETX_IRQ_VIC_START + 32)

/* for multiplexed irqs on gpio 0..14 */
#define NETX_IRQ_GPIO(x)	(NETX_IRQS + (x))
@ -152,9 +152,9 @@ static int is_reserved_asid(u64 asid)
return 0;
}

static void new_context(struct mm_struct *mm, unsigned int cpu)
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
u64 asid = mm->context.id;
u64 asid = atomic64_read(&mm->context.id);
u64 generation = atomic64_read(&asid_generation);

if (asid != 0 && is_reserved_asid(asid)) {
@ -181,13 +181,14 @@ static void new_context(struct mm_struct *mm, unsigned int cpu)
cpumask_clear(mm_cpumask(mm));
}

mm->context.id = asid;
return asid;
}

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
unsigned long flags;
unsigned int cpu = smp_processor_id();
u64 asid;

if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
__check_vmalloc_seq(mm);
@ -198,20 +199,26 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
*/
cpu_set_reserved_ttbr0();

if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
&& atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
asid = atomic64_read(&mm->context.id);
if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
&& atomic64_xchg(&per_cpu(active_asids, cpu), asid))
goto switch_mm_fastpath;

raw_spin_lock_irqsave(&cpu_asid_lock, flags);
/* Check that our ASID belongs to the current generation. */
if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
new_context(mm, cpu);
asid = atomic64_read(&mm->context.id);
if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
asid = new_context(mm, cpu);
atomic64_set(&mm->context.id, asid);
}

atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
cpumask_set_cpu(cpu, mm_cpumask(mm));

if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
local_flush_bp_all();
local_flush_tlb_all();
}

atomic64_set(&per_cpu(active_asids, cpu), asid);
cpumask_set_cpu(cpu, mm_cpumask(mm));
raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
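
Editor's note: the two context.c hunks above rely on packing a rollover "generation" counter into the upper bits of the 64-bit context ID, with the hardware ASID in the low ASID_BITS; the xor-and-shift test is how the fast path detects a stale generation. A minimal userspace sketch of that split (the ASID_BITS value here is illustrative, not the kernel's configuration):

    #include <stdint.h>
    #include <stdio.h>

    #define ASID_BITS 8                        /* illustrative width only */
    #define ASID_MASK ((1ULL << ASID_BITS) - 1)

    /* Nonzero when 'id' was allocated under an older generation, i.e. its
     * upper bits no longer match the current generation counter. */
    static int asid_is_stale(uint64_t id, uint64_t current_generation)
    {
        return ((id ^ current_generation) >> ASID_BITS) != 0;
    }

    int main(void)
    {
        uint64_t generation = 3ULL << ASID_BITS;  /* generation 3 */
        uint64_t id = (2ULL << ASID_BITS) | 42;   /* ASID 42, generation 2 */

        printf("stale=%d asid=%llu\n", asid_is_stale(id, generation),
               (unsigned long long)(id & ASID_MASK));  /* stale=1 asid=42 */
        return 0;
    }
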
@ -141,6 +141,7 @@ void setup_mm_for_reboot(void)
{
/* Switch to the identity mapping. */
cpu_switch_mm(idmap_pgd, &init_mm);
local_flush_bp_all();

#ifdef CONFIG_CPU_HAS_ASID
/*
@ -48,7 +48,7 @@
ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
mmid r1, r1 @ get mm->context.id
and r3, r1, #0xff
asid r3, r1
mov r3, r3, lsl #(48 - 32) @ ASID
mcrr p15, 0, r0, r3, c2 @ set TTB 0
isb
@ -619,6 +619,7 @@ static struct file_system_type pfm_fs_type = {
.mount = pfmfs_mount,
.kill_sb = kill_anon_super,
};
MODULE_ALIAS_FS("pfmfs");

DEFINE_PER_CPU(unsigned long, pfm_syst_info);
DEFINE_PER_CPU(struct task_struct *, pmu_owner);
@ -100,9 +100,6 @@ typedef unsigned long elf_fpregset_t;

#define ELF_PLATFORM (NULL)

#define SET_PERSONALITY(ex) \
set_personality(PER_LINUX | (current->personality & (~PER_MASK)))

#define STACK_RND_MASK (0)

#ifdef CONFIG_METAG_USER_TCM
@ -40,6 +40,7 @@ endchoice

config NUMA
bool "Non Uniform Memory Access (NUMA) Support"
select ARCH_WANT_NUMA_VARIABLE_LOCALITY
help
Some Meta systems have MMU-mappable on-chip memories with
lower latencies than main memory. This enables support for
@ -113,7 +113,7 @@
STEPUP4((t)+16, fn)

_GLOBAL(powerpc_sha_transform)
PPC_STLU r1,-STACKFRAMESIZE(r1)
PPC_STLU r1,-INT_FRAME_SIZE(r1)
SAVE_8GPRS(14, r1)
SAVE_10GPRS(22, r1)

@ -175,5 +175,5 @@ _GLOBAL(powerpc_sha_transform)

REST_8GPRS(14, r1)
REST_10GPRS(22, r1)
addi r1,r1,STACKFRAMESIZE
addi r1,r1,INT_FRAME_SIZE
blr
@ -52,8 +52,6 @@
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()

#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)

/* Macro for generating the ***_bits() functions */
#define DEFINE_BITOP(fn, op, prefix, postfix) \
static __inline__ void fn(unsigned long mask, \
@ -266,7 +266,8 @@
#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
#define FSCR_TAR (1<<8) /* Enable Target Adress Register */
#define FSCR_TAR (1 << (63-55)) /* Enable Target Address Register */
#define FSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */
#define SPRN_TAR 0x32f /* Target Address Register */
#define SPRN_LPCR 0x13E /* LPAR Control Register */
#define LPCR_VPM0 (1ul << (63-0))
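
Editor's note: (1 << (63-55)) is numerically identical to the old (1 << 8); the rewrite documents the Power ISA's big-endian bit numbering, where architectural bit N counts from the most-significant end of the 64-bit register. A hedged sketch of the convention (the macro name mirrors the idiom, not a definition from this diff):

    #include <stdint.h>
    #include <stdio.h>

    /* Power ISA documentation numbers bits from the MSB (bit 0) of a 64-bit
     * register, so documented bit N is host value 1 << (63 - N). */
    #define PPC_BIT(n) (1ULL << (63 - (n)))

    int main(void)
    {
        /* FSCR "TAR" is architectural bit 55: 1 << (63-55) == 1 << 8. */
        printf("FSCR_TAR = 0x%llx\n", (unsigned long long)PPC_BIT(55));
        return 0;
    }
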
@ -358,3 +358,4 @@ SYSCALL_SPU(setns)
COMPAT_SYS(process_vm_readv)
COMPAT_SYS(process_vm_writev)
SYSCALL(finit_module)
SYSCALL(ni_syscall) /* sys_kcmp */
@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>


#define __NR_syscalls 354
#define __NR_syscalls 355

#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
@ -376,6 +376,7 @@
#define __NR_process_vm_readv 351
#define __NR_process_vm_writev 352
#define __NR_finit_module 353
#define __NR_kcmp 354


#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
@ -48,6 +48,7 @@ _GLOBAL(__restore_cpu_power7)

_GLOBAL(__setup_cpu_power8)
mflr r11
bl __init_FSCR
bl __init_hvmode_206
mtlr r11
beqlr
@ -56,13 +57,13 @@ _GLOBAL(__setup_cpu_power8)
mfspr r3,SPRN_LPCR
oris r3, r3, LPCR_AIL_3@h
bl __init_LPCR
bl __init_FSCR
bl __init_TLB
mtlr r11
blr

_GLOBAL(__restore_cpu_power8)
mflr r11
bl __init_FSCR
mfmsr r3
rldicl. r0,r3,4,63
beqlr
@ -115,7 +116,7 @@ __init_LPCR:

__init_FSCR:
mfspr r3,SPRN_FSCR
ori r3,r3,FSCR_TAR
ori r3,r3,FSCR_TAR|FSCR_DSCR
mtspr SPRN_FSCR,r3
blr

@ -74,13 +74,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
mflr r10 ; \
ld r12,PACAKBASE(r13) ; \
LOAD_HANDLER(r12, system_call_entry_direct) ; \
mtlr r12 ; \
mtctr r12 ; \
mfspr r12,SPRN_SRR1 ; \
/* Re-use of r13... No spare regs to do this */ \
li r13,MSR_RI ; \
mtmsrd r13,1 ; \
GET_PACA(r13) ; /* get r13 back */ \
blr ;
bctr ;
#else
/* We can branch directly */
#define SYSCALL_PSERIES_2_DIRECT \
@ -749,6 +749,7 @@ static struct file_system_type spufs_type = {
.mount = spufs_mount,
.kill_sb = kill_litter_super,
};
MODULE_ALIAS_FS("spufs");

static int __init spufs_init(void)
{
@ -23,6 +23,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <asm/hvcall.h>
#include <asm/hvcserver.h>
@ -188,9 +189,9 @@ int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head,
= (unsigned int)last_p_partition_ID;

/* copy the Null-term char too */
strncpy(&next_partner_info->location_code[0],
strlcpy(&next_partner_info->location_code[0],
(char *)&pi_buff[2],
strlen((char *)&pi_buff[2]) + 1);
sizeof(next_partner_info->location_code));

list_add_tail(&(next_partner_info->node), head);
next_partner_info = NULL;
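
Editor's note: the hunk above swaps a strlen-derived strncpy for strlcpy bounded by the destination's size, which both prevents overflow on an oversized source and guarantees NUL termination. A small userspace illustration (glibc lacks strlcpy, so a minimal stand-in is defined here; the location-code string is made up):

    #include <stdio.h>
    #include <string.h>

    /* Minimal strlcpy for illustration; BSD libc and the kernel provide the
     * real one. Copies at most size-1 bytes and always NUL-terminates when
     * size > 0, returning the length it tried to create. */
    static size_t my_strlcpy(char *dst, const char *src, size_t size)
    {
        size_t len = strlen(src);
        if (size) {
            size_t n = len < size - 1 ? len : size - 1;
            memcpy(dst, src, n);
            dst[n] = '\0';
        }
        return len;
    }

    int main(void)
    {
        char code[8];
        /* A too-long source is truncated to fit, unlike the old
         * strncpy(dst, src, strlen(src) + 1), which trusted the source. */
        my_strlcpy(code, "U9406.520.10F4D51", sizeof(code));
        printf("%s\n", code);  /* prints "U9406.5" */
        return 0;
    }
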
@ -456,6 +456,7 @@ static struct file_system_type hypfs_type = {
.mount = hypfs_mount,
.kill_sb = hypfs_kill_super
};
MODULE_ALIAS_FS("s390_hypfs");

static const struct super_operations hypfs_s_ops = {
.statfs = simple_statfs,
@ -288,6 +288,9 @@ long compat_sys_sync_file_range2(int fd, unsigned int flags,
long compat_sys_fallocate(int fd, int mode,
u32 offset_lo, u32 offset_hi,
u32 len_lo, u32 len_hi);
long compat_sys_llseek(unsigned int fd, unsigned int offset_high,
unsigned int offset_low, loff_t __user * result,
unsigned int origin);

/* Assembly trampoline to avoid clobbering r0. */
long _compat_sys_rt_sigreturn(void);
@ -32,50 +32,65 @@
* adapt the usual convention.
*/

long compat_sys_truncate64(char __user *filename, u32 dummy, u32 low, u32 high)
COMPAT_SYSCALL_DEFINE4(truncate64, char __user *, filename, u32, dummy,
u32, low, u32, high)
{
return sys_truncate(filename, ((loff_t)high << 32) | low);
}

long compat_sys_ftruncate64(unsigned int fd, u32 dummy, u32 low, u32 high)
COMPAT_SYSCALL_DEFINE4(ftruncate64, unsigned int, fd, u32, dummy,
u32, low, u32, high)
{
return sys_ftruncate(fd, ((loff_t)high << 32) | low);
}

long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count,
u32 dummy, u32 low, u32 high)
COMPAT_SYSCALL_DEFINE6(pread64, unsigned int, fd, char __user *, ubuf,
size_t, count, u32, dummy, u32, low, u32, high)
{
return sys_pread64(fd, ubuf, count, ((loff_t)high << 32) | low);
}

long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count,
u32 dummy, u32 low, u32 high)
COMPAT_SYSCALL_DEFINE6(pwrite64, unsigned int, fd, char __user *, ubuf,
size_t, count, u32, dummy, u32, low, u32, high)
{
return sys_pwrite64(fd, ubuf, count, ((loff_t)high << 32) | low);
}

long compat_sys_lookup_dcookie(u32 low, u32 high, char __user *buf, size_t len)
COMPAT_SYSCALL_DEFINE4(lookup_dcookie, u32, low, u32, high,
char __user *, buf, size_t, len)
{
return sys_lookup_dcookie(((loff_t)high << 32) | low, buf, len);
}

long compat_sys_sync_file_range2(int fd, unsigned int flags,
u32 offset_lo, u32 offset_hi,
u32 nbytes_lo, u32 nbytes_hi)
COMPAT_SYSCALL_DEFINE6(sync_file_range2, int, fd, unsigned int, flags,
u32, offset_lo, u32, offset_hi,
u32, nbytes_lo, u32, nbytes_hi)
{
return sys_sync_file_range(fd, ((loff_t)offset_hi << 32) | offset_lo,
((loff_t)nbytes_hi << 32) | nbytes_lo,
flags);
}

long compat_sys_fallocate(int fd, int mode,
u32 offset_lo, u32 offset_hi,
u32 len_lo, u32 len_hi)
COMPAT_SYSCALL_DEFINE6(fallocate, int, fd, int, mode,
u32, offset_lo, u32, offset_hi,
u32, len_lo, u32, len_hi)
{
return sys_fallocate(fd, mode, ((loff_t)offset_hi << 32) | offset_lo,
((loff_t)len_hi << 32) | len_lo);
}

/*
* Avoid bug in generic sys_llseek() that specifies offset_high and
* offset_low as "unsigned long", thus making it possible to pass
* a sign-extended high 32 bits in offset_low.
*/
COMPAT_SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned int, offset_high,
unsigned int, offset_low, loff_t __user *, result,
unsigned int, origin)
{
return sys_llseek(fd, offset_high, offset_low, result, origin);
}

/* Provide the compat syscall number to call mapping. */
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
@ -83,6 +98,7 @@ long compat_sys_fallocate(int fd, int mode,
/* See comments in sys.c */
#define compat_sys_fadvise64_64 sys32_fadvise64_64
#define compat_sys_readahead sys32_readahead
#define sys_llseek compat_sys_llseek

/* Call the assembly trampolines where necessary. */
#define compat_sys_rt_sigreturn _compat_sys_rt_sigreturn
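
Editor's note: every wrapper in the hunk above rebuilds a 64-bit loff_t from the two u32 halves a 32-bit caller passes, as ((loff_t)high << 32) | low; the cast must happen before the shift or the high word is lost. A self-contained sketch of the combine:

    #include <stdint.h>
    #include <stdio.h>

    /* Rebuild a 64-bit offset from two 32-bit register halves. Casting
     * before the shift is essential: shifting a 32-bit value left by 32
     * is undefined behavior and would discard the high word. */
    static int64_t combine64(uint32_t low, uint32_t high)
    {
        return ((int64_t)high << 32) | low;
    }

    int main(void)
    {
        printf("%lld\n", (long long)combine64(0x89abcdefu, 0x01234567u));
        /* prints 81985529216486895 == 0x0123456789abcdef */
        return 0;
    }
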
@ -14,13 +14,29 @@
* analysis of kexec-tools; if other broken bootloaders initialize a
* different set of fields we will need to figure out how to disambiguate.
*
* Note: efi_info is commonly left uninitialized, but that field has a
* private magic, so it is better to leave it unchanged.
*/
static void sanitize_boot_params(struct boot_params *boot_params)
{
/*
* IMPORTANT NOTE TO BOOTLOADER AUTHORS: do not simply clear
* this field. The purpose of this field is to guarantee
* compliance with the x86 boot spec located in
* Documentation/x86/boot.txt . That spec says that the
* *whole* structure should be cleared, after which only the
* portion defined by struct setup_header (boot_params->hdr)
* should be copied in.
*
* If you're having an issue because the sentinel is set, you
* need to change the whole structure to be cleared, not this
* (or any other) individual field, or you will soon have
* problems again.
*/
if (boot_params->sentinel) {
/*fields in boot_params are not valid, clear them */
/* fields in boot_params are left uninitialized, clear them */
memset(&boot_params->olpc_ofw_header, 0,
(char *)&boot_params->alt_mem_k -
(char *)&boot_params->efi_info -
(char *)&boot_params->olpc_ofw_header);
memset(&boot_params->kbd_status, 0,
(char *)&boot_params->hdr -
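
Editor's note: the memset bounds above are computed by subtracting member addresses, wiping everything from one field up to (but not including) another; the change narrows the first range to stop at efi_info. A sketch of the address-difference idiom (the struct layout here is a stand-in, not the real boot_params):

    #include <stdio.h>
    #include <string.h>

    struct params {               /* illustrative layout only */
        char keep_a[16];
        char clear_me[32];        /* range to wipe starts here... */
        char also_clear[8];
        char keep_b[16];          /* ...and stops at this member */
    };

    int main(void)
    {
        struct params p;
        memset(&p, 0xff, sizeof(p));

        /* Clear [clear_me, keep_b): the length is the address difference. */
        memset(&p.clear_me, 0, (char *)&p.keep_b - (char *)&p.clear_me);

        printf("%u %u %u\n", (unsigned char)p.keep_a[0],
               (unsigned char)p.clear_me[0],
               (unsigned char)p.keep_b[0]);   /* 255 0 255 */
        return 0;
    }
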
@ -171,9 +171,15 @@ static struct resource bss_resource = {

#ifdef CONFIG_X86_32
/* cpu data as detected by the assembly code in head.S */
struct cpuinfo_x86 new_cpu_data __cpuinitdata = {0, 0, 0, 0, -1, 1, 0, 0, -1};
struct cpuinfo_x86 new_cpu_data __cpuinitdata = {
.wp_works_ok = -1,
.fdiv_bug = -1,
};
/* common cpu data for all cpus */
struct cpuinfo_x86 boot_cpu_data __read_mostly = {0, 0, 0, 0, -1, 1, 0, 0, -1};
struct cpuinfo_x86 boot_cpu_data __read_mostly = {
.wp_works_ok = -1,
.fdiv_bug = -1,
};
EXPORT_SYMBOL(boot_cpu_data);

unsigned int def_to_bigsmp;
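
Editor's note: replacing the positional {0, 0, 0, 0, -1, 1, 0, 0, -1} with designated initializers pins each -1 sentinel to a named member, so reordering or inserting fields in the struct can no longer silently shift values; unnamed members are zero-initialized. A sketch under an assumed stand-in layout:

    #include <stdio.h>

    struct cpuinfo {              /* illustrative stand-in struct */
        int family;
        int model;
        int wp_works_ok;
        int fdiv_bug;
    };

    int main(void)
    {
        /* Named members get the sentinels; everything else is zeroed,
         * wherever those members happen to sit in the struct. */
        struct cpuinfo boot_cpu = {
            .wp_works_ok = -1,
            .fdiv_bug    = -1,
        };

        printf("family=%d wp=%d\n", boot_cpu.family, boot_cpu.wp_works_ok);
        return 0;   /* family=0 wp=-1 */
    }
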
@ -1365,9 +1365,8 @@ static inline void mwait_play_dead(void)
unsigned int eax, ebx, ecx, edx;
unsigned int highest_cstate = 0;
unsigned int highest_subcstate = 0;
int i;
void *mwait_ptr;
struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
int i;

if (!this_cpu_has(X86_FEATURE_MWAIT))
return;
@ -410,9 +410,8 @@ void __init init_mem_mapping(void)
/* the ISA range is always mapped regardless of memory holes */
init_memory_mapping(0, ISA_END_ADDRESS);

/* xen has big range in reserved near end of ram, skip it at first */
addr = memblock_find_in_range(ISA_END_ADDRESS, end, PMD_SIZE,
PAGE_SIZE);
/* xen has big range in reserved near end of ram, skip it at first.*/
addr = memblock_find_in_range(ISA_END_ADDRESS, end, PMD_SIZE, PMD_SIZE);
real_end = addr + PMD_SIZE;

/* step_size need to be small so pgt_buf from BRK could cover it */
@ -563,6 +563,13 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
if (base > __pa(high_memory-1))
return 0;

/*
* some areas in the middle of the kernel identity range
* are not mapped, like the PCI space.
*/
if (!page_is_ram(base >> PAGE_SHIFT))
return 0;

id_sz = (__pa(high_memory-1) <= base + size) ?
__pa(high_memory) - base :
size;
@ -36,12 +36,11 @@ int register_acpi_bus_type(struct acpi_bus_type *type)
{
if (acpi_disabled)
return -ENODEV;
if (type && type->bus && type->find_device) {
if (type && type->match && type->find_device) {
down_write(&bus_type_sem);
list_add_tail(&type->list, &bus_type_list);
up_write(&bus_type_sem);
printk(KERN_INFO PREFIX "bus type %s registered\n",
type->bus->name);
printk(KERN_INFO PREFIX "bus type %s registered\n", type->name);
return 0;
}
return -ENODEV;
@ -56,24 +55,21 @@ int unregister_acpi_bus_type(struct acpi_bus_type *type)
down_write(&bus_type_sem);
list_del_init(&type->list);
up_write(&bus_type_sem);
printk(KERN_INFO PREFIX "ACPI bus type %s unregistered\n",
type->bus->name);
printk(KERN_INFO PREFIX "bus type %s unregistered\n",
type->name);
return 0;
}
return -ENODEV;
}
EXPORT_SYMBOL_GPL(unregister_acpi_bus_type);

static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type)
static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
{
struct acpi_bus_type *tmp, *ret = NULL;

if (!type)
return NULL;

down_read(&bus_type_sem);
list_for_each_entry(tmp, &bus_type_list, list) {
if (tmp->bus == type) {
if (tmp->match(dev)) {
ret = tmp;
break;
}
@ -82,22 +78,6 @@ static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type)
return ret;
}

static int acpi_find_bridge_device(struct device *dev, acpi_handle * handle)
{
struct acpi_bus_type *tmp;
int ret = -ENODEV;

down_read(&bus_type_sem);
list_for_each_entry(tmp, &bus_type_list, list) {
if (tmp->find_bridge && !tmp->find_bridge(dev, handle)) {
ret = 0;
break;
}
}
up_read(&bus_type_sem);
return ret;
}

static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used,
void *addr_p, void **ret_p)
{
@ -261,29 +241,12 @@ err:

static int acpi_platform_notify(struct device *dev)
{
struct acpi_bus_type *type;
struct acpi_bus_type *type = acpi_get_bus_type(dev);
acpi_handle handle;
int ret;

ret = acpi_bind_one(dev, NULL);
if (ret && (!dev->bus || !dev->parent)) {
/* bridge devices genernally haven't bus or parent */
ret = acpi_find_bridge_device(dev, &handle);
if (!ret) {
ret = acpi_bind_one(dev, handle);
if (ret)
goto out;
}
}

type = acpi_get_bus_type(dev->bus);
if (ret) {
if (!type || !type->find_device) {
DBG("No ACPI bus support for %s\n", dev_name(dev));
ret = -EINVAL;
goto out;
}

if (ret && type) {
ret = type->find_device(dev, &handle);
if (ret) {
DBG("Unable to get handle for %s\n", dev_name(dev));
@ -316,7 +279,7 @@ static int acpi_platform_notify_remove(struct device *dev)
{
struct acpi_bus_type *type;

type = acpi_get_bus_type(dev->bus);
type = acpi_get_bus_type(dev);
if (type && type->cleanup)
type->cleanup(dev);

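
Editor's note: acpi_get_bus_type() now walks the registered list calling each type's match(dev) predicate instead of comparing dev->bus pointers, letting a handler claim devices by any criterion it likes. A sketch of that predicate-callback lookup (all names here are illustrative, not the ACPI core's):

    #include <stdio.h>
    #include <string.h>

    struct device { const char *name; };

    struct bus_type {
        const char *name;
        int (*match)(struct device *dev);  /* predicate, not pointer compare */
    };

    static int is_pci(struct device *dev) { return !strncmp(dev->name, "pci", 3); }
    static int is_usb(struct device *dev) { return !strncmp(dev->name, "usb", 3); }

    static struct bus_type types[] = {
        { "PCI", is_pci },
        { "USB", is_usb },
    };

    static struct bus_type *get_bus_type(struct device *dev)
    {
        for (unsigned int i = 0; i < sizeof(types) / sizeof(types[0]); i++)
            if (types[i].match(dev))
                return &types[i];
        return NULL;
    }

    int main(void)
    {
        struct device d = { "usb1-1.4" };
        struct bus_type *t = get_bus_type(&d);
        printf("%s\n", t ? t->name : "none");   /* USB */
        return 0;
    }
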
@ -158,8 +158,7 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
}

exit:
if (buffer.pointer)
kfree(buffer.pointer);
kfree(buffer.pointer);
return apic_id;
}
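
Editor's note: the guard above was redundant because kfree() accepts NULL as a no-op, so cleanup paths can free unconditionally. The same contract holds for free() in userspace:

    #include <stdlib.h>

    /* free(NULL) -- like the kernel's kfree(NULL) -- is defined to do
     * nothing, so error paths need no "if (buf)" before releasing. */
    int main(void)
    {
        char *buf = NULL;
        /* ... an allocation may or may not have happened ... */
        free(buf);   /* safe either way */
        return 0;
    }
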
@ -559,7 +559,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
return 0;
#endif

BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
BUG_ON(pr->id >= nr_cpu_ids);

/*
* Buggy BIOS check
@ -599,7 +599,6 @@ static void acpi_sleep_suspend_setup(void)
status = acpi_get_sleep_type_data(i, &type_a, &type_b);
if (ACPI_SUCCESS(status)) {
sleep_states[i] = 1;
pr_cont(" S%d", i);
}
}

@ -742,7 +741,6 @@ static void acpi_sleep_hibernate_setup(void)
hibernation_set_ops(old_suspend_ordering ?
&acpi_hibernation_ops_old : &acpi_hibernation_ops);
sleep_states[ACPI_STATE_S4] = 1;
pr_cont(KERN_CONT " S4");
if (nosigcheck)
return;

@ -788,6 +786,9 @@ int __init acpi_sleep_init(void)
{
acpi_status status;
u8 type_a, type_b;
char supported[ACPI_S_STATE_COUNT * 3 + 1];
char *pos = supported;
int i;

if (acpi_disabled)
return 0;
@ -795,7 +796,6 @@ int __init acpi_sleep_init(void)
acpi_sleep_dmi_check();

sleep_states[ACPI_STATE_S0] = 1;
pr_info(PREFIX "(supports S0");

acpi_sleep_suspend_setup();
acpi_sleep_hibernate_setup();
@ -803,11 +803,17 @@ int __init acpi_sleep_init(void)
status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
if (ACPI_SUCCESS(status)) {
sleep_states[ACPI_STATE_S5] = 1;
pr_cont(" S5");
pm_power_off_prepare = acpi_power_off_prepare;
pm_power_off = acpi_power_off;
}
pr_cont(")\n");

supported[0] = 0;
for (i = 0; i < ACPI_S_STATE_COUNT; i++) {
if (sleep_states[i])
pos += sprintf(pos, " S%d", i);
}
pr_info(PREFIX "(supports%s)\n", supported);

/*
* Register the tts_notifier to reboot notifier list so that the _TTS
* object can also be evaluated when the system enters S5.
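
Editor's note: instead of emitting each state with pr_cont (which can interleave badly with other messages), the new code accumulates " S0 S3 S5" into a stack buffer with sprintf and then issues a single pr_info; the buffer is sized at three characters per state (" S" plus one digit) plus the trailing NUL. A runnable sketch of the accumulation:

    #include <stdio.h>

    #define STATE_COUNT 6   /* S0..S5, as ACPI_S_STATE_COUNT */

    int main(void)
    {
        int sleep_states[STATE_COUNT] = { 1, 0, 0, 1, 0, 1 };
        char supported[STATE_COUNT * 3 + 1];  /* " S" + digit each, + NUL */
        char *pos = supported;

        supported[0] = '\0';
        for (int i = 0; i < STATE_COUNT; i++)
            if (sleep_states[i])
                pos += sprintf(pos, " S%d", i); /* returns chars written */

        printf("(supports%s)\n", supported);    /* (supports S0 S3 S5) */
        return 0;
    }
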
@ -1144,13 +1144,8 @@ static int ata_acpi_find_device(struct device *dev, acpi_handle *handle)
return -ENODEV;
}

static int ata_acpi_find_dummy(struct device *dev, acpi_handle *handle)
{
return -ENODEV;
}

static struct acpi_bus_type ata_acpi_bus = {
.find_bridge = ata_acpi_find_dummy,
.name = "ATA",
.find_device = ata_acpi_find_device,
};

@ -99,7 +99,6 @@ void device_pm_add(struct device *dev)
dev_warn(dev, "parent %s should not be sleeping\n",
dev_name(dev->parent));
list_add_tail(&dev->power.entry, &dpm_list);
dev_pm_qos_constraints_init(dev);
mutex_unlock(&dpm_list_mtx);
}

@ -113,7 +112,6 @@ void device_pm_remove(struct device *dev)
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx);
dev_pm_qos_constraints_destroy(dev);
list_del_init(&dev->power.entry);
mutex_unlock(&dpm_list_mtx);
device_wakeup_disable(dev);
@ -4,7 +4,7 @@ static inline void device_pm_init_common(struct device *dev)
{
if (!dev->power.early_init) {
spin_lock_init(&dev->power.lock);
dev->power.power_state = PMSG_INVALID;
dev->power.qos = NULL;
dev->power.early_init = true;
}
}
@ -56,14 +56,10 @@ extern void device_pm_move_last(struct device *);

static inline void device_pm_sleep_init(struct device *dev) {}

static inline void device_pm_add(struct device *dev)
{
dev_pm_qos_constraints_init(dev);
}
static inline void device_pm_add(struct device *dev) {}

static inline void device_pm_remove(struct device *dev)
{
dev_pm_qos_constraints_destroy(dev);
pm_runtime_remove(dev);
}

@ -41,6 +41,7 @@
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>

#include "power.h"

@ -61,7 +62,7 @@ enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
struct pm_qos_flags *pqf;
s32 val;

if (!qos)
if (IS_ERR_OR_NULL(qos))
return PM_QOS_FLAGS_UNDEFINED;

pqf = &qos->flags;
@ -101,7 +102,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
*/
s32 __dev_pm_qos_read_value(struct device *dev)
{
return dev->power.qos ? pm_qos_read_value(&dev->power.qos->latency) : 0;
return IS_ERR_OR_NULL(dev->power.qos) ?
0 : pm_qos_read_value(&dev->power.qos->latency);
}

/**
@ -198,20 +200,8 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
return 0;
}

/**
* dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer.
* @dev: target device
*
* Called from the device PM subsystem during device insertion under
* device_pm_lock().
*/
void dev_pm_qos_constraints_init(struct device *dev)
{
mutex_lock(&dev_pm_qos_mtx);
dev->power.qos = NULL;
dev->power.power_state = PMSG_ON;
mutex_unlock(&dev_pm_qos_mtx);
}
static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);

/**
* dev_pm_qos_constraints_destroy
@ -226,16 +216,15 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
struct pm_qos_constraints *c;
struct pm_qos_flags *f;

mutex_lock(&dev_pm_qos_mtx);

/*
* If the device's PM QoS resume latency limit or PM QoS flags have been
* exposed to user space, they have to be hidden at this point.
*/
dev_pm_qos_hide_latency_limit(dev);
dev_pm_qos_hide_flags(dev);
__dev_pm_qos_hide_latency_limit(dev);
__dev_pm_qos_hide_flags(dev);

mutex_lock(&dev_pm_qos_mtx);

dev->power.power_state = PMSG_INVALID;
qos = dev->power.qos;
if (!qos)
goto out;
@ -257,7 +246,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
}

spin_lock_irq(&dev->power.lock);
dev->power.qos = NULL;
dev->power.qos = ERR_PTR(-ENODEV);
spin_unlock_irq(&dev->power.lock);

kfree(c->notifiers);
@ -301,32 +290,19 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
"%s() called for already added request\n", __func__))
return -EINVAL;

req->dev = dev;

mutex_lock(&dev_pm_qos_mtx);

if (!dev->power.qos) {
if (dev->power.power_state.event == PM_EVENT_INVALID) {
/* The device has been removed from the system. */
req->dev = NULL;
ret = -ENODEV;
goto out;
} else {
/*
* Allocate the constraints data on the first call to
* add_request, i.e. only if the data is not already
* allocated and if the device has not been removed.
*/
ret = dev_pm_qos_constraints_allocate(dev);
}
}
if (IS_ERR(dev->power.qos))
ret = -ENODEV;
else if (!dev->power.qos)
ret = dev_pm_qos_constraints_allocate(dev);

if (!ret) {
req->dev = dev;
req->type = type;
ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
}

out:
mutex_unlock(&dev_pm_qos_mtx);

return ret;
@ -344,7 +320,14 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
s32 curr_value;
int ret = 0;

if (!req->dev->power.qos)
if (!req) /*guard against callers passing in null */
return -EINVAL;

if (WARN(!dev_pm_qos_request_active(req),
"%s() called for unknown object\n", __func__))
return -EINVAL;

if (IS_ERR_OR_NULL(req->dev->power.qos))
return -ENODEV;

switch(req->type) {
@ -386,6 +369,17 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
int ret;

mutex_lock(&dev_pm_qos_mtx);
ret = __dev_pm_qos_update_request(req, new_value);
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);

static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
int ret;

if (!req) /*guard against callers passing in null */
return -EINVAL;

@ -393,13 +387,13 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
"%s() called for unknown object\n", __func__))
return -EINVAL;

mutex_lock(&dev_pm_qos_mtx);
ret = __dev_pm_qos_update_request(req, new_value);
mutex_unlock(&dev_pm_qos_mtx);
if (IS_ERR_OR_NULL(req->dev->power.qos))
return -ENODEV;

ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);

/**
* dev_pm_qos_remove_request - modifies an existing qos request
@ -418,26 +412,10 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
*/
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
int ret = 0;

if (!req) /*guard against callers passing in null */
return -EINVAL;

if (WARN(!dev_pm_qos_request_active(req),
"%s() called for unknown object\n", __func__))
return -EINVAL;
int ret;

mutex_lock(&dev_pm_qos_mtx);

if (req->dev->power.qos) {
ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
} else {
/* Return if the device has been removed */
ret = -ENODEV;
}

ret = __dev_pm_qos_remove_request(req);
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
@ -462,9 +440,10 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)

mutex_lock(&dev_pm_qos_mtx);

if (!dev->power.qos)
ret = dev->power.power_state.event != PM_EVENT_INVALID ?
dev_pm_qos_constraints_allocate(dev) : -ENODEV;
if (IS_ERR(dev->power.qos))
ret = -ENODEV;
else if (!dev->power.qos)
ret = dev_pm_qos_constraints_allocate(dev);

if (!ret)
ret = blocking_notifier_chain_register(
@ -493,7 +472,7 @@ int dev_pm_qos_remove_notifier(struct device *dev,
mutex_lock(&dev_pm_qos_mtx);

/* Silently return if the constraints object is not present. */
if (dev->power.qos)
if (!IS_ERR_OR_NULL(dev->power.qos))
retval = blocking_notifier_chain_unregister(
dev->power.qos->latency.notifiers,
notifier);
@ -563,16 +542,20 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
static void __dev_pm_qos_drop_user_request(struct device *dev,
enum dev_pm_qos_req_type type)
{
struct dev_pm_qos_request *req = NULL;

switch(type) {
case DEV_PM_QOS_LATENCY:
dev_pm_qos_remove_request(dev->power.qos->latency_req);
req = dev->power.qos->latency_req;
dev->power.qos->latency_req = NULL;
break;
case DEV_PM_QOS_FLAGS:
dev_pm_qos_remove_request(dev->power.qos->flags_req);
req = dev->power.qos->flags_req;
dev->power.qos->flags_req = NULL;
break;
}
__dev_pm_qos_remove_request(req);
kfree(req);
}

/**
@ -588,36 +571,57 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
if (!device_is_registered(dev) || value < 0)
return -EINVAL;

if (dev->power.qos && dev->power.qos->latency_req)
return -EEXIST;

req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;

ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
if (ret < 0)
if (ret < 0) {
kfree(req);
return ret;
}

mutex_lock(&dev_pm_qos_mtx);

if (IS_ERR_OR_NULL(dev->power.qos))
ret = -ENODEV;
else if (dev->power.qos->latency_req)
ret = -EEXIST;

if (ret < 0) {
__dev_pm_qos_remove_request(req);
kfree(req);
goto out;
}

dev->power.qos->latency_req = req;
ret = pm_qos_sysfs_add_latency(dev);
if (ret)
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);

out:
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);

static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) {
pm_qos_sysfs_remove_latency(dev);
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
}
}

/**
* dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
* @dev: Device whose PM QoS latency limit is to be hidden from user space.
*/
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
if (dev->power.qos && dev->power.qos->latency_req) {
pm_qos_sysfs_remove_latency(dev);
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
}
mutex_lock(&dev_pm_qos_mtx);
__dev_pm_qos_hide_latency_limit(dev);
mutex_unlock(&dev_pm_qos_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);

@ -634,41 +638,61 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
if (!device_is_registered(dev))
return -EINVAL;

if (dev->power.qos && dev->power.qos->flags_req)
return -EEXIST;

req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;

pm_runtime_get_sync(dev);
ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
if (ret < 0)
goto fail;
if (ret < 0) {
kfree(req);
return ret;
}

pm_runtime_get_sync(dev);
mutex_lock(&dev_pm_qos_mtx);

if (IS_ERR_OR_NULL(dev->power.qos))
ret = -ENODEV;
else if (dev->power.qos->flags_req)
ret = -EEXIST;

if (ret < 0) {
__dev_pm_qos_remove_request(req);
kfree(req);
goto out;
}

dev->power.qos->flags_req = req;
ret = pm_qos_sysfs_add_flags(dev);
if (ret)
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

fail:
out:
mutex_unlock(&dev_pm_qos_mtx);
pm_runtime_put(dev);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);

static void __dev_pm_qos_hide_flags(struct device *dev)
{
if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) {
pm_qos_sysfs_remove_flags(dev);
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
}
}

/**
* dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
* @dev: Device whose PM QoS flags are to be hidden from user space.
*/
void dev_pm_qos_hide_flags(struct device *dev)
{
if (dev->power.qos && dev->power.qos->flags_req) {
pm_qos_sysfs_remove_flags(dev);
pm_runtime_get_sync(dev);
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
pm_runtime_put(dev);
}
pm_runtime_get_sync(dev);
mutex_lock(&dev_pm_qos_mtx);
__dev_pm_qos_hide_flags(dev);
mutex_unlock(&dev_pm_qos_mtx);
pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);

@ -683,12 +707,14 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
s32 value;
int ret;

if (!dev->power.qos || !dev->power.qos->flags_req)
return -EINVAL;

pm_runtime_get_sync(dev);
mutex_lock(&dev_pm_qos_mtx);

if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
ret = -EINVAL;
goto out;
}

value = dev_pm_qos_requested_flags(dev);
if (set)
value |= mask;
@ -697,9 +723,12 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)

ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);

out:
mutex_unlock(&dev_pm_qos_mtx);
pm_runtime_put(dev);

return ret;
}
#else /* !CONFIG_PM_RUNTIME */
static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
static void __dev_pm_qos_hide_flags(struct device *dev) {}
#endif /* CONFIG_PM_RUNTIME */
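
Editor's note: the qos.c rework above distinguishes "not allocated yet" (NULL) from "device being torn down" (ERR_PTR(-ENODEV)) in a single pointer, with IS_ERR_OR_NULL() catching both. A userspace sketch of the encoded-error-pointer trick (a simplified re-implementation, not the kernel's err.h):

    #include <stdint.h>
    #include <stdio.h>

    /* Small negative errnos live in the top page of the address space, so
     * one pointer can be: valid, NULL ("not yet"), or an error ("gone"). */
    #define MAX_ERRNO 4095
    static void *ERR_PTR(long err)        { return (void *)err; }
    static long  PTR_ERR(const void *p)   { return (long)p; }
    static int   IS_ERR(const void *p)
    {
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }
    static int IS_ERR_OR_NULL(const void *p) { return !p || IS_ERR(p); }

    int main(void)
    {
        void *qos = NULL;                  /* constraints not allocated yet */
        printf("usable? %d\n", !IS_ERR_OR_NULL(qos));   /* 0 */

        qos = ERR_PTR(-19 /* -ENODEV */);  /* device removed: stays dead */
        printf("dead? %d (err=%ld)\n", IS_ERR(qos), PTR_ERR(qos));
        return 0;
    }
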
@ -708,6 +708,7 @@ void rpm_sysfs_remove(struct device *dev)

void dpm_sysfs_remove(struct device *dev)
{
dev_pm_qos_constraints_destroy(dev);
rpm_sysfs_remove(dev);
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
sysfs_remove_group(&dev->kobj, &pm_attr_group);
@ -184,6 +184,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
if (ret < 0) {
dev_err(map->dev, "IRQ thread failed to resume: %d\n",
ret);
pm_runtime_put(map->dev);
return IRQ_NONE;
}
}
@ -404,6 +404,8 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
return;
}

spin_lock_init(&pc_host->cfgspace_lock);

pc->host_controller = pc_host;
pc_host->pci_controller.io_resource = &pc_host->io_resource;
pc_host->pci_controller.mem_resource = &pc_host->mem_resource;
@ -40,6 +40,7 @@
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <asm/uaccess.h>


@ -52,8 +53,12 @@ static struct hwrng *current_rng;
static LIST_HEAD(rng_list);
static DEFINE_MUTEX(rng_mutex);
static int data_avail;
static u8 rng_buffer[SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES]
__cacheline_aligned;
static u8 *rng_buffer;

static size_t rng_buffer_size(void)
{
return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
}

static inline int hwrng_init(struct hwrng *rng)
{
@ -116,7 +121,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,

if (!data_avail) {
bytes_read = rng_get_data(current_rng, rng_buffer,
sizeof(rng_buffer),
rng_buffer_size(),
!(filp->f_flags & O_NONBLOCK));
if (bytes_read < 0) {
err = bytes_read;
@ -307,6 +312,14 @@ int hwrng_register(struct hwrng *rng)

mutex_lock(&rng_mutex);

/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
err = -ENOMEM;
if (!rng_buffer) {
rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
if (!rng_buffer)
goto out_unlock;
}

/* Must not register two RNGs with the same name. */
err = -EEXIST;
list_for_each_entry(tmp, &rng_list, list) {
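
Editor's note: rng_buffer moves from a static array to a kmalloc'd buffer allocated on the first hwrng_register() while rng_mutex is held, so virt_to_page() works on it and concurrent registrations cannot double-allocate. A pthread sketch of allocate-once-under-the-lock (names illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned char *buffer;        /* shared, lazily allocated */

    static int register_source(void)
    {
        int err = 0;

        pthread_mutex_lock(&lock);
        if (!buffer) {                   /* first registrant allocates */
            buffer = malloc(64);
            if (!buffer)
                err = -1;                /* -ENOMEM in the kernel version */
        }
        /* ... rest of registration, still under the lock ... */
        pthread_mutex_unlock(&lock);
        return err;
    }

    int main(void)
    {
        printf("%d %d\n", register_source(), register_source());
        /* 0 0 -- only one allocation happened */
        free(buffer);
        return 0;
    }
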
@ -852,6 +852,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
int reserved)
{
unsigned long flags;
int wakeup_write = 0;

/* Hold lock while accounting */
spin_lock_irqsave(&r->lock, flags);
@ -873,10 +874,8 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
else
r->entropy_count = reserved;

if (r->entropy_count < random_write_wakeup_thresh) {
wake_up_interruptible(&random_write_wait);
kill_fasync(&fasync, SIGIO, POLL_OUT);
}
if (r->entropy_count < random_write_wakeup_thresh)
wakeup_write = 1;
}

DEBUG_ENT("debiting %zu entropy credits from %s%s\n",
@ -884,6 +883,11 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,

spin_unlock_irqrestore(&r->lock, flags);

if (wakeup_write) {
wake_up_interruptible(&random_write_wait);
kill_fasync(&fasync, SIGIO, POLL_OUT);
}

return nbytes;
}

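
Editor's note: account() now records the need for a wakeup in a local flag while holding r->lock and performs the wake_up_interruptible()/kill_fasync() calls only after the irq-saving spinlock is released, shortening the critical section. A compact pthread sketch of the same shape (values illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  room = PTHREAD_COND_INITIALIZER;
    static int entropy_count = 10;

    static void debit(int nbytes, int wakeup_thresh)
    {
        int wakeup_write = 0;

        pthread_mutex_lock(&lock);
        entropy_count -= nbytes;
        if (entropy_count < wakeup_thresh)
            wakeup_write = 1;            /* just note it; don't signal yet */
        pthread_mutex_unlock(&lock);

        if (wakeup_write)                /* signal after dropping the lock */
            pthread_cond_broadcast(&room);
    }

    int main(void)
    {
        debit(8, 5);
        printf("entropy_count=%d\n", entropy_count);  /* 2 */
        return 0;
    }
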
@ -313,6 +313,12 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
(task_active_pid_ns(current) != &init_pid_ns))
return;

/* Can only change if privileged. */
if (!capable(CAP_NET_ADMIN)) {
err = EPERM;
goto out;
}

mc_op = (enum proc_cn_mcast_op *)msg->data;
switch (*mc_op) {
case PROC_CN_MCAST_LISTEN:
@ -325,6 +331,8 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
err = EINVAL;
break;
}

out:
cn_proc_ack(err, msg->seq, msg->ack);
}

@ -64,7 +64,7 @@ static void *get_cpu_dbs_info_s(int cpu) \
* dbs: used as a shortform for demand based switching It helps to keep variable
* names smaller, simpler
* cdbs: common dbs
* on_*: On-demand governor
* od_*: On-demand governor
* cs_*: Conservative governor
*/

@ -28,13 +28,7 @@

static int hb_voltage_change(unsigned int freq)
{
int i;
u32 msg[HB_CPUFREQ_IPC_LEN];

msg[0] = HB_CPUFREQ_CHANGE_NOTE;
msg[1] = freq / 1000000;
for (i = 2; i < HB_CPUFREQ_IPC_LEN; i++)
msg[i] = 0;
u32 msg[HB_CPUFREQ_IPC_LEN] = {HB_CPUFREQ_CHANGE_NOTE, freq / 1000000};

return pl320_ipc_transmit(msg);
}
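
Editor's note: the rewrite above leans on C's guarantee that an initializer listing fewer elements than the array zero-fills the remainder, which is exactly what the removed for-loop did by hand. A runnable sketch (the length macro is a stand-in):

    #include <stdio.h>

    #define IPC_LEN 7   /* stands in for HB_CPUFREQ_IPC_LEN */

    int main(void)
    {
        /* Elements beyond those listed are implicitly zeroed (C99 6.7.8). */
        unsigned int msg[IPC_LEN] = { 0xdeadbeef, 1500 };

        for (int i = 0; i < IPC_LEN; i++)
            printf("msg[%d]=%u\n", i, msg[i]);  /* indices 2..6 print 0 */
        return 0;
    }
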
@ -662,6 +662,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)

cpu = all_cpu_data[policy->cpu];

if (!policy->cpuinfo.max_freq)
return -ENODEV;

intel_pstate_get_min_max(cpu, &min, &max);

limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
@ -747,37 +750,11 @@ static struct cpufreq_driver intel_pstate_driver = {
.owner = THIS_MODULE,
};

static void intel_pstate_exit(void)
{
int cpu;

sysfs_remove_group(intel_pstate_kobject,
&intel_pstate_attr_group);
debugfs_remove_recursive(debugfs_parent);

cpufreq_unregister_driver(&intel_pstate_driver);

if (!all_cpu_data)
return;

get_online_cpus();
for_each_online_cpu(cpu) {
if (all_cpu_data[cpu]) {
del_timer_sync(&all_cpu_data[cpu]->timer);
kfree(all_cpu_data[cpu]);
}
}

put_online_cpus();
vfree(all_cpu_data);
}
module_exit(intel_pstate_exit);

static int __initdata no_load;

static int __init intel_pstate_init(void)
{
int rc = 0;
int cpu, rc = 0;
const struct x86_cpu_id *id;

if (no_load)
@ -802,7 +779,16 @@ static int __init intel_pstate_init(void)
intel_pstate_sysfs_expose_params();
return rc;
out:
intel_pstate_exit();
get_online_cpus();
for_each_online_cpu(cpu) {
if (all_cpu_data[cpu]) {
del_timer_sync(&all_cpu_data[cpu]->timer);
kfree(all_cpu_data[cpu]);
}
}

put_online_cpus();
vfree(all_cpu_data);
return -ENODEV;
}
device_initcall(intel_pstate_init);
@ -442,7 +442,6 @@ static int __init dmi_present(const char __iomem *p)
static int __init smbios_present(const char __iomem *p)
{
u8 buf[32];
int offset = 0;

memcpy_fromio(buf, p, 32);
if ((buf[5] < 32) && dmi_checksum(buf, buf[5])) {
@ -461,9 +460,9 @@ static int __init smbios_present(const char __iomem *p)
dmi_ver = 0x0206;
break;
}
offset = 16;
return memcmp(p + 16, "_DMI_", 5) || dmi_present(p + 16);
}
return dmi_present(buf + offset);
return 1;
}

void __init dmi_scan_machine(void)
@ -426,6 +426,44 @@ get_var_data(struct efivars *efivars, struct efi_variable *var)
return status;
}

static efi_status_t
check_var_size_locked(struct efivars *efivars, u32 attributes,
unsigned long size)
{
u64 storage_size, remaining_size, max_size;
efi_status_t status;
const struct efivar_operations *fops = efivars->ops;

if (!efivars->ops->query_variable_info)
return EFI_UNSUPPORTED;

status = fops->query_variable_info(attributes, &storage_size,
&remaining_size, &max_size);

if (status != EFI_SUCCESS)
return status;

if (!storage_size || size > remaining_size || size > max_size ||
(remaining_size - size) < (storage_size / 2))
return EFI_OUT_OF_RESOURCES;

return status;
}


static efi_status_t
check_var_size(struct efivars *efivars, u32 attributes, unsigned long size)
{
efi_status_t status;
unsigned long flags;

spin_lock_irqsave(&efivars->lock, flags);
status = check_var_size_locked(efivars, attributes, size);
spin_unlock_irqrestore(&efivars->lock, flags);

return status;
}

static ssize_t
efivar_guid_read(struct efivar_entry *entry, char *buf)
{
@ -547,11 +585,16 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
}

spin_lock_irq(&efivars->lock);
status = efivars->ops->set_variable(new_var->VariableName,
&new_var->VendorGuid,
new_var->Attributes,
new_var->DataSize,
new_var->Data);

status = check_var_size_locked(efivars, new_var->Attributes,
new_var->DataSize + utf16_strsize(new_var->VariableName, 1024));

if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED)
status = efivars->ops->set_variable(new_var->VariableName,
&new_var->VendorGuid,
new_var->Attributes,
new_var->DataSize,
new_var->Data);

spin_unlock_irq(&efivars->lock);

@ -702,8 +745,7 @@ static ssize_t efivarfs_file_write(struct file *file,
u32 attributes;
struct inode *inode = file->f_mapping->host;
unsigned long datasize = count - sizeof(attributes);
unsigned long newdatasize;
u64 storage_size, remaining_size, max_size;
unsigned long newdatasize, varsize;
ssize_t bytes = 0;

if (count < sizeof(attributes))
@ -722,28 +764,18 @@ static ssize_t efivarfs_file_write(struct file *file,
* amounts of memory. Pick a default size of 64K if
* QueryVariableInfo() isn't supported by the firmware.
*/
spin_lock_irq(&efivars->lock);

if (!efivars->ops->query_variable_info)
status = EFI_UNSUPPORTED;
else {
const struct efivar_operations *fops = efivars->ops;
status = fops->query_variable_info(attributes, &storage_size,
&remaining_size, &max_size);
}

spin_unlock_irq(&efivars->lock);
varsize = datasize + utf16_strsize(var->var.VariableName, 1024);
status = check_var_size(efivars, attributes, varsize);

if (status != EFI_SUCCESS) {
if (status != EFI_UNSUPPORTED)
return efi_status_to_err(status);

remaining_size = 65536;
if (datasize > 65536)
return -ENOSPC;
}

if (datasize > remaining_size)
return -ENOSPC;

data = kmalloc(datasize, GFP_KERNEL);
if (!data)
return -ENOMEM;
@ -765,6 +797,19 @@ static ssize_t efivarfs_file_write(struct file *file,
*/
spin_lock_irq(&efivars->lock);

/*
* Ensure that the available space hasn't shrunk below the safe level
*/

status = check_var_size_locked(efivars, attributes, varsize);

if (status != EFI_SUCCESS && status != EFI_UNSUPPORTED) {
spin_unlock_irq(&efivars->lock);
kfree(data);

return efi_status_to_err(status);
}

status = efivars->ops->set_variable(var->var.VariableName,
&var->var.VendorGuid,
attributes, datasize,
@ -929,8 +974,8 @@ static bool efivarfs_valid_name(const char *str, int len)
if (len < GUID_LEN + 2)
return false;

/* GUID should be right after the first '-' */
if (s - 1 != strchr(str, '-'))
/* GUID must be preceded by a '-' */
if (*(s - 1) != '-')
return false;

/*
@ -1118,15 +1163,22 @@ static struct dentry_operations efivarfs_d_ops = {

static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name)
{
struct dentry *d;
struct qstr q;
int err;

q.name = name;
q.len = strlen(name);

if (efivarfs_d_hash(NULL, NULL, &q))
return NULL;
err = efivarfs_d_hash(NULL, NULL, &q);
if (err)
return ERR_PTR(err);

return d_alloc(parent, &q);
d = d_alloc(parent, &q);
if (d)
return d;

return ERR_PTR(-ENOMEM);
}

static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
@ -1136,6 +1188,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
struct efivar_entry *entry, *n;
struct efivars *efivars = &__efivars;
char *name;
int err = -ENOMEM;

efivarfs_sb = sb;

@ -1186,8 +1239,10 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
goto fail_name;

dentry = efivarfs_alloc_dentry(root, name);
if (!dentry)
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
goto fail_inode;
}

/* copied by the above to local storage in the dentry. */
kfree(name);
@ -1214,7 +1269,7 @@ fail_inode:
fail_name:
kfree(name);
fail:
return -ENOMEM;
return err;
}

static struct dentry *efivarfs_mount(struct file_system_type *fs_type,
@ -1234,6 +1289,7 @@ static struct file_system_type efivarfs_type = {
.mount = efivarfs_mount,
.kill_sb = efivarfs_kill_sb,
};
MODULE_ALIAS_FS("efivarfs");

/*
* Handle negative dentry.
@ -1345,7 +1401,6 @@ static int efi_pstore_write(enum pstore_type_id type,
efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
struct efivars *efivars = psi->data;
int i, ret = 0;
u64 storage_space, remaining_space, max_variable_size;
efi_status_t status = EFI_NOT_FOUND;
unsigned long flags;

@ -1365,11 +1420,11 @@ static int efi_pstore_write(enum pstore_type_id type,
* size: a size of logging data
* DUMP_NAME_LEN * 2: a maximum size of variable name
*/
status = efivars->ops->query_variable_info(PSTORE_EFI_ATTRIBUTES,
&storage_space,
&remaining_space,
&max_variable_size);
if (status || remaining_space < size + DUMP_NAME_LEN * 2) {

status = check_var_size_locked(efivars, PSTORE_EFI_ATTRIBUTES,
size + DUMP_NAME_LEN * 2);

if (status) {
spin_unlock_irqrestore(&efivars->lock, flags);
*id = part;
return -ENOSPC;
@ -1544,6 +1599,14 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
return -EINVAL;
}

status = check_var_size_locked(efivars, new_var->Attributes,
new_var->DataSize + utf16_strsize(new_var->VariableName, 1024));

if (status && status != EFI_UNSUPPORTED) {
spin_unlock_irq(&efivars->lock);
return efi_status_to_err(status);
}

/* now *really* create the variable via EFI */
status = efivars->ops->set_variable(new_var->VariableName,
&new_var->VendorGuid,
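
Editor's note: efivarfs_file_write now performs an early estimate via check_var_size() (which takes efivars->lock itself) and then repeats the check with check_var_size_locked() once the lock is held for the actual set_variable(), because free space may shrink between the two points. A sketch of that check/recheck shape (all names illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static long remaining = 100;            /* shared space budget */

    static int has_room_locked(long size)   /* caller holds 'lock' */
    {
        return size <= remaining;
    }

    static int reserve(long size)
    {
        /* Early check: take and drop the lock just to fail fast. */
        pthread_mutex_lock(&lock);
        int ok = has_room_locked(size);
        pthread_mutex_unlock(&lock);
        if (!ok)
            return -1;

        /* ... slow preparation without the lock (allocate, copy) ... */

        /* Re-check under the lock that protects the consumption, since
         * another thread may have spent the budget in the meantime. */
        pthread_mutex_lock(&lock);
        if (!has_room_locked(size)) {
            pthread_mutex_unlock(&lock);
            return -1;
        }
        remaining -= size;
        pthread_mutex_unlock(&lock);
        return 0;
    }

    int main(void)
    {
        printf("%d %ld\n", reserve(40), remaining);  /* 0 60 */
        return 0;
    }
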
@ -128,9 +128,9 @@ static int ichx_read_bit(int reg, unsigned nr)
return data & (1 << bit) ? 1 : 0;
}

static int ichx_gpio_check_available(struct gpio_chip *gpio, unsigned nr)
static bool ichx_gpio_check_available(struct gpio_chip *gpio, unsigned nr)
{
return (ichx_priv.use_gpio & (1 << (nr / 32))) ? 0 : -ENXIO;
return ichx_priv.use_gpio & (1 << (nr / 32));
}

static int ichx_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
@ -88,13 +88,14 @@ static int gpiod_request(struct gpio_desc *desc, const char *label);
|
||||
static void gpiod_free(struct gpio_desc *desc);
|
||||
static int gpiod_direction_input(struct gpio_desc *desc);
|
||||
static int gpiod_direction_output(struct gpio_desc *desc, int value);
|
||||
static int gpiod_get_direction(const struct gpio_desc *desc);
|
||||
static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
|
||||
static int gpiod_get_value_cansleep(struct gpio_desc *desc);
|
||||
static int gpiod_get_value_cansleep(const struct gpio_desc *desc);
|
||||
static void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
|
||||
static int gpiod_get_value(struct gpio_desc *desc);
|
||||
static int gpiod_get_value(const struct gpio_desc *desc);
|
||||
static void gpiod_set_value(struct gpio_desc *desc, int value);
|
||||
static int gpiod_cansleep(struct gpio_desc *desc);
|
||||
static int gpiod_to_irq(struct gpio_desc *desc);
|
||||
static int gpiod_cansleep(const struct gpio_desc *desc);
|
||||
static int gpiod_to_irq(const struct gpio_desc *desc);
|
||||
static int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
|
||||
static int gpiod_export_link(struct device *dev, const char *name,
|
||||
struct gpio_desc *desc);
|
||||
@ -171,12 +172,12 @@ static int gpio_ensure_requested(struct gpio_desc *desc)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* caller holds gpio_lock *OR* gpio is marked as requested */
|
||||
static struct gpio_chip *gpiod_to_chip(struct gpio_desc *desc)
|
||||
static struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
|
||||
{
|
||||
return desc->chip;
|
||||
return desc ? desc->chip : NULL;
|
||||
}
|
||||
|
||||
/* caller holds gpio_lock *OR* gpio is marked as requested */
|
||||
struct gpio_chip *gpio_to_chip(unsigned gpio)
|
||||
{
|
||||
return gpiod_to_chip(gpio_to_desc(gpio));
|
||||
@ -207,7 +208,7 @@ static int gpiochip_find_base(int ngpio)
|
||||
}
|
||||
|
||||
/* caller ensures gpio is valid and requested, chip->get_direction may sleep */
|
||||
static int gpiod_get_direction(struct gpio_desc *desc)
|
||||
static int gpiod_get_direction(const struct gpio_desc *desc)
|
||||
{
|
||||
struct gpio_chip *chip;
|
||||
unsigned offset;
|
||||
@ -223,11 +224,13 @@ static int gpiod_get_direction(struct gpio_desc *desc)
|
||||
if (status > 0) {
|
||||
/* GPIOF_DIR_IN, or other positive */
|
||||
status = 1;
|
||||
clear_bit(FLAG_IS_OUT, &desc->flags);
|
||||
/* FLAG_IS_OUT is just a cache of the result of get_direction(),
|
||||
* so it does not affect constness per se */
|
||||
clear_bit(FLAG_IS_OUT, &((struct gpio_desc *)desc)->flags);
|
||||
}
|
||||
if (status == 0) {
|
||||
/* GPIOF_DIR_OUT */
|
||||
set_bit(FLAG_IS_OUT, &desc->flags);
|
||||
set_bit(FLAG_IS_OUT, &((struct gpio_desc *)desc)->flags);
|
||||
}
|
||||
return status;
|
||||
}
|
||||
@ -263,7 +266,7 @@ static DEFINE_MUTEX(sysfs_lock);
|
||||
static ssize_t gpio_direction_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct gpio_desc *desc = dev_get_drvdata(dev);
|
||||
const struct gpio_desc *desc = dev_get_drvdata(dev);
|
||||
ssize_t status;
|
||||
|
||||
mutex_lock(&sysfs_lock);
|
||||
@ -654,6 +657,11 @@ static ssize_t export_store(struct class *class,
|
||||
goto done;
|
||||
|
||||
desc = gpio_to_desc(gpio);
|
||||
/* reject invalid GPIOs */
|
||||
if (!desc) {
|
||||
pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* No extra locking here; FLAG_SYSFS just signifies that the
|
||||
* request and export were done by on behalf of userspace, so
|
||||
@ -690,12 +698,14 @@ static ssize_t unexport_store(struct class *class,
|
||||
if (status < 0)
|
||||
goto done;
|
||||
|
||||
status = -EINVAL;
|
||||
|
||||
desc = gpio_to_desc(gpio);
|
||||
/* reject bogus commands (gpio_unexport ignores them) */
|
||||
if (!desc)
|
||||
goto done;
|
||||
if (!desc) {
|
||||
pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
status = -EINVAL;
|
||||
|
||||
/* No extra locking here; FLAG_SYSFS just signifies that the
|
||||
* request and export were done by on behalf of userspace, so
|
||||
@ -846,8 +856,10 @@ static int gpiod_export_link(struct device *dev, const char *name,
|
||||
{
|
||||
int status = -EINVAL;
|
||||
|
||||
if (!desc)
|
||||
goto done;
|
||||
if (!desc) {
|
||||
pr_warn("%s: invalid GPIO\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
mutex_lock(&sysfs_lock);
|
||||
|
||||
@ -865,7 +877,6 @@ static int gpiod_export_link(struct device *dev, const char *name,
|
||||
|
||||
mutex_unlock(&sysfs_lock);
|
||||
|
||||
done:
|
||||
if (status)
|
||||
pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
|
||||
status);
|
||||
@ -896,8 +907,10 @@ static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
|
||||
struct device *dev = NULL;
|
||||
int status = -EINVAL;
|
||||
|
||||
if (!desc)
|
||||
goto done;
|
||||
if (!desc) {
|
||||
pr_warn("%s: invalid GPIO\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
mutex_lock(&sysfs_lock);
|
||||
|
||||
@ -914,7 +927,6 @@ static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
|
||||
unlock:
|
||||
mutex_unlock(&sysfs_lock);
|
||||
|
||||
done:
|
||||
if (status)
|
||||
pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
|
||||
status);
|
||||
@ -940,8 +952,8 @@ static void gpiod_unexport(struct gpio_desc *desc)
	struct device *dev = NULL;

	if (!desc) {
		status = -EINVAL;
		goto done;
		pr_warn("%s: invalid GPIO\n", __func__);
		return;
	}

	mutex_lock(&sysfs_lock);
@ -962,7 +974,7 @@ static void gpiod_unexport(struct gpio_desc *desc)
		device_unregister(dev);
		put_device(dev);
	}
done:

	if (status)
		pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
			 status);
@ -1384,12 +1396,13 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
	int status = -EPROBE_DEFER;
	unsigned long flags;

	if (!desc) {
		pr_warn("%s: invalid GPIO\n", __func__);
		return -EINVAL;
	}

	spin_lock_irqsave(&gpio_lock, flags);

	if (!desc) {
		status = -EINVAL;
		goto done;
	}
	chip = desc->chip;
	if (chip == NULL)
		goto done;
@ -1432,8 +1445,7 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
done:
	if (status)
		pr_debug("_gpio_request: gpio-%d (%s) status %d\n",
			desc ? desc_to_gpio(desc) : -1,
			label ? : "?", status);
			desc_to_gpio(desc), label ? : "?", status);
	spin_unlock_irqrestore(&gpio_lock, flags);
	return status;
}
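The gpiolib hunks above converge on one shape: reject a NULL descriptor with a warning before any lock is taken, instead of threading an -EINVAL status through goto labels under the lock. A hedged userspace sketch of that shape, with a pthread mutex standing in for the kernel spinlock and all names invented:

/* Validate the descriptor up front; keep the lock for chip checks only. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct demo_desc {
	int have_chip;
};

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

static int demo_request(struct demo_desc *desc, const char *label)
{
	int status = 0;

	if (!desc) {			/* validated before locking */
		fprintf(stderr, "%s: invalid GPIO\n", __func__);
		return -EINVAL;
	}

	pthread_mutex_lock(&demo_lock);
	if (!desc->have_chip)		/* chip checks still need the lock */
		status = -ENODEV;
	pthread_mutex_unlock(&demo_lock);

	if (status)			/* desc is known non-NULL here */
		fprintf(stderr, "demo_request: gpio (%s) status %d\n",
			label ? label : "?", status);
	return status;
}

int main(void)
{
	struct demo_desc d = { .have_chip = 1 };

	return demo_request(&d, "sysfs") || demo_request(NULL, "sysfs");
}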
@ -1616,10 +1628,13 @@ static int gpiod_direction_input(struct gpio_desc *desc)
	int status = -EINVAL;
	int offset;

	if (!desc) {
		pr_warn("%s: invalid GPIO\n", __func__);
		return -EINVAL;
	}

	spin_lock_irqsave(&gpio_lock, flags);

	if (!desc)
		goto fail;
	chip = desc->chip;
	if (!chip || !chip->get || !chip->direction_input)
		goto fail;
@ -1655,13 +1670,9 @@ lose:
	return status;
fail:
	spin_unlock_irqrestore(&gpio_lock, flags);
	if (status) {
		int gpio = -1;
		if (desc)
			gpio = desc_to_gpio(desc);
		pr_debug("%s: gpio-%d status %d\n",
			__func__, gpio, status);
	}
	if (status)
		pr_debug("%s: gpio-%d status %d\n", __func__,
			desc_to_gpio(desc), status);
	return status;
}

@ -1678,6 +1689,11 @@ static int gpiod_direction_output(struct gpio_desc *desc, int value)
	int status = -EINVAL;
	int offset;

	if (!desc) {
		pr_warn("%s: invalid GPIO\n", __func__);
		return -EINVAL;
	}

	/* Open drain pin should not be driven to 1 */
	if (value && test_bit(FLAG_OPEN_DRAIN, &desc->flags))
		return gpiod_direction_input(desc);
@ -1688,8 +1704,6 @@ static int gpiod_direction_output(struct gpio_desc *desc, int value)

	spin_lock_irqsave(&gpio_lock, flags);

	if (!desc)
		goto fail;
	chip = desc->chip;
	if (!chip || !chip->set || !chip->direction_output)
		goto fail;
@ -1725,13 +1739,9 @@ lose:
	return status;
fail:
	spin_unlock_irqrestore(&gpio_lock, flags);
	if (status) {
		int gpio = -1;
		if (desc)
			gpio = desc_to_gpio(desc);
		pr_debug("%s: gpio-%d status %d\n",
			__func__, gpio, status);
	}
	if (status)
		pr_debug("%s: gpio-%d status %d\n", __func__,
			desc_to_gpio(desc), status);
	return status;
}
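The open-drain check in gpiod_direction_output() above encodes a hardware rule: an open-drain output is never actively driven high; requesting a 1 means floating the pin and letting the external pull-up raise the line. A toy sketch of that rule (invented names):

/* Open-drain "output 1" becomes "switch to input and float". */
#include <stdbool.h>
#include <stdio.h>

enum demo_dir { DEMO_IN, DEMO_OUT };

struct demo_pin {
	bool open_drain;
	enum demo_dir dir;
	int driven;		/* only meaningful when dir == DEMO_OUT */
};

static void demo_direction_output(struct demo_pin *pin, int value)
{
	if (pin->open_drain && value) {
		pin->dir = DEMO_IN;	/* float; pull-up supplies the 1 */
		return;
	}
	pin->dir = DEMO_OUT;
	pin->driven = value;
}

int main(void)
{
	struct demo_pin pin = { .open_drain = true };

	demo_direction_output(&pin, 1);
	printf("open-drain high: %s\n",
	       pin.dir == DEMO_IN ? "floating (pull-up)" : "driven");
	return 0;
}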
@ -1753,10 +1763,13 @@ static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
	int status = -EINVAL;
	int offset;

	if (!desc) {
		pr_warn("%s: invalid GPIO\n", __func__);
		return -EINVAL;
	}

	spin_lock_irqsave(&gpio_lock, flags);

	if (!desc)
		goto fail;
	chip = desc->chip;
	if (!chip || !chip->set || !chip->set_debounce)
		goto fail;
@ -1776,13 +1789,9 @@ static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)

fail:
	spin_unlock_irqrestore(&gpio_lock, flags);
	if (status) {
		int gpio = -1;
		if (desc)
			gpio = desc_to_gpio(desc);
		pr_debug("%s: gpio-%d status %d\n",
			__func__, gpio, status);
	}
	if (status)
		pr_debug("%s: gpio-%d status %d\n", __func__,
			desc_to_gpio(desc), status);

	return status;
}
@ -1824,12 +1833,14 @@ EXPORT_SYMBOL_GPL(gpio_set_debounce);
 * It returns the zero or nonzero value provided by the associated
 * gpio_chip.get() method; or zero if no such method is provided.
 */
static int gpiod_get_value(struct gpio_desc *desc)
static int gpiod_get_value(const struct gpio_desc *desc)
{
	struct gpio_chip *chip;
	int value;
	int offset;

	if (!desc)
		return 0;
	chip = desc->chip;
	offset = gpio_chip_hwgpio(desc);
	/* Should be using gpio_get_value_cansleep() */
@ -1912,6 +1923,8 @@ static void gpiod_set_value(struct gpio_desc *desc, int value)
{
	struct gpio_chip *chip;

	if (!desc)
		return;
	chip = desc->chip;
	/* Should be using gpio_set_value_cansleep() */
	WARN_ON(chip->can_sleep);
@ -1938,8 +1951,10 @@ EXPORT_SYMBOL_GPL(__gpio_set_value);
 * This is used directly or indirectly to implement gpio_cansleep(). It
 * returns nonzero if accessing (reading or writing) the GPIO value can sleep.
 */
static int gpiod_cansleep(struct gpio_desc *desc)
static int gpiod_cansleep(const struct gpio_desc *desc)
{
	if (!desc)
		return 0;
	/* only call this on GPIOs that are valid! */
	return desc->chip->can_sleep;
}
@ -1959,11 +1974,13 @@ EXPORT_SYMBOL_GPL(__gpio_cansleep);
 * It returns the number of the IRQ signaled by this (input) GPIO,
 * or a negative errno.
 */
static int gpiod_to_irq(struct gpio_desc *desc)
static int gpiod_to_irq(const struct gpio_desc *desc)
{
	struct gpio_chip *chip;
	int offset;

	if (!desc)
		return -EINVAL;
	chip = desc->chip;
	offset = gpio_chip_hwgpio(desc);
	return chip->to_irq ? chip->to_irq(chip, offset) : -ENXIO;
@ -1980,13 +1997,15 @@ EXPORT_SYMBOL_GPL(__gpio_to_irq);
 * Common examples include ones connected to I2C or SPI chips.
 */

static int gpiod_get_value_cansleep(struct gpio_desc *desc)
static int gpiod_get_value_cansleep(const struct gpio_desc *desc)
{
	struct gpio_chip *chip;
	int value;
	int offset;

	might_sleep_if(extra_checks);
	if (!desc)
		return 0;
	chip = desc->chip;
	offset = gpio_chip_hwgpio(desc);
	value = chip->get ? chip->get(chip, offset) : 0;
@ -2005,6 +2024,8 @@ static void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
	struct gpio_chip *chip;

	might_sleep_if(extra_checks);
	if (!desc)
		return;
	chip = desc->chip;
	trace_gpio_value(desc_to_gpio(desc), 0, value);
	if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))

@ -379,15 +379,15 @@ static const struct pci_device_id pciidlist[] = { /* aka */
	INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
	INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
	INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
	INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */
	INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
	INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
	INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
	INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */
	INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */
	INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
	INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
	INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
	INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */
	INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */
	INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
	INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
	INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
	INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */
	INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
	INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
	INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
@ -495,6 +495,7 @@ static int i915_drm_freeze(struct drm_device *dev)
		intel_modeset_disable(dev);

		drm_irq_uninstall(dev);
		dev_priv->enable_hotplug_processing = false;
	}

	i915_save_state(dev);
@ -568,10 +569,20 @@ static int __i915_drm_thaw(struct drm_device *dev)
		error = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);

		/* We need working interrupts for modeset enabling ... */
		drm_irq_install(dev);

		intel_modeset_init_hw(dev);
		intel_modeset_setup_hw_state(dev, false);
		drm_irq_install(dev);

		/*
		 * ... but also need to make sure that hotplug processing
		 * doesn't cause havoc. Like in the driver load code we don't
		 * bother with the tiny race here where we might lose hotplug
		 * notifications.
		 */
		intel_hpd_init(dev);
		dev_priv->enable_hotplug_processing = true;
	}

	intel_opregion_init(dev);
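The reordering above encodes a dependency chain on resume: interrupts must be working before the modeset calls run, while hotplug processing stays gated off until hotplug detection has been reinitialised. A stub sketch of that ordering (all function names invented):

/* Resume ordering: IRQs first, modeset next, hotplug last. */
#include <stdbool.h>
#include <stdio.h>

static bool hotplug_enabled;

static void irq_install(void)         { puts("1. install IRQs"); }
static void modeset_init_hw(void)     { puts("2. modeset init (needs IRQs)"); }
static void modeset_setup_state(void) { puts("3. restore hw state"); }
static void hpd_init(void)            { puts("4. re-init hotplug detect"); }

static void thaw_demo(void)
{
	irq_install();			/* working interrupts first ... */
	modeset_init_hw();
	modeset_setup_state();
	hpd_init();			/* ... then hotplug, last of all */
	hotplug_enabled = true;		/* now hotplug work may be queued */
}

int main(void)
{
	thaw_demo();
	printf("hotplug processing: %s\n", hotplug_enabled ? "on" : "off");
	return 0;
}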

@ -701,7 +701,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
	irqreturn_t ret = IRQ_NONE;
	int i;

@ -711,6 +711,15 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	sde_ier = I915_READ(SDEIER);
	I915_WRITE(SDEIER, 0);
	POSTING_READ(SDEIER);

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
@ -759,6 +768,8 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	I915_WRITE(SDEIER, sde_ier);
	POSTING_READ(SDEIER);

	return ret;
}
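The SDEIER dance above is a save/mask/process/restore pattern: masking the enable register parks further south-display interrupts in the hardware's back queue, and restoring it re-raises the line if SDEIIR still has bits set. A userspace sketch with plain variables standing in for the MMIO registers (all names invented; real hardware acks SDEIIR write-one-to-clear, the simulation just zeroes it):

/* Save/mask the enable register, handle status once, then restore. */
#include <stdint.h>
#include <stdio.h>

static uint32_t sde_ier = 0xff;	/* interrupt enable register */
static uint32_t sde_iir = 0x04;	/* interrupt status: one bit pending */

static uint32_t reg_read(const uint32_t *reg)      { return *reg; }
static void reg_write(uint32_t *reg, uint32_t val) { *reg = val; }

static void south_irq_demo(void)
{
	/* Save and mask, so the status register is written only once
	 * per handler run. */
	uint32_t saved = reg_read(&sde_ier);
	uint32_t pending;

	reg_write(&sde_ier, 0);

	pending = reg_read(&sde_iir);
	if (pending) {
		printf("handling south interrupt 0x%02x\n", (unsigned)pending);
		reg_write(&sde_iir, 0);	/* ack the pending bits */
	}

	/* Restore; hardware re-raises the IRQ if status is still nonzero. */
	reg_write(&sde_ier, saved);
}

int main(void)
{
	south_irq_demo();
	return 0;
}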
@ -778,7 +789,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pm_iir;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;

	atomic_inc(&dev_priv->irq_received);

@ -787,6 +798,15 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	sde_ier = I915_READ(SDEIER);
	I915_WRITE(SDEIER, 0);
	POSTING_READ(SDEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pm_iir = I915_READ(GEN6_PMIIR);
@ -849,6 +869,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	I915_WRITE(SDEIER, sde_ier);
	POSTING_READ(SDEIER);

	return ret;
}

@ -1613,9 +1613,9 @@
#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
#define ADPA_USE_VGA_HVPOLARITY (1<<15)
#define ADPA_SETS_HVPOLARITY 0
#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
#define ADPA_VSYNC_CNTL_DISABLE (1<<10)
#define ADPA_VSYNC_CNTL_ENABLE 0
#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
#define ADPA_HSYNC_CNTL_DISABLE (1<<11)
#define ADPA_HSYNC_CNTL_ENABLE 0
#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
#define ADPA_VSYNC_ACTIVE_LOW 0

@ -88,7 +88,7 @@ static void intel_disable_crt(struct intel_encoder *encoder)
	u32 temp;

	temp = I915_READ(crt->adpa_reg);
	temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
	temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
	temp &= ~ADPA_DAC_ENABLE;
	I915_WRITE(crt->adpa_reg, temp);
}
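The two hunks above fix both halves of the same bug: the HSYNC/VSYNC disable bits were swapped in the register definitions, and the DPMS-off path cleared them when it should set them. A small sketch of the corrected read-modify-write, with invented DEMO_* bit names and a plain variable in place of the register:

/* To turn the sync signals off you *set* the disable bits. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_VSYNC_CNTL_DISABLE (1u << 10)
#define DEMO_HSYNC_CNTL_DISABLE (1u << 11)
#define DEMO_DAC_ENABLE         (1u << 31)

int main(void)
{
	uint32_t adpa = DEMO_DAC_ENABLE;	/* stand-in for ADPA */

	/* Disable: set the sync-disable bits, clear the DAC enable. */
	adpa |= DEMO_HSYNC_CNTL_DISABLE | DEMO_VSYNC_CNTL_DISABLE;
	adpa &= ~DEMO_DAC_ENABLE;

	printf("ADPA after disable: 0x%08x\n", (unsigned)adpa);
	return 0;
}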

@ -1391,8 +1391,8 @@ void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
	enum port port = intel_dig_port->port;
	bool wait;
	uint32_t val;
	bool wait = false;

	if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
		val = I915_READ(DDI_BUF_CTL(port));

@ -3604,6 +3604,30 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
	 */
}

/**
 * g4x_fixup_plane - ugly workaround for G45 to fire up the hardware
 * cursor plane briefly if not already running after enabling the display
 * plane.
 * This workaround avoids occasional blank screens when self refresh is
 * enabled.
 */
static void
g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 cntl = I915_READ(CURCNTR(pipe));

	if ((cntl & CURSOR_MODE) == 0) {
		u32 fw_bcl_self = I915_READ(FW_BLC_SELF);

		I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
		I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
		intel_wait_for_vblank(dev_priv->dev, pipe);
		I915_WRITE(CURCNTR(pipe), cntl);
		I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
		I915_WRITE(FW_BLC_SELF, fw_bcl_self);
	}
}

static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
@ -3629,6 +3653,8 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)

	intel_enable_pipe(dev_priv, pipe, false);
	intel_enable_plane(dev_priv, plane, pipe);
	if (IS_G4X(dev))
		g4x_fixup_plane(dev_priv, pipe);

	intel_crtc_load_lut(crtc);
	intel_update_fbc(dev);
@ -7256,8 +7282,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *old_fb = crtc->fb;
	struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	unsigned long flags;
@ -7282,8 +7308,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,

	work->event = event;
	work->crtc = crtc;
	intel_fb = to_intel_framebuffer(crtc->fb);
	work->old_fb_obj = intel_fb->obj;
	work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
	INIT_WORK(&work->work, intel_unpin_work_fn);

	ret = drm_vblank_get(dev, intel_crtc->pipe);
@ -7303,9 +7328,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
	intel_crtc->unpin_work = work;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
		flush_workqueue(dev_priv->wq);

@ -7340,6 +7362,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,

cleanup_pending:
	atomic_dec(&intel_crtc->unpin_work_count);
	crtc->fb = old_fb;
	drm_gem_object_unreference(&work->old_fb_obj->base);
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
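The page-flip hunks stash the outgoing framebuffer in old_fb up front so the cleanup path can put it back. A generic sketch of that stash-and-rollback pattern (invented names, strings in place of framebuffer objects):

/* Save the old state before publishing the new one; restore on failure. */
#include <errno.h>
#include <stdio.h>

struct crtc_demo {
	const char *fb;
};

static int pin_new_fb(const char *fb)
{
	(void)fb;
	return -ENOMEM;			/* pretend pinning failed */
}

static int page_flip_demo(struct crtc_demo *crtc, const char *fb)
{
	const char *old_fb = crtc->fb;	/* stashed up front */
	int ret;

	crtc->fb = fb;			/* publish the new framebuffer */
	ret = pin_new_fb(fb);
	if (ret) {
		crtc->fb = old_fb;	/* roll back on the cleanup path */
		return ret;
	}
	return 0;
}

int main(void)
{
	struct crtc_demo crtc = { .fb = "fb-old" };

	page_flip_demo(&crtc, "fb-new");
	printf("crtc->fb = %s\n", crtc.fb);	/* still fb-old */
	return 0;
}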