Merge remote-tracking branch 'caf/LA.BF64.1.2.3_rb1.6' into HEAD

Change-Id: Ie912e9b346570a7b2d2d7cd5aadb6e09a440a523
This commit is contained in:
Olivier Karasangabo 2017-01-31 13:27:32 +01:00
commit 6641284139
No known key found for this signature in database
GPG Key ID: 055DBFDF9547D980
424 changed files with 17163 additions and 17531 deletions

View File

@ -110,10 +110,16 @@ $(KERNEL_CONFIG): $(KERNEL_OUT) FORCE
env KBUILD_DIFFCONFIG=$(KERNEL_DIFFCONFIG) \
$(MAKE) -C kernel O=../$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_DEFCONFIG)
$(KERNEL_HEADERS_INSTALL) $(TARGET_PREBUILT_INT_KERNEL): $(KERNEL_BUILD_STAMP)
$(KERNEL_BUILD_STAMP): $(KERNEL_OUT) $(KERNEL_SRC_DIR)/
$(TARGET_PREBUILT_INT_KERNEL): $(KERNEL_OUT) $(KERNEL_HEADERS_INSTALL)
$(hide) echo "Building kernel..."
$(hide) rm -rf $(KERNEL_OUT)/arch/$(KERNEL_ARCH)/boot/dts
$(MAKE) -C kernel O=../$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_CFLAGS)
$(MAKE) -C kernel O=../$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_CFLAGS) modules
$(MAKE) -C kernel O=../$(KERNEL_OUT) INSTALL_MOD_PATH=../../$(KERNEL_MODULES_INSTALL) INSTALL_MOD_STRIP=1 ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) modules_install
$(mv-modules)
$(clean-module-folder)
$(KERNEL_HEADERS_INSTALL): $(KERNEL_OUT)
$(hide) if [ ! -z "$(KERNEL_HEADER_DEFCONFIG)" ]; then \
$(hide) rm -f ../$(KERNEL_CONFIG); \
env KBUILD_DIFFCONFIG=$(KERNEL_DIFFCONFIG) \
@ -129,14 +135,6 @@ $(KERNEL_BUILD_STAMP): $(KERNEL_OUT) $(KERNEL_SRC_DIR)/
echo $(KERNEL_CONFIG_OVERRIDE) >> $(KERNEL_OUT)/.config; \
$(MAKE) -C kernel O=../$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) oldconfig; fi
$(hide) rm -rf $(KERNEL_OUT)/arch/$(KERNEL_ARCH)/boot/dts
$(MAKE) -C kernel O=../$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_CFLAGS)
$(MAKE) -C kernel O=../$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_CFLAGS) modules
$(MAKE) -C kernel O=../$(KERNEL_OUT) INSTALL_MOD_PATH=../../$(KERNEL_MODULES_INSTALL) INSTALL_MOD_STRIP=1 ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) modules_install
$(mv-modules)
$(clean-module-folder)
$(hide) touch $(KERNEL_HEADERS_INSTALL) $(TARGET_PREBUILT_INT_KERNEL) $(KERNEL_BUILD_STAMP)
kerneltags: $(KERNEL_OUT) $(KERNEL_CONFIG)
$(MAKE) -C kernel O=../$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) tags
@if [ ! -z "$(KERNEL_CONFIG_OVERRIDE)" ]; then \

View File

@ -0,0 +1,119 @@
What: /sys/block/zram<id>/num_reads
Date: August 2015
Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Description:
The num_reads file is read-only and specifies the number of
reads (failed or successful) done on this device.
Now accessible via zram<id>/stat node.
What: /sys/block/zram<id>/num_writes
Date: August 2015
Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Description:
The num_writes file is read-only and specifies the number of
writes (failed or successful) done on this device.
Now accessible via zram<id>/stat node.
What: /sys/block/zram<id>/invalid_io
Date: August 2015
Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Description:
The invalid_io file is read-only and specifies the number of
non-page-size-aligned I/O requests issued to this device.
Now accessible via zram<id>/io_stat node.
What: /sys/block/zram<id>/failed_reads
Date: August 2015
Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Description:
The failed_reads file is read-only and specifies the number of
failed reads happened on this device.
Now accessible via zram<id>/io_stat node.
What: /sys/block/zram<id>/failed_writes
Date: August 2015
Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Description:
The failed_writes file is read-only and specifies the number of
failed writes happened on this device.
Now accessible via zram<id>/io_stat node.
What: /sys/block/zram<id>/notify_free
Date: August 2015
Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Description:
The notify_free file is read-only. Depending on device usage
scenario it may account a) the number of pages freed because
of swap slot free notifications or b) the number of pages freed
because of REQ_DISCARD requests sent by bio. The former ones
are sent to a swap block device when a swap slot is freed, which
implies that this disk is being used as a swap disk. The latter
ones are sent by filesystem mounted with discard option,
whenever some data blocks are getting discarded.
Now accessible via zram<id>/io_stat node.
What: /sys/block/zram<id>/zero_pages
Date: August 2015
Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Description:
The zero_pages file is read-only and specifies number of zero
filled pages written to this disk. No memory is allocated for
such pages.
Now accessible via zram<id>/mm_stat node.
What: /sys/block/zram<id>/orig_data_size
Date: August 2015
Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Description:
The orig_data_size file is read-only and specifies uncompressed
size of data stored in this disk. This excludes zero-filled
pages (zero_pages) since no memory is allocated for them.
Unit: bytes
Now accessible via zram<id>/mm_stat node.
What: /sys/block/zram<id>/compr_data_size
Date: August 2015
Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Description:
The compr_data_size file is read-only and specifies compressed
size of data stored in this disk. So, compression ratio can be
calculated using orig_data_size and this statistic.
Unit: bytes
Now accessible via zram<id>/mm_stat node.
What: /sys/block/zram<id>/mem_used_total
Date: August 2015
Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Description:
The mem_used_total file is read-only and specifies the amount
of memory, including allocator fragmentation and metadata
overhead, allocated for this disk. So, allocator space
efficiency can be calculated using compr_data_size and this
statistic.
Unit: bytes
Now accessible via zram<id>/mm_stat node.
What: /sys/block/zram<id>/mem_used_max
Date: August 2015
Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Description:
The mem_used_max file is read/write and specifies the maximum
amount of memory zram has consumed to store compressed data.
For resetting the value, you should write "0". Otherwise,
you could see -EINVAL.
Unit: bytes
Downgraded to write-only node: so it's possible to set new
value only; its current value is stored in zram<id>/mm_stat
node.
What: /sys/block/zram<id>/mem_limit
Date: August 2015
Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Description:
The mem_limit file is read/write and specifies the maximum
amount of memory ZRAM can use to store the compressed data.
The limit could be changed in run time and "0" means disable
the limit. No limit is the initial state. Unit: bytes
Downgraded to write-only node: so it's possible to set new
value only; its current value is stored in zram<id>/mm_stat
node.

View File

@ -0,0 +1,71 @@
What: /sys/class/dual_role_usb/.../
Date: June 2015
Contact: Badhri Jagan Sridharan<badhri@google.com>
Description:
Provide a generic interface to monitor and change
the state of dual role usb ports. The name here
refers to the name mentioned in the
dual_role_phy_desc that is passed while registering
the dual_role_phy_intstance through
devm_dual_role_instance_register.
What: /sys/class/dual_role_usb/.../supported_modes
Date: June 2015
Contact: Badhri Jagan Sridharan<badhri@google.com>
Description:
This is a static node, once initialized this
is not expected to change during runtime. "dfp"
refers to "downstream facing port" i.e. port can
only act as host. "ufp" refers to "upstream
facing port" i.e. port can only act as device.
"dfp ufp" refers to "dual role port" i.e. the port
can either be a host port or a device port.
What: /sys/class/dual_role_usb/.../mode
Date: June 2015
Contact: Badhri Jagan Sridharan<badhri@google.com>
Description:
The mode node refers to the current mode in which the
port is operating. "dfp" for host ports. "ufp" for device
ports and "none" when cable is not connected.
On devices where the USB mode is software-controllable,
userspace can change the mode by writing "dfp" or "ufp".
On devices where the USB mode is fixed in hardware,
this attribute is read-only.
What: /sys/class/dual_role_usb/.../power_role
Date: June 2015
Contact: Badhri Jagan Sridharan<badhri@google.com>
Description:
The power_role node mentions whether the port
is "sink"ing or "source"ing power. "none" if
they are not connected.
On devices implementing USB Power Delivery,
userspace can control the power role by writing "sink" or
"source". On devices without USB-PD, this attribute is
read-only.
What: /sys/class/dual_role_usb/.../data_role
Date: June 2015
Contact: Badhri Jagan Sridharan<badhri@google.com>
Description:
The data_role node mentions whether the port
is acting as "host" or "device" for USB data connection.
"none" if there is no active data link.
On devices implementing USB Power Delivery, userspace
can control the data role by writing "host" or "device".
On devices without USB-PD, this attribute is read-only
What: /sys/class/dual_role_usb/.../powers_vconn
Date: June 2015
Contact: Badhri Jagan Sridharan<badhri@google.com>
Description:
The powers_vconn node mentions whether the port
is supplying power for VCONN pin.
On devices with software control of VCONN,
userspace can disable the power supply to VCONN by writing "n",
or enable the power supply by writing "y".

View File

@ -1,398 +0,0 @@
Introduction
============
TSC Driver
The TSC (Transport Stream Controller) is a hardware block used in products such
as smart TVs, Set-top boxes and digital media adapters, and is responsible for
two main functionalities:
1. Mux function: enabling the routing of MPEG-2 transport streams (TS) received
from terrestrial/cable/satellite in order to support the different topologies of
the end product, as it may be deployed in many different topologies.
In addition, the active topology may change according to various factors such as
broadcast technology and/or conditional access system.
2. CI function: acting as a common interface, complying with both PC Card and
CI/+ specifications.
The TSC driver has two different interfaces, one for each function.
Hardware description
====================
The TSC HW contains the TSC core, and uses the VBIF unit (IOMMU) which is part
of the broadcast subsystem HW.
Mux function:
-------------
The TSC can receive transport streams from:
a. Two Transport Stream Interfaces (TSIFs) 0 or 1, connected to two external
demods or to external bridge.
b. One TSIF from an integrated demod.
The TSC can route TS from any of the above TSIFs to an external CICAM, using a
software configurable mux.
The TSC can route TS from any of the above TSIFs, and TS received from the CI
Conditional Access Module (CICAM) to two TSIF outputs (0 or 1), using two
software configurable muxes.
The CICAM input and outputs are also managed via two additional TSIFs: TSIF-out
to the CAM, and TSIF-in from the CAM.
CI function:
------------
The common interface is composed of:
1. Card detection logic: the TSC notifies the SW of any change in the card
detection status (via HW interrupt).
2. Control interface used to send/receive the CI messages (APDUs), supporting
data transmission in two formats:
a. Single byte transactions: to/from the attribute memory space of the CAM and
the command area of the CAM.
b. Buffer transactions: to/from the command area of the CAM, using a
configurable buffer size of 1k bytes-64k bytes. This enables transferring
large chunks of data between the CAM and applications.
The data buffer resides in the external memory and the interface to the
memory is done through BCSS VBIF.
The TSC uses PCMCIA interface to interact with the CAM.
The following diagram provides an overview of the TSC HW:
+-------------------------------------------------------------------------+
| |
| +------------------------------+ |
| +-----------+ | TSC Core --. | |
| |Ext. TSIF 0+------------+------------>| \ | +-----------+ |
| +-----------+ | +-----|------------>|Mux)----->TSPP TSIF 0| |
| +-----------+ | | +--|------------>| / | +-----------+ |
| |Ext. TSIF 1+------| | | +->--' | |
| +-----------+ | | | | | --. | |
| | | | +----------|->| \ | +-----------+ |
| +-----------+ | +--|--|-+--------|->|Mux)----->TSPP TSIF 1| |
| |Int. TSIF +---------+--|-|-+------|->| / | +-----------+ |
| +-----------+ | | | | +->--' | |
| | | | | | | |
| | | | | | | |
| |+------+(v-v-v--) | +-----+| |
| ||Card | \ Mux / | |CI/+ +---Data-Interface--+ |
| ||detect| `---' | +----++| | |
| |+-^-^--+ | | | | | |
| +--|-|-------|-------|-------|-+ +------+----+ |
| | | | | | | VBIF | |
| | | +-----v--+ +--+----+ | | | |
| | | |TSIF-Out| |TSIF-In| | +-----------+ |
| | | +-----+--+ +--^----+ | |
| | | | | | |
| ++-+-------v-------+-------++ |
| | CICAM | |
| | | |
| +---------------------------+ |
+-------------------------------------------------------------------------+
Software description
====================
The TSC Linux kernel driver manages the TSC core. It is a standard Linux
platform device driver. It can be configured as a loadable or built-in kernel
module. The driver is supported only in platforms that contain the TSC HW.
The TSC driver uses ION driver to control the IOMMU and map user-allocated
buffers to the TSC IOMMU domain.
The driver provides an abstraction of the TSC HW functionality for user-space
clients via two separate interfaces: tsc_mux and tsc_ci. These interfaces may
be used by upper layers to utilize the TSC HW for routing the TS and supporting
the Common Interface specification.
Driver initialization
---------------------
The driver's probe function is invoked if there is a matching device tree node.
The probe function gets the required memory resources (i.e., register address
spaces) and maps them to kernel space for the driver's use.
The probe function also requests the required IRQs, GPIOs and clocks, and gets
the TSC IOMMU domain. The probe function also disables the TSIFs input.
Finally, the function creates two character device drivers: "tsc_mux","tsc_ci".
See API description in interface section.
Data paths
-----------
The TSC does not process the TS data received from the TSIFs. It just manages
the routing of that data.
Control paths - Mux function
----------------------------
Example for routing the TS from external demod TSIF 0 to the CAM, and from the
CAM to TSIF 1 of the TSPP:
struct tsc_route tsif_cam = {TSC_SOURCE_EXTERNAL0, TSC_DEST_CICAM};
struct tsc_route cam_tspp = {TSC_SOURCE_CICAM, TSC_DEST_TSPP1};
int mux_fd, ret;
enum tsc_source tsif0 = TSC_SOURCE_EXTERNAL0;
enum tsc_source cam = TSC_SOURCE_CICAM;
/* opening Mux char device */
mux_fd = open("/dev/tsc_mux0");
/* Configure the CAM mux to route TS from external demod TSIF 0: */
ret = ioctl(mux_fd, TSC_CONFIG_ROUTE, &tsif_cam);
/* Configure the TSPP TSIF 1 mux to route TS from CAM: */
ret = ioctl(mux_fd, TSC_CONFIG_ROUTE, &cam_tspp);
/* Enabling the external demod TSIF 0, and the CAM TSIF-in and TSIF-out */
ret = ioctl(mux_fd, TSC_ENABLE_INPUT, &tsif0);
ret = ioctl(mux_fd, TSC_ENABLE_INPUT, &cam);
close(mux_fd);
Control paths - CI function
---------------------------
Example for writing a buffer to the CAM command area:
Assumptions:
1. The user allocated a buffer using ION driver and wrote to that buffer.
Also, retrieved the ion fd of that buffer and saved it to:
int buffer_fd;
2. The user already performed buffer size negotiation with the CAM according to
CI/+ specification, and had set the CAM size register with the buffer size. This
size is saved to: int size;
3. The user decided about the time the user wants to wait for the data
transmission.
struct tsc_buffer_mode buff_params = {buffer_fd, size, timeout};
int ret;
/* Perform a blocking write buffer transaction for at most timeout */
ret = ioctl(fd, TSC_WRITE_CAM_BUFFER, &buff_params);
/* ret indicate whether the transaction succeeded */
Example for SW reset to the CAM (according to CI/+ specification):
struct single_byte_mode cmd_params = {1, RS bit set, timeout};
struct single_byte_mode stat_params = {1, not initialize, timeout};
int ci_fd, ret;
u8 data;
/* opening CI char device */
ci_fd = open("/dev/tsc_ci0");
/* Setting the RS bit of the CAM command register */
ret = ioctl(ci_fd, TSC_WRITE_CAM_IO, &cmd_params);
/* Polling the FR bit of the CAM status register */
ret = ioctl(ci_fd, TSC_READ_CAM_IO, &stat_params);
data = stat_params.data;
while (data & FR_BIT_MASK) {
ret = ioctl(ci_fd, TSC_READ_CAM_IO, &stat_params);
data = stat_params.data;
}
close(ci_fd);
Design
======
The TSC driver is a regular Linux platform driver designed to support the
TSC HW available on specific SoCs.
The driver provides two user-space APIs: tsc_mux that allows the client full
control over the configuration of the TS routing, and tsc_ci that enables the
client to implement the Common Interface in front of the CAM. It does so while
encapsulating HW implementation details that are not relevant to the clients.
The driver enforces HW restrictions and checks for input parameters
validity, providing a success or failure return value for each API function:
0 upon success or negative value on failure. Errno parameter is set to indicate
the failure reason.
However, the driver does not enforce any high-level policy with regard to the
correct use of the TSC HW for various use-cases.
Power Management
================
The TSC driver prevents the CPU from sleeping while the HW is active by using
wakeup_source API. When there are no open devices the driver releases the wakeup
source. In a similar manner, the driver enables the HW clocks only when needed.
SMP/multi-core
==============
The driver uses a spinlock to protect accesses to its internal databases,
for synchronization between user control API and kernel interrupt handlers.
The driver uses a mutex for all the Mux operations to synchronize access to the
routing internal databases. The driver uses another mutex for all the CI
operations to synchronize data sent and received to and from the CAM.
Security
========
Although the TSC is the bridge to the external conditional access module, it has no
security aspects. Any protection which is needed is performed by the upper
layers. For example, the messages which are written to the CAM are encrypted.
Thus the TSC accesses only non-protected, HLOS accessible memory regions.
Performance
===========
Control operations are not considered as performance critical.
Most of the control operations are assumed to be fairly uncommon.
Interface
=========
Kernel-space API
----------------
The TSC driver does not provide any kernel-space API, only a user-space API.
User-space API
----------------
Open: upper layer can open tsc_mux device and/or tsc_ci device.
Release: close the device and release all the allocated resources.
Poll: two different functions - one for Mux, one for CI. The Mux poll waits for
rate mismatch interrupt. The CI poll waits for card detection HW interrupt.
The rate mismatch interrupt is not cleared in the interrupt handler because it
will signal again all the time. Therefore it is cleared via a specific ioctl
that upper layer can use after the problem is solved. Additionally, the
interrupt is cleared when the card is removed.
ioctl: two functions, one for mux and one for ci. The ioctl are specified below.
TSC Mux - routing the TS:
-------------------------
enum tsc_source {
TSC_SOURCE_EXTERNAL0,
TSC_SOURCE_EXTERNAL1,
TSC_SOURCE_INTERNAL,
TSC_SOURCE_CICAM
};
enum tsc_dest {
TSC_DEST_TSPP0,
TSC_DEST_TSPP1,
TSC_DSET_CICAM
};
struct tsc_route {
enum tsc_source source;
enum tsc_dest dest;
};
#define TSC_CONFIG_ROUTE _IOW(TSC_IOCTL_BASE, 0, struct tsc_tspp_route)
#define TSC_ENABLE_INPUT _IOW(TSC_IOCTL_BASE, 1, enum tsc_source)
#define TSC_DISABLE_INPUT _IOW(TSC_IOCTL_BASE, 2, enum tsc_source)
These 3 IOCTLs control the 3 muxes that route the TS, and enable/disable the
TSIFs input.
TSC Mux - configuring the TSIFs:
--------------------------------
enum tsc_data_type {
TSC_DATA_TYPE_SERIAL,
TSC_DATA_TYPE_PARALLEL
};
enum tsc_receive_mode {
TSC_RECEIVE_MODE_START_VALID,
TSC_RECEIVE_MODE_START_ONLY,
TSC_RECEIVE_MODE_VALID_ONLY
};
struct tsc_tsif_params {
enum tsc_source source;
enum tsc_receive_mode receive_mode;
enum tsc_data_type data_type;
int clock_polarity;
int data_polarity;
int start_polarity;
int valid_polarity;
int error_polarity;
int data_swap;
int set_error;
};
#define TSC_SET_TSIF_CONFIG _IOW(TSC_IOCTL_BASE, 3, struct tsc_tsif_params)
This IOCTL enables configuring a specific TSIF with all possible configurations.
TSC Mux - clearing rate mismatch interrupt
------------------------------------------
#define TSC_CLEAR_RATE_MISMATCH_IRQ _IO(TSC_IOCTL_BASE, 4)
This IOCTL is used for clearing the interrupt, which is not done automatically
by the driver.
TSC CI - CAM configuration:
---------------------------
enum tsc_cam_personality {
TSC_CICAM_PERSONALITY_CI,
TSC_CICAM_PERSONALITY_CIPLUS,
TSC_CICAM_PERSONALITY_PCCARD,
TSC_CICAM_PERSONALITY_DISABLE
};
enum tsc_card_status {
TSC_CARD_STATUS_NOT_DETECTED,
TSC_CARD_STATUS_DETECTED,
TSC_CARD_STATUS_FAILURE
};
#define TSC_CICAM_SET_CLOCK _IOW(TSC_IOCTL_BASE, 5, int)
This IOCTL sets the clock rate of the TS from the TSC to the CAM
#define TSC_CAM_RESET _IO(TSC_IOCTL_BASE, 6)
This IOCTL performs HW reset to the CAM
#define TSC_CICAM_PERSONALITY_CHANGE \
_IOW(TSC_IOCTL_BASE, 7, enum tsc_cam_personality)
This IOCTL configures the PCMCIA pins according to the specified card type.
#define TSC_GET_CARD_STATUS _IOR(TSC_IOCTL_BASE, 8, enum tsc_card_status)
This IOCTL queries the card detection pins and returns their status.
TSC CI - Data transactions:
---------------------------
struct tsc_single_byte_mode {
u16 address;
u8 data;
int timeout; /* in msec */
};
struct tsc_buffer_mode {
int buffer_fd;
u16 buffer_size;
int timeout; /* in msec */
};
#define TSC_READ_CAM_MEMORY \
_IOWR(TSC_IOCTL_BASE, 9, struct tsc_single_byte_mode)
#define TSC_WRITE_CAM_MEMORY \
_IOW(TSC_IOCTL_BASE, 10, struct tsc_single_byte_mode)
#define TSC_READ_CAM_IO \
_IOWR(TSC_IOCTL_BASE, 11, struct tsc_single_byte_mode)
#define TSC_WRITE_CAM_IO \
_IOW(TSC_IOCTL_BASE, 12, struct tsc_single_byte_mode)
#define TSC_READ_CAM_BUFFER \
_IOWR(TSC_IOCTL_BASE, 13, struct tsc_buffer_mode)
#define TSC_WRITE_CAM_BUFFER \
_IOW(TSC_IOCTL_BASE, 14, struct tsc_buffer_mode)
These IOCTLs perform a read/write data transaction of the requested type.
Driver parameters
=================
The TSC module receives one parameter:
tsc_iommu_bypass - 0 for using the VBIF, 1 for not using it. Not using the VBIF
is a debug configuration.
Config options
==============
To enable the driver, set CONFIG_TSC to y (built-in) or m (kernel module)
in the kernel configuration menu.
Dependencies
============
The TSC driver uses the ION driver for IOMMU registration and buffer
mapping to BCSS VBIF.
User space utilities
====================
None.
Other
=====
None.
Known issues
============
None.
To do
=====
None.

View File

@ -1,497 +0,0 @@
Introduction
============
TSPP2 Driver
The TSPP2 (Transport Stream Packet Processor v2) is a hardware accelerator
designed to process MPEG-2 Transport Stream (TS) data. It can be used to
process broadcast TV services. The TSPP2 HW processes the TS packets, offloads
the host CPU and supports the real-time processing requirements of such
services.
TS data can be received either from TSIF (Transport Stream Interface) input
or from memory input, to support playing live broadcasts as well as
playback from memory. Recording is also supported.
TSPP2 is a significantly different HW unit than the TSPP unit described in
Documentation/arm/msm/tspp.txt. The functionality is enhanced and the HW
design is different.
Hardware description
====================
The TSPP2 HW contains the TSPP2 core, a BAM (Bus Access Manager, used for DMA
operations) unit, and a VBIF unit (IOMMU).
The TSPP2 HW supports:
a. Up to two TSIF inputs and up to eight memory inputs.
b. Various TS packet sizes (188/192 bytes) and formats (timestamp location).
c. PID filtering.
d. Raw transmit operation for section filtering or recording.
e. Full PES and separated PES transmit operation for audio and video playback.
f. Decryption and re-encryption operations for secure transport streams.
g. PCR extraction.
h. Indexing - identifying patterns in video streams.
The following diagram provides an overview of the TSPP2 HW:
+------------------------------------------------------------------+
| |
| +-------------+ +--------------------+ |
| | TSIF 0 +---> TSPP2 Core | |
| +-------------+ | | |
| | +---------------+ | |
| +-------------+ | | | | |
| | TSIF 1 +---> | Source 0 | | |
| +-------------+ | | | | |
| | | | | |
| | | | | |
| | | +------------+| | +--------------+ |
| | | | Filter 0 +|---------> BAM pipe 3 | |
| | | +------------+| | +--------------+ |
| | | +------------+| | +--------------+ |
| +-------------+ | | | Filter 1 +|---------> BAM pipe 4 | |
| | BAM pipe 0 +---> | +------------+| | +--------------+ |
| +-------------+ | | | | | |
| +-------------+ | +---------------+ | +--------------+ |
| | BAM pipe 1 +--->--------------------|----| | |
| +-------------+ | | | VBIF | |
| +-------------+ | | | IOMMU | |
| | BAM pipe 2 +--->--------------------|----| | |
| +-------------+ +--------------------+ +--------------+ |
+------------------------------------------------------------------+
A source is configured to have either a TSIF input (TSIF 0 or 1) or a
memory input (a BAM pipe). One or more filters are attached to the source.
Each filter has a 13-bit PID and mask values to perform the PID filtering.
Additionally, one or more operations are added to each filter to achieve the
required functionality. Each operation has specific parameters. The operation's
output is usually placed in an output pipe.
The TSPP HW uses its own virtual address space, mapping memory buffer addresses
using the VBIF IOMMU.
Software description
====================
The TSPP2 Linux kernel driver manages the TSPP2 core. The TSPP2 driver utilizes
the SPS driver to configure and manage the BAM unit, which is used to perform
DMA operations and move TS data to/from system memory.
The TSPP2 driver uses the ION driver to control the IOMMU and map user-allocated
buffers to the TSPP2 IOMMU domain.
The TSPP2 is a standard Linux platform device driver. It can be configured as a
loadable or built-in kernel module. The driver is supported only in platforms
that contain the TSPP2 HW.
The driver provides an abstraction of the TSPP2 HW functionality for
kernel-space clients. For example, the dvb/demux kernel driver, which provides
an API for upper layers to perform TS de-multiplexing (including PID filtering,
recording, indexing etc.), uses the TSPP2 driver to utilize the TSPP2 HW and
offload the CPU, instead of doing all the required processing in SW.
For further information please refer to Documentation/dvb/qcom-mpq.txt.
Terminology
-----------
This section describes some of the software "objects" implemented by the driver.
a. TSPP2 device: an instance of the TSPP2 device representing the TSPP2 HW and
its capabilities. The client identifies a device instance according to a
device ID.
b. Indexing table: A TSPP2 device contains 4 indexing tables. These tables are
used to identify patterns in the video stream and report on them.
The client identifies an indexing table according to a table ID.
c. Pipe: a BAM pipe used for DMA operations. The TSPP2 HW has a BAM unit with
31 pipes. A pipe contains a memory buffer and a corresponding descriptor ring,
and is used as the output for TSPP2 data (e.g. PES payload, PES headers,
indexing information etc.). For memory inputs, a pipe is used as the input
buffer where data can be written to for TSPP2 processing. BAM Pipes are
managed by the TSPP2 driver using the SPS driver which controls BAM HW. The
client is responsible for buffer memory allocation, and can control many
BAM-related pipe parameters.
d. Source: a source object represents data "stream" from the TS input,
through the filters and operations that perform the processing on the TS data,
until the output. A source has the following properties:
- Either a TSIF or a memory input.
- For memory input: an input pipe.
- Source-related configuration (e.g., packet size and format).
- One or more PID filters. Each filter contains operations.
- One or more output pipes.
The client is responsible to configure the source object as needed using the
appropriate API. The client identifies a source using a source handle, which
the driver provides when opening a source for use.
e. Filter: a filter object represents a PID filter which is used to get only the
TS packets with specific PIDs and filter out all other TS packets in the stream.
The client adds filters to the source object to define the processing of data.
Each filter has a 13-bit PID value and bit-mask, so a filter can be used to
get TS packets with various PID values. Note, however, that it is highly
recommended to use each filter with a unique PID (i.e., 0x1FFF mask), and it is
mandatory that the PIDs handled by each source's filters are mutually exclusive
(i.e., the client must not configure two filters in the same source that handle
the same PID values). A filter has up to 16 operations that instruct the TSPP2
HW how to process the data. The client identifies a filter using a filter
handle, which the driver provides when opening a filter for use.
f. Operation: an operation object represents a basic building block describing
how data is processed. Operations are added to a filter and are performed on
the data received by this filter, in the order they were added. One or more
operations may be required to achieve the desired functionality. For example,
a "section filtering" functionality requires a raw transmit operation, while a
"recording" functionality requires a raw transmit operation as well as an
indexing operation (to support trick modes).
Driver initialization
---------------------
The driver's probe function is invoked if there is a matching device tree node
(or platform device). The probe function gets the required memory resources
(i.e., register address spaces) and maps them to kernel space for the
driver's use. The probe function also requests the required IRQs and gets the
TSPP2 IOMMU domain. Finally, the probe function resets all HW registers to
appropriate default values, and resets all the required software structures.
See API description in Interface section.
Usage examples
--------------
Section filtering example - opening a Raw filter with data from TSIF0:
----------------------------------------------------------------------
u32 dev_id = 0;
u32 src_handle;
u32 pipe_handle;
u32 filter_handle;
u32 iova;
u32 vaddress;
struct tspp2_config cfg = {...};
struct tspp2_pipe_config_params pipe_config;
struct tspp2_pipe_pull_mode_params pull_params = {0, 0};
struct tspp2_operation raw_op;
struct sps_event_notify event;
struct sps_iovec desc;
/* Open TSPP2 device for use */
tspp2_device_open(dev_id);
/* Set global configuration */
tspp2_config_set(dev_id, &cfg);
/* Open source with TSIF0 input */
tspp2_src_open(dev_id, TSPP2_INPUT_TSIF0, &src_handle);
/* Set parsing options if needed, for example: */
tspp2_src_parsing_option_set(src_handle,
TSPP2_SRC_PARSING_OPT_CHECK_CONTINUITY, 1);
/* Assume normal sync byte, assume no need for scrambling configuration */
/* Set packet size and format: */
tspp2_src_packet_format_set(src_handle, TSPP2_PACKET_FORMAT_188_RAW);
/* Since this is TSIF input, flow control is in push mode */
/* Allocate memory for output pipe via ION not shown here */
/* Open an output pipe for use */
pipe_config.ion_client = ...
pipe_config.buffer_handle = ...
pipe_config.buffer_size = ...
pipe_config.pipe_mode = TSPP2_SRC_PIPE_OUTPUT;
pipe_config.sps_cfg.descriptor_size = 188;
pipe_config.sps_cfg.setting = (SPS_O_AUTO_ENABLE | SPS_O_HYBRID |
SPS_O_OUT_OF_DESC | SPS_O_ACK_TRANSFERS);
pipe_config.sps_cfg.wakeup_events = SPS_O_OUT_OF_DESC;
pipe_config.sps_cfg.callback = ...
pipe_config.sps_cfg.user_info = ...
tspp2_pipe_open(dev_id, &pipe_config, &iova, &pipe_handle);
/* Attach the pipe to the source */
tspp2_src_pipe_attach(src_handle, pipe_handle, &pull_params);
/* Open a filter for PID 13 */
tspp2_filter_open(src_handle, 13, 0x1FFF, &filter_handle);
/* Add a raw transmit operation */
raw_op.type = TSPP2_OP_RAW_TRANSMIT;
raw_op.params.raw_transmit.input = TSPP2_OP_BUFFER_A;
raw_op.params.raw_transmit.timestamp_mode = TSPP2_OP_TIMESTAMP_NONE;
raw_op.params.raw_transmit.skip_ts_packets_with_errors = 0;
raw_op.params.raw_transmit.output_pipe_handle = pipe_handle;
tspp2_filter_operations_add(filter_handle, &raw_op, 1);
/* Enable filter and source to start getting data */
tspp2_filter_enable(filter_handle);
tspp2_source_enable(src_handle);
/*
* Data path: poll pipe (or get notifications from pipe via
* registered callback).
*/
tspp2_pipe_last_address_used_get(pipe_handle, &vaddress);
/* Process data... */
/* Get and release descriptors: */
tspp2_pipe_descriptor_get(pipe_handle, &desc);
tspp2_pipe_descriptor_put(pipe_handle, desc.addr, desc.size, ...);
/* Teardown: */
tspp2_src_disable(src_handle);
tspp2_filter_disable(filter_handle);
tspp2_filter_close(filter_handle);
tspp2_src_pipe_detach(src_handle, pipe_handle);
tspp2_pipe_close(pipe_handle);
tspp2_src_close(src_handle);
tspp2_device_close(dev_id);
Debug facilities
----------------
The TSPP2 driver supports several debug facilities via debugfs:
a. Ability to read the status of TSIF and TSPP2 HW registers via debugfs.
b. Ability to print HW statistics, error and performance counters via debugfs.
c. Ability to print SW status via debugfs.
Design
======
The TSPP2 driver is a regular Linux platform driver designed to support the
TSPP2 HW available on specific Qualcomm SoCs.
The driver provides an extensive kernel-space API to allow the client full
control over the configuration of the TSPP2 HW, while encapsulating HW
implementation details that are not relevant to the client.
The driver enforces HW restrictions and checks for input parameters
validity, providing a success or failure return value for each API function.
However, the driver does not enforce any high-level policy with regard to the
correct use of the TSPP2 HW for various use-cases.
Power Management
================
The TSPP2 driver prevents the CPU from sleeping while the HW is active by
using the wakeup_source API. When the HW is not active (i.e., no sources
configured), the driver indicates it is ready for system suspend by invoking
__pm_relax(). When the HW needs to be active (i.e., a source has been opened and
enabled), the driver invokes __pm_stay_awake().
In a similar manner, the driver enables the HW clocks only when needed.
The TSPP2 HW manages power saving automatically when the HW is not used.
No SW involvement is required.
SMP/multi-core
==============
The driver uses a mutex for mutual exclusion between kernel API calls.
A spinlock is used to protect accesses to its internal databases which can be
performed both from interrupt handler context and from API context.
Security
========
None.
Performance
===========
Control operations are not considered as performance critical.
Most of the control operations are assumed to be fairly uncommon.
Data-path operations involve only getting descriptors from the pipe and
releasing them back to the pipe for reuse.
Interface
=========
Kernel-space API
----------------
Control path API
-------------------
TSPP2 device open / close API:
------------------------------
int tspp2_device_open(u32 dev_id);
int tspp2_device_close(u32 dev_id);
Global configuration for the TSPP2 device:
------------------------------------------
int tspp2_config_set(u32 dev_id, const struct tspp2_config *cfg);
Set device global configuration.
int tspp2_config_get(u32 dev_id, struct tspp2_config *cfg);
Get current device global configuration.
Configure Indexing Tables:
--------------------------
int tspp2_indexing_prefix_set(u32 dev_id, u8 table_id, u32 value, u32 mask);
Set prefix value and mask of an indexing table.
int tspp2_indexing_patterns_add(u32 dev_id, u8 table_id, const u32 *values,
const u32 *masks, u8 patterns_num);
Add patterns to an indexing table.
int tspp2_indexing_patterns_clear(u32 dev_id, u8 table_id);
Clear all patterns of an indexing table
Opening and closing Pipes:
--------------------------
int tspp2_pipe_open(u32 dev_id, const struct tspp2_pipe_config_params *cfg,
u32 *iova, u32 *pipe_handle);
Open a pipe for use.
int tspp2_pipe_close(u32 pipe_handle);
Close an opened pipe.
Source configuration:
---------------------
int tspp2_src_open(u32 dev_id, enum tspp2_src_input input, u32 *src_handle);
Open a new source for use.
int tspp2_src_close(u32 src_handle);
Close an opened source.
int tspp2_src_parsing_option_set(u32 src_handle,
enum tspp2_src_parsing_option option, int value);
Set source parsing configuration option.
int tspp2_src_parsing_option_get(u32 src_handle,
enum tspp2_src_parsing_option option, int *value);
Get source parsing configuration option.
int tspp2_src_sync_byte_config_set(u32 src_handle, int check_sync_byte,
u8 sync_byte_value);
Set source sync byte configuration.
int tspp2_src_sync_byte_config_get(u32 src_handle, int *check_sync_byte,
u8 *sync_byte_value);
Get source sync byte configuration.
int tspp2_src_scrambling_config_set(u32 src_handle,
const struct tspp2_src_scrambling_config *cfg);
Set source scrambling configuration.
int tspp2_src_scrambling_config_get(u32 src_handle,
struct tspp2_src_scrambling_config *cfg);
Get source scrambling configuration.
int tspp2_src_packet_format_set(u32 src_handle,
enum tspp2_packet_format format);
Set source packet size and format.
int tspp2_src_pipe_attach(u32 src_handle, u32 pipe_handle,
const struct tspp2_pipe_pull_mode_params *cfg);
Attach a pipe to a source.
int tspp2_src_pipe_detach(u32 src_handle, u32 pipe_handle);
Detach a pipe from a source.
int tspp2_src_enable(u32 src_handle);
Enable source (start using it).
int tspp2_src_disable(u32 src_handle);
Disable source (stop using it).
int tspp2_src_filters_clear(u32 src_handle);
Clear all filters from a source.
Filter and Operation configuration:
-----------------------------------
int tspp2_filter_open(u32 src_handle, u16 pid, u16 mask, u32 *filter_handle);
Open a new filter and add it to a source.
int tspp2_filter_close(u32 filter_handle);
Close a filter.
int tspp2_filter_enable(u32 filter_handle);
Enable a filter.
int tspp2_filter_disable(u32 filter_handle);
Disable a filter.
int tspp2_filter_operations_set(u32 filter_handle,
const struct tspp2_operation *ops, u8 operations_num);
Set (add or update) operations to a filter.
int tspp2_filter_operations_clear(u32 filter_handle);
Clear all operations from a filter.
int tspp2_filter_current_scrambling_bits_get(u32 filter_handle,
u8 *scrambling_bits_value);
Get the current scrambling bits.
Events notifications registration:
----------------------------------
int tspp2_global_event_notification_register(u32 dev_id,
u32 global_event_bitmask,
void (*callback)(void *cookie),
void *cookie);
Get notified on a global event.
int tspp2_src_event_notification_register(u32 src_handle,
u32 src_event_bitmask,
void (*callback)(void *cookie),
void *cookie);
Get notified on a source event.
int tspp2_filter_event_notification_register(u32 filter_handle,
u32 filter_event_bitmask,
void (*callback)(void *cookie),
void *cookie);
Get notified on a filter event.
Data path API
----------------
int tspp2_pipe_descriptor_get(u32 pipe_handle, struct sps_iovec *desc);
Get a data descriptor from a pipe.
int tspp2_pipe_descriptor_put(u32 pipe_handle, u32 addr,
u32 size, u32 flags);
Put (release) a descriptor for reuse by the pipe.
int tspp2_pipe_last_address_used_get(u32 pipe_handle, u32 *address);
Get the last address the TSPP2 used.
int tspp2_data_write(u32 src_handle, u32 offset, u32 size);
Write (feed) data to a source.
User-space API
--------------
The TSPP2 driver does not provide any user-space API, only a kernel-space API.
The dvb/demux driver, which utilizes the TSPP2 driver (and HW), provides an
extensive user-space API, allowing upper layers to achieve complex demuxing
functionality.
For further information please refer to Documentation/dvb/qcom-mpq.txt.
Driver parameters
=================
The TSPP2 driver supports the following module parameter:
tspp2_iommu_bypass: Bypass VBIF/IOMMU and use physical buffer addresses
instead. This is mostly useful for debug purposes if something is wrong with
the IOMMU configuration. Default is false.
Platform-dependent parameters (e.g., IRQ numbers) are provided to the driver
via the device tree mechanism or the platform device data mechanism.
Config options
==============
To enable the driver, set CONFIG_TSPP2 to y (built-in) or m (kernel module)
in the kernel configuration menu.
Dependencies
============
a. The TSPP2 driver uses the SPS driver to control the BAM unit.
b. The TSPP2 driver uses the ION driver for IOMMU registration and buffer
mapping. The client is responsible to allocate memory buffers via ION.
User space utilities
====================
None.
Other
=====
None.
Known issues
============
None.
To do
=====
None.

View File

@ -98,19 +98,79 @@ size of the disk when not in use so a huge zram is wasteful.
mount /dev/zram1 /tmp
7) Stats:
Per-device statistics are exported as various nodes under
/sys/block/zram<id>/
disksize
num_reads
num_writes
invalid_io
notify_free
discard
zero_pages
orig_data_size
compr_data_size
mem_used_total
mem_used_max
Per-device statistics are exported as various nodes under /sys/block/zram<id>/
A brief description of exported device attributes. For more details please
read Documentation/ABI/testing/sysfs-block-zram.
Name access description
---- ------ -----------
disksize RW show and set the device's disk size
initstate RO shows the initialization state of the device
reset WO trigger device reset
num_reads RO the number of reads
failed_reads RO the number of failed reads
num_writes RO the number of writes
failed_writes RO the number of failed writes
invalid_io RO the number of non-page-size-aligned I/O requests
max_comp_streams RW the number of possible concurrent compress operations
comp_algorithm RW show and change the compression algorithm
notify_free RO the number of notifications to free pages (either
slot free notifications or REQ_DISCARD requests)
zero_pages RO the number of zero filled pages written to this disk
orig_data_size RO uncompressed size of data stored in this disk
compr_data_size RO compressed size of data stored in this disk
mem_used_total RO the amount of memory allocated for this disk
mem_used_max RW the maximum amount of memory zram has consumed to
store compressed data
mem_limit RW the maximum amount of memory ZRAM can use to store
the compressed data
num_migrated RO the number of objects migrated by compaction
WARNING
=======
per-stat sysfs attributes are considered to be deprecated.
The basic strategy is:
-- the existing RW nodes will be downgraded to WO nodes (in linux 4.11)
-- deprecated RO sysfs nodes will eventually be removed (in linux 4.11)
The list of deprecated attributes can be found here:
Documentation/ABI/obsolete/sysfs-block-zram
Basically, every attribute that has its own read accessible sysfs node
(e.g. num_reads) *AND* is accessible via one of the stat files (zram<id>/stat
or zram<id>/io_stat or zram<id>/mm_stat) is considered to be deprecated.
User space is advised to use the following files to read the device statistics.
File /sys/block/zram<id>/stat
Represents block layer statistics. Read Documentation/block/stat.txt for
details.
File /sys/block/zram<id>/io_stat
The stat file represents device's I/O statistics not accounted by block
layer and, thus, not available in zram<id>/stat file. It consists of a
single line of text and contains the following stats separated by
whitespace:
failed_reads
failed_writes
invalid_io
notify_free
File /sys/block/zram<id>/mm_stat
The stat file represents device's mm statistics. It consists of a single
line of text and contains the following stats separated by whitespace:
orig_data_size
compr_data_size
mem_used_total
mem_limit
mem_used_max
zero_pages
num_migrated
8) Deactivate:
swapoff /dev/zram0

View File

@ -0,0 +1,42 @@
Boot time creation of mapped devices
===================================
It is possible to configure a device mapper device to act as the root
device for your system in two ways.
The first is to build an initial ramdisk which boots to a minimal
userspace which configures the device, then pivot_root(8) in to it.
For simple device mapper configurations, it is possible to boot directly
using the following kernel command line:
dm="<name> <uuid> <ro>,table line 1,...,table line n"
name = the name to associate with the device
after boot, udev, if used, will use that name to label
the device node.
uuid = may be 'none' or the UUID desired for the device.
ro = may be "ro" or "rw". If "ro", the device and device table will be
marked read-only.
Each table line may be as normal when using the dmsetup tool except for
two variations:
1. Any use of commas will be interpreted as a newline
2. Quotation marks cannot be escaped and cannot be used without
terminating the dm= argument.
Unless renamed by udev, the device node created will be dm-0 as the
first minor number for the device-mapper is used during early creation.
Example
=======
- Booting to a linear array made up of user-mode linux block devices:
dm="lroot none 0, 0 4096 linear 98:16 0, 4096 4096 linear 98:32 0" \
root=/dev/dm-0
Will boot to a rw dm-linear target of 8192 sectors split across two
block devices identified by their major:minor numbers. After boot, udev
will rename this target to /dev/mapper/lroot (depending on the rules).
No uuid was assigned.

View File

@ -10,7 +10,8 @@ Construction Parameters
<version> <dev> <hash_dev>
<data_block_size> <hash_block_size>
<num_data_blocks> <hash_start_block>
<algorithm> <digest> <salt> <mode>
<algorithm> <digest> <salt>
[<#opt_params> <opt_params>]
<version>
This is the type of the on-disk hash format.
@ -62,14 +63,50 @@ Construction Parameters
<salt>
The hexadecimal encoding of the salt value.
<mode>
Optional. The mode of operation.
<#opt_params>
Number of optional parameters. If there are no optional parameters,
the optional parameters section can be skipped or #opt_params can be zero.
Otherwise #opt_params is the number of following arguments.
0 is the normal mode of operation where a corrupted block will result in an
I/O error.
Example of optional parameters section:
1 ignore_corruption
1 is logging mode where corrupted blocks are logged and a uevent is sent to
notify user space.
ignore_corruption
Log corrupted blocks, but allow read operations to proceed normally.
restart_on_corruption
Restart the system when a corrupted block is discovered. This option is
not compatible with ignore_corruption and requires user space support to
avoid restart loops.
ignore_zero_blocks
Do not verify blocks that are expected to contain zeroes and always return
zeroes instead. This may be useful if the partition contains unused blocks
that are not guaranteed to contain zeroes.
use_fec_from_device
Use forward error correction (FEC) to recover from corruption if hash
verification fails. Use encoding data from the specified device. This
may be the same device where data and hash blocks reside, in which case
fec_start must be outside data and hash areas.
If the encoding data covers additional metadata, it must be accessible
on the hash device after the hash blocks.
Note: block sizes for data and hash devices must match.
fec_roots
Number of generator roots. This equals to the number of parity bytes in
the encoding data. For example, in RS(M, N) encoding, the number of roots
is M-N.
fec_blocks
The number of encoding data blocks on the FEC device. The block size for
the FEC device is <data_block_size>.
fec_start
This is the offset, in <data_block_size> blocks, from the start of the
FEC device to the beginning of the encoding data.
Theory of operation
@ -135,7 +172,7 @@ block boundary) are the hash blocks which are stored a depth at a time
The full specification of kernel parameters and on-disk metadata format
is available at the cryptsetup project's wiki page
http://code.google.com/p/cryptsetup/wiki/DMVerity
https://gitlab.com/cryptsetup/cryptsetup/wikis/DMVerity
Status
======
@ -152,7 +189,7 @@ Set up a device:
A command line tool veritysetup is available to compute or verify
the hash tree or activate the kernel device. This is available from
the cryptsetup upstream repository http://code.google.com/p/cryptsetup/
the cryptsetup upstream repository https://gitlab.com/cryptsetup/cryptsetup/
(as a libcryptsetup extension).
Create hash on the device:

View File

@ -0,0 +1,22 @@
* Qualcomm Technologies Inc Connectivity SubSystem Platform Driver
This platform driver adds support for the CNSS subsystem used for SDIO
based Wi-Fi devices. It also adds support to manage two 1.8V voltage
regulators and WLAN power enable 3.3V PMIC GPIO. The main purpose of this
device tree entry below is to invoke the CNSS SDIO platform driver
and provide a handle to the WLAN power enable 3.3V PMIC GPIO and two 1.8V
PMIC voltage regulator resources.
Required properties:
- compatible: "qcom,cnss_sdio"
- wlan-pmic-gpio: 3.3V PMIC GPIO for external power supply.
- vdd-wlan-io-supply: phandle to the WLAN IO regulator device tree node.
- vdd-wlan-xtal-supply: phandle to the WLAN XTAL regulator device tree node.
Example:
qcom,cnss-sdio {
compatible = "qcom,cnss_sdio";
cnss_sdio,wlan-pmic-gpio = <&pm8019_gpios 3 0>;
vdd-wlan-io-supply = <&mdmfermium_l11>;
vdd-wlan-xtal-supply = <&mdmfermium_l2>;
};

View File

@ -26,7 +26,23 @@ Required properties:
low latency.
regular : regular low latency stream
ultra : ultra low latency stream
ull-pp : ultra low latency stream with post-processing capability
* msm-pcm-dsp-noirq
Required properties:
- compatible : "qcom,msm-pcm-dsp-noirq";
Optional properties
- qcom,msm-pcm-low-latency : Flag indicating whether
the device node is of type low latency
- qcom,latency-level : Flag indicating whether the device node
is of type low latency or ultra low latency
ultra : ultra low latency stream
ull-pp : ultra low latency stream with post-processing capability
* msm-pcm-routing
Required properties:

View File

@ -417,6 +417,7 @@ Private_Dirty: 0 kB
Referenced: 892 kB
Anonymous: 0 kB
Swap: 0 kB
SwapPss: 0 kB
KernelPageSize: 4 kB
MMUPageSize: 4 kB
Locked: 374 kB
@ -427,16 +428,23 @@ the first of these lines shows the same information as is displayed for the
mapping in /proc/PID/maps. The remaining lines show the size of the mapping
(size), the amount of the mapping that is currently resident in RAM (RSS), the
process' proportional share of this mapping (PSS), the number of clean and
dirty private pages in the mapping. Note that even a page which is part of a
MAP_SHARED mapping, but has only a single pte mapped, i.e. is currently used
by only one process, is accounted as private and not as shared. "Referenced"
indicates the amount of memory currently marked as referenced or accessed.
dirty private pages in the mapping.
The "proportional set size" (PSS) of a process is the count of pages it has
in memory, where each page is divided by the number of processes sharing it.
So if a process has 1000 pages all to itself, and 1000 shared with one other
process, its PSS will be 1500.
Note that even a page which is part of a MAP_SHARED mapping, but has only
a single pte mapped, i.e. is currently used by only one process, is accounted
as private and not as shared.
"Referenced" indicates the amount of memory currently marked as referenced or
accessed.
"Anonymous" shows the amount of memory that does not belong to any file. Even
a mapping associated with a file may contain anonymous pages: when MAP_PRIVATE
and a page is modified, the file page is replaced by a private anonymous copy.
"Swap" shows how much would-be-anonymous memory is also used, but out on
swap.
"SwapPss" shows proportional swap share of this mapping.
"VmFlags" field deserves a separate description. This member represents the kernel
flags associated with the particular virtual memory area in two letter encoded
manner. The codes are the following:

View File

@ -46,6 +46,7 @@ parameter is applicable:
BLACKFIN Blackfin architecture is enabled.
CLK Common clock infrastructure is enabled.
CMA Contiguous Memory Area support is enabled.
DM Device mapper support is enabled.
DRM Direct Rendering Management support is enabled.
DYNAMIC_DEBUG Build in debug messages and enable them at runtime
EDD BIOS Enhanced Disk Drive Services (EDD) is enabled
@ -745,6 +746,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Disable PIN 1 of APIC timer
Can be useful to work around chipset bugs.
dm= [DM] Allows early creation of a device-mapper device.
See Documentation/device-mapper/boot.txt.
dmasound= [HW,OSS] Sound subsystem buffers
dma_debug=off If the kernel is compiled with DMA_API_DEBUG support,
this option disables the debugging code at boot.

View File

@ -1281,6 +1281,13 @@ router_solicitations - INTEGER
routers are present.
Default: 3
use_oif_addrs_only - BOOLEAN
When enabled, the candidate source addresses for destinations
routed via this interface are restricted to the set of addresses
configured on this interface (vis. RFC 6724, section 4).
Default: false
use_tempaddr - INTEGER
Preference for Privacy Extensions (RFC3041).
<= 0 : disable Privacy Extensions

View File

@ -447,7 +447,6 @@ This file shows up if CONFIG_DEBUG_STACKOVERFLOW is enabled.
0: try to continue operation.
1: panic immediately.
==============================================================

View File

@ -43,6 +43,8 @@ Currently, these files are in /proc/sys/vm:
- min_slab_ratio
- min_unmapped_ratio
- mmap_min_addr
- mmap_rnd_bits
- mmap_rnd_compat_bits
- nr_hugepages
- nr_overcommit_hugepages
- nr_trim_pages (only if CONFIG_MMU=n)
@ -466,6 +468,33 @@ against future potential kernel bugs.
==============================================================
mmap_rnd_bits:
This value can be used to select the number of bits to use to
determine the random offset to the base address of vma regions
resulting from mmap allocations on architectures which support
tuning address space randomization. This value will be bounded
by the architecture's minimum and maximum supported values.
This value can be changed after boot using the
/proc/sys/vm/mmap_rnd_bits tunable
==============================================================
mmap_rnd_compat_bits:
This value can be used to select the number of bits to use to
determine the random offset to the base address of vma regions
resulting from mmap allocations for applications run in
compatibility mode on architectures which support tuning address
space randomization. This value will be bounded by the
architecture's minimum and maximum supported values.
This value can be changed after boot using the
/proc/sys/vm/mmap_rnd_compat_bits tunable
==============================================================
nr_hugepages
Change the minimum size of the hugepage pool.

View File

@ -25,6 +25,7 @@ cpufreq.
cpu_idle "state=%lu cpu_id=%lu"
cpu_frequency "state=%lu cpu_id=%lu"
cpu_frequency_limits "min=%lu max=%lu cpu_id=%lu"
A suspend event is used to indicate the system going in and out of the
suspend mode:

View File

@ -0,0 +1,70 @@
zsmalloc
--------
This allocator is designed for use with zram. Thus, the allocator is
supposed to work well under low memory conditions. In particular, it
never attempts higher order page allocation which is very likely to
fail under memory pressure. On the other hand, if we just use single
(0-order) pages, it would suffer from very high fragmentation --
any object of size PAGE_SIZE/2 or larger would occupy an entire page.
This was one of the major issues with its predecessor (xvmalloc).
To overcome these issues, zsmalloc allocates a bunch of 0-order pages
and links them together using various 'struct page' fields. These linked
pages act as a single higher-order page i.e. an object can span 0-order
page boundaries. The code refers to these linked pages as a single entity
called zspage.
For simplicity, zsmalloc can only allocate objects of size up to PAGE_SIZE
since this satisfies the requirements of all its current users (in the
worst case, page is incompressible and is thus stored "as-is" i.e. in
uncompressed form). For allocation requests larger than this size, failure
is returned (see zs_malloc).
Additionally, zs_malloc() does not return a dereferenceable pointer.
Instead, it returns an opaque handle (unsigned long) which encodes actual
location of the allocated object. The reason for this indirection is that
zsmalloc does not keep zspages permanently mapped since that would cause
issues on 32-bit systems where the VA region for kernel space mappings
is very small. So, before using the allocated memory, the object has to
be mapped using zs_map_object() to get a usable pointer and subsequently
unmapped using zs_unmap_object().
stat
----
With CONFIG_ZSMALLOC_STAT, we could see zsmalloc internal information via
/sys/kernel/debug/zsmalloc/<user name>. Here is a sample of stat output:
# cat /sys/kernel/debug/zsmalloc/zram0/classes
class size almost_full almost_empty obj_allocated obj_used pages_used pages_per_zspage
..
..
9 176 0 1 186 129 8 4
10 192 1 0 2880 2872 135 3
11 208 0 1 819 795 42 2
12 224 0 1 219 159 12 4
..
..
class: index
size: object size zspage stores
almost_empty: the number of ZS_ALMOST_EMPTY zspages(see below)
almost_full: the number of ZS_ALMOST_FULL zspages(see below)
obj_allocated: the number of objects allocated
obj_used: the number of objects allocated to the user
pages_used: the number of pages allocated for the class
pages_per_zspage: the number of 0-order pages to make a zspage
We assign a zspage to ZS_ALMOST_EMPTY fullness group when:
n <= N / f, where
n = number of allocated objects
N = total number of objects zspage can store
f = fullness_threshold_frac(ie, 4 at the moment)
Similarly, we assign zspage to:
ZS_ALMOST_FULL when n > N / f
ZS_EMPTY when n == 0
ZS_FULL when n == N

View File

@ -9224,6 +9224,15 @@ M: "Maciej W. Rozycki" <macro@linux-mips.org>
S: Maintained
F: drivers/tty/serial/zs.*
ZSMALLOC COMPRESSED SLAB MEMORY ALLOCATOR
M: Minchan Kim <minchan@kernel.org>
M: Nitin Gupta <ngupta@vflare.org>
L: linux-mm@kvack.org
S: Maintained
F: mm/zsmalloc.c
F: include/linux/zsmalloc.h
F: Documentation/vm/zsmalloc.txt
ZSWAP COMPRESSED SWAP CACHING
M: Seth Jennings <sjenning@linux.vnet.ibm.com>
L: linux-mm@kvack.org

View File

@ -455,6 +455,74 @@ config HAVE_UNDERSCORE_SYMBOL_PREFIX
Some architectures generate an _ in front of C symbols; things like
module loading and assembly files need to know about this.
config HAVE_ARCH_MMAP_RND_BITS
bool
help
An arch should select this symbol if it supports setting a variable
number of bits for use in establishing the base address for mmap
allocations, has MMU enabled and provides values for both:
- ARCH_MMAP_RND_BITS_MIN
- ARCH_MMAP_RND_BITS_MAX
config ARCH_MMAP_RND_BITS_MIN
int
config ARCH_MMAP_RND_BITS_MAX
int
config ARCH_MMAP_RND_BITS_DEFAULT
int
config ARCH_MMAP_RND_BITS
int "Number of bits to use for ASLR of mmap base address" if EXPERT
range ARCH_MMAP_RND_BITS_MIN ARCH_MMAP_RND_BITS_MAX
default ARCH_MMAP_RND_BITS_DEFAULT if ARCH_MMAP_RND_BITS_DEFAULT
default ARCH_MMAP_RND_BITS_MIN
depends on HAVE_ARCH_MMAP_RND_BITS
help
This value can be used to select the number of bits to use to
determine the random offset to the base address of vma regions
resulting from mmap allocations. This value will be bounded
by the architecture's minimum and maximum supported values.
This value can be changed after boot using the
/proc/sys/vm/mmap_rnd_bits tunable
config HAVE_ARCH_MMAP_RND_COMPAT_BITS
bool
help
An arch should select this symbol if it supports running applications
in compatibility mode, supports setting a variable number of bits for
use in establishing the base address for mmap allocations, has MMU
enabled and provides values for both:
- ARCH_MMAP_RND_COMPAT_BITS_MIN
- ARCH_MMAP_RND_COMPAT_BITS_MAX
config ARCH_MMAP_RND_COMPAT_BITS_MIN
int
config ARCH_MMAP_RND_COMPAT_BITS_MAX
int
config ARCH_MMAP_RND_COMPAT_BITS_DEFAULT
int
config ARCH_MMAP_RND_COMPAT_BITS
int "Number of bits to use for ASLR of mmap base address for compatible applications" if EXPERT
range ARCH_MMAP_RND_COMPAT_BITS_MIN ARCH_MMAP_RND_COMPAT_BITS_MAX
default ARCH_MMAP_RND_COMPAT_BITS_DEFAULT if ARCH_MMAP_RND_COMPAT_BITS_DEFAULT
default ARCH_MMAP_RND_COMPAT_BITS_MIN
depends on HAVE_ARCH_MMAP_RND_COMPAT_BITS
help
This value can be used to select the number of bits to use to
determine the random offset to the base address of vma regions
resulting from mmap allocations for compatible applications. This
value will be bounded by the architecture's minimum and maximum
supported values.
This value can be changed after boot using the
/proc/sys/vm/mmap_rnd_compat_bits tunable
#
# ABI hall of shame
#

View File

@ -23,6 +23,7 @@ config ARM
select HARDIRQS_SW_RESEND
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_BPF_JIT
@ -301,6 +302,14 @@ config MMU
Select if you want MMU-based virtualised addressing space
support by paged memory management. If unsure, say 'Y'.
config ARCH_MMAP_RND_BITS_MIN
default 8
config ARCH_MMAP_RND_BITS_MAX
default 14 if PAGE_OFFSET=0x40000000
default 15 if PAGE_OFFSET=0x80000000
default 16
#
# The "ARM system type" choice list is ordered alphabetically by option
# text. Please add new entries in the option alphabetic order.

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2014,2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -21,11 +21,6 @@
qcom,ion-heap-type = "SYSTEM";
};
qcom,ion-heap@21 {
reg = <21>;
qcom,ion-heap-type = "SYSTEM_CONTIG";
};
qcom,ion-heap@8 { /* CP_MM HEAP */
compatible = "qcom,msm-ion-reserve";
reg = <8>;

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2014,2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -21,11 +21,6 @@
qcom,ion-heap-type = "SYSTEM";
};
system_contig_heap: qcom,ion-heap@21 {
reg = <21>;
qcom,ion-heap-type = "SYSTEM_CONTIG";
};
cp_mm_heap: qcom,ion-heap@8 { /* CP_MM HEAP */
compatible = "qcom,msm-ion-reserve";
reg = <8>;

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014,2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -21,11 +21,6 @@
qcom,ion-heap-type = "SYSTEM";
};
system_contig_heap: qcom,ion-heap@21 {
reg = <21>;
qcom,ion-heap-type = "SYSTEM_CONTIG";
};
qsecom_heap: qcom,ion-heap@27 { /* QSECOM HEAP */
compatible = "qcom,msm-ion-reserve";
reg = <27>;

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2014,2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -21,11 +21,6 @@
qcom,ion-heap-type = "SYSTEM";
};
qcom,ion-heap@21 {
reg = <21>;
qcom,ion-heap-type = "SYSTEM_CONTIG";
};
qcom,ion-heap@27 { /* QSECOM HEAP */
compatible = "qcom,msm-ion-reserve";
reg = <27>;

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2014, Linux Foundation. All rights reserved.
/* Copyright (c) 2014,2016, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -21,11 +21,6 @@
qcom,ion-heap-type = "SYSTEM";
};
system_contig_heap: qcom,ion-heap@21 {
reg = <21>;
qcom,ion-heap-type = "SYSTEM_CONTIG";
};
qsecom_heap: qcom,ion-heap@27 { /* QSEECOM HEAP */
compatible = "qcom,msm-ion-reserve";
reg = <27>;

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -1660,6 +1660,7 @@
qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_1_in 1 0>;
qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_1_in 2 0>;
qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_1_in 3 0>;
qcom,gpio-shutdown-ack = <&smp2pgpio_ssr_smp2p_1_in 7 0>;
/* GPIO output to mss */
qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2014, Linux Foundation. All rights reserved.
/* Copyright (c) 2014,2016, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -21,11 +21,6 @@
qcom,ion-heap-type = "SYSTEM";
};
qcom,ion-heap@21 {
reg = <21>;
qcom,ion-heap-type = "SYSTEM_CONTIG";
};
qcom,ion-heap@27 { /* QSEECOM HEAP */
compatible = "qcom,msm-ion-reserve";
reg = <27>;

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2014, Linux Foundation. All rights reserved.
/* Copyright (c) 2014,2016, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -21,11 +21,6 @@
qcom,ion-heap-type = "SYSTEM";
};
qcom,ion-heap@21 {
reg = <21>;
qcom,ion-heap-type = "SYSTEM_CONTIG";
};
qcom,ion-heap@8 { /* CP_MM HEAP */
compatible = "qcom,msm-ion-reserve";
reg = <8>;

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2014, Linux Foundation. All rights reserved.
/* Copyright (c) 2014,2016, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -21,11 +21,6 @@
qcom,ion-heap-type = "SYSTEM";
};
qcom,ion-heap@21 {
reg = <21>;
qcom,ion-heap-type = "SYSTEM_CONTIG";
};
qcom,ion-heap@8 { /* CP_MM HEAP */
compatible = "qcom,msm-ion-reserve";
reg = <8>;

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2014,2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -21,11 +21,6 @@
qcom,ion-heap-type = "SYSTEM";
};
qcom,ion-heap@21 {
reg = <21>;
qcom,ion-heap-type = "SYSTEM_CONTIG";
};
qcom,ion-heap@8 { /* CP_MM HEAP */
compatible = "qcom,msm-ion-reserve";
reg = <8>;

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -21,11 +21,6 @@
qcom,ion-heap-type = "SYSTEM";
};
qcom,ion-heap@21 {
reg = <21>;
qcom,ion-heap-type = "SYSTEM_CONTIG";
};
qcom,ion-heap@8 { /* CP_MM HEAP */
compatible = "qcom,msm-ion-reserve";
reg = <8>;

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -288,6 +288,12 @@
clock-frequency = <19200000>;
};
qcom,mpm2-sleep-counter@fc4a3000 {
compatible = "qcom,mpm2-sleep-counter";
reg = <0xfc4a3000 0x1000>;
clock-frequency = <32768>;
};
timer@f9020000 {
#address-cells = <1>;
#size-cells = <1>;

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014,2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -21,11 +21,6 @@
qcom,ion-heap-type = "SYSTEM";
};
qcom,ion-heap@21 {
reg = <21>;
qcom,ion-heap-type = "SYSTEM_CONTIG";
};
qcom,ion-heap@8 { /* CP_MM HEAP */
compatible = "qcom,msm-ion-reserve";
reg = <8>;

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2014, Linux Foundation. All rights reserved.
/* Copyright (c) 2014,2016, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -21,11 +21,6 @@
qcom,ion-heap-type = "SYSTEM";
};
qcom,ion-heap@21 {
reg = <21>;
qcom,ion-heap-type = "SYSTEM_CONTIG";
};
qcom,ion-heap@8 { /* CP_MM HEAP */
compatible = "qcom,msm-ion-reserve";
reg = <8>;

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2014, Linux Foundation. All rights reserved.
/* Copyright (c) 2014,2016, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -21,11 +21,6 @@
qcom,ion-heap-type = "SYSTEM";
};
qcom,ion-heap@21 {
reg = <21>;
qcom,ion-heap-type = "SYSTEM_CONTIG";
};
qcom,ion-heap@8 { /* CP_MM HEAP */
compatible = "qcom,msm-ion-reserve";
reg = <8>;

View File

@ -271,8 +271,8 @@ CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_MODE_SLIP6=y
CONFIG_USB_USBNET=y
CONFIG_MSM_RMNET_USB=y
CONFIG_CNSS=y
CONFIG_WCNSS_MEM_PRE_ALLOC=y
CONFIG_CNSS_PCI=y
CONFIG_CNSS_MAC_BUG=y
CONFIG_CLD_LL_CORE=y
CONFIG_INPUT_EVDEV=y

View File

@ -272,8 +272,8 @@ CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_MODE_SLIP6=y
CONFIG_USB_USBNET=y
CONFIG_MSM_RMNET_USB=y
CONFIG_CNSS=y
CONFIG_WCNSS_MEM_PRE_ALLOC=y
CONFIG_CNSS_PCI=y
CONFIG_CNSS_MAC_BUG=y
CONFIG_CLD_LL_CORE=y
CONFIG_INPUT_EVDEV=y

View File

@ -206,8 +206,8 @@ CONFIG_PPP_ASYNC=y
CONFIG_USB_USBNET=y
CONFIG_USB_NET_SMSC75XX=y
CONFIG_USB_NET_SMSC95XX=y
CONFIG_CNSS=y
CONFIG_WCNSS_MEM_PRE_ALLOC=y
CONFIG_CNSS_PCI=y
CONFIG_CNSS_MAC_BUG=y
CONFIG_CLD_LL_CORE=y
# CONFIG_INPUT_MOUSEDEV is not set

View File

@ -205,8 +205,8 @@ CONFIG_PPP_ASYNC=y
CONFIG_USB_USBNET=y
CONFIG_USB_NET_SMSC75XX=y
CONFIG_USB_NET_SMSC95XX=y
CONFIG_CNSS=y
CONFIG_WCNSS_MEM_PRE_ALLOC=y
CONFIG_CNSS_PCI=y
CONFIG_CNSS_MAC_BUG=y
CONFIG_CLD_LL_CORE=y
# CONFIG_INPUT_MOUSEDEV is not set

View File

@ -200,7 +200,12 @@ CONFIG_RNDIS_IPA=y
# CONFIG_NET_VENDOR_STMICRO is not set
CONFIG_PPP=y
CONFIG_PPP_ASYNC=y
CONFIG_CNSS=y
CONFIG_USB_USBNET=y
CONFIG_USB_NET_SMSC75XX=y
CONFIG_USB_NET_SMSC95XX=y
CONFIG_USBNET_IPA_BRIDGE=y
CONFIG_WCNSS_MEM_PRE_ALLOC=y
CONFIG_CNSS_PCI=y
CONFIG_CLD_LL_CORE=y
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y

View File

@ -199,7 +199,12 @@ CONFIG_RNDIS_IPA=y
# CONFIG_NET_VENDOR_STMICRO is not set
CONFIG_PPP=y
CONFIG_PPP_ASYNC=y
CONFIG_CNSS=y
CONFIG_USB_USBNET=y
CONFIG_USB_NET_SMSC75XX=y
CONFIG_USB_NET_SMSC95XX=y
CONFIG_USBNET_IPA_BRIDGE=y
CONFIG_WCNSS_MEM_PRE_ALLOC=y
CONFIG_CNSS_PCI=y
CONFIG_CLD_LL_CORE=y
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y

View File

@ -240,6 +240,7 @@ armpmu_add(struct perf_event *event, int flags)
pr_err("Event: %llx failed constraint check.\n",
event->attr.config);
event->state = PERF_EVENT_STATE_OFF;
err = -EPERM;
goto out;
}
@ -271,21 +272,30 @@ out:
}
static int
validate_event(struct pmu_hw_events *hw_events,
struct perf_event *event)
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct arm_pmu *armpmu;
struct pmu *leader_pmu = event->group_leader->pmu;
if (is_software_event(event))
return 1;
if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
/*
* Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
* core perf code won't check that the pmu->ctx == leader->ctx
* until after pmu->event_init(event).
*/
if (event->pmu != pmu)
return 0;
if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
return 1;
if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
return 1;
armpmu = to_arm_pmu(event->pmu);
return armpmu->get_event_idx(hw_events, event) >= 0;
}
@ -303,15 +313,15 @@ validate_group(struct perf_event *event)
memset(fake_used_mask, 0, sizeof(fake_used_mask));
fake_pmu.used_mask = fake_used_mask;
if (!validate_event(&fake_pmu, leader))
if (!validate_event(event->pmu, &fake_pmu, leader))
return -EINVAL;
list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
if (!validate_event(&fake_pmu, sibling))
if (!validate_event(event->pmu, &fake_pmu, sibling))
return -EINVAL;
}
if (!validate_event(&fake_pmu, event))
if (!validate_event(event->pmu, &fake_pmu, event))
return -EINVAL;
return 0;

View File

@ -99,7 +99,7 @@ static int buffer_req(struct msm_dma_alloc_req *req)
if (i >= MAX_TEST_BUFFERS)
goto error;
buffers[i] = kmalloc(req->size, GFP_KERNEL | __GFP_DMA);
buffers[i] = kzalloc(req->size, GFP_KERNEL | __GFP_DMA);
if (buffers[i] == 0)
goto error;
sizes[i] = req->size;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011,2012,2014 The Linux Foundation. All rights reserved.
* Copyright (c) 2011,2012,2014,2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -18,13 +18,15 @@
#include <mach/msm-krait-l2-accessors.h>
#define PMU_CODES_SIZE 64
/*
* The L2 PMU is shared between all CPU's, so protect
* its bitmap access.
*/
struct pmu_constraints {
u64 pmu_bitmap;
u8 codes[64];
u8 codes[PMU_CODES_SIZE];
raw_spinlock_t lock;
} l2_pmu_constraints = {
.pmu_bitmap = 0,
@ -427,10 +429,9 @@ static int msm_l2_test_set_ev_constraint(struct perf_event *event)
u8 group = evt_type & 0x0000F;
u8 code = (evt_type & 0x00FF0) >> 4;
unsigned long flags;
u32 err = 0;
int err = 0;
u64 bitmap_t;
u32 shift_idx;
if (evt_prefix == L2_TRACECTR_PREFIX)
return err;
/*
@ -444,6 +445,11 @@ static int msm_l2_test_set_ev_constraint(struct perf_event *event)
shift_idx = ((reg * 4) + group);
if (shift_idx >= PMU_CODES_SIZE) {
err = -EINVAL;
goto out;
}
bitmap_t = 1 << shift_idx;
if (!(l2_pmu_constraints.pmu_bitmap & bitmap_t)) {
@ -468,6 +474,7 @@ static int msm_l2_test_set_ev_constraint(struct perf_event *event)
if (!(event->cpu < 0)) {
event->state = PERF_EVENT_STATE_OFF;
event->attr.constraint_duplicate = 1;
err = -EPERM;
}
}
out:
@ -484,6 +491,7 @@ static int msm_l2_clear_ev_constraint(struct perf_event *event)
unsigned long flags;
u64 bitmap_t;
u32 shift_idx;
int err = 1;
if (evt_prefix == L2_TRACECTR_PREFIX)
return 1;
@ -491,6 +499,10 @@ static int msm_l2_clear_ev_constraint(struct perf_event *event)
shift_idx = ((reg * 4) + group);
if (shift_idx >= PMU_CODES_SIZE) {
err = -EINVAL;
goto out;
}
bitmap_t = 1 << shift_idx;
/* Clear constraint bit. */
@ -498,9 +510,9 @@ static int msm_l2_clear_ev_constraint(struct perf_event *event)
/* Clear code. */
l2_pmu_constraints.codes[shift_idx] = -1;
out:
raw_spin_unlock_irqrestore(&l2_pmu_constraints.lock, flags);
return 1;
return err;
}
int get_num_events(void)

View File

@ -173,10 +173,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
{
unsigned long random_factor = 0UL;
/* 8 bits of randomness in 20 address space bits */
if ((current->flags & PF_RANDOMIZE) &&
!(current->personality & ADDR_NO_RANDOMIZE))
random_factor = (get_random_int() % (1 << 8)) << PAGE_SHIFT;
random_factor = (get_random_long() & ((1UL << mmap_rnd_bits) - 1)) << PAGE_SHIFT;
if (mmap_is_legacy()) {
mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;

View File

@ -929,7 +929,7 @@ out:
static void bpf_jit_free_worker(struct work_struct *work)
{
module_free(NULL, work);
module_memfree(work);
}
void bpf_jit_free(struct sk_filter *fp)

View File

@ -32,6 +32,8 @@ config ARM64
select HARDIRQS_SW_RESEND
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_TRACEHOOK
@ -83,6 +85,27 @@ config MMU
config NO_IOPORT_MAP
def_bool y
config ARCH_MMAP_RND_BITS_MIN
default 14 if ARM64_64K_PAGES
default 18
# max bits determined by the following formula:
# VA_BITS - PAGE_SHIFT - 3
# VA_BITS is always 39
config ARCH_MMAP_RND_BITS_MAX
default 20 if ARM64_64K_PAGES
default 24
config ARCH_MMAP_RND_COMPAT_BITS_MIN
default 11
config ARCH_MMAP_RND_COMPAT_BITS_MAX
default 16
config ILLEGAL_POINTER_VALUE
hex
default 0xdead000000000000
config STACKTRACE_SUPPORT
def_bool y

View File

@ -37,6 +37,7 @@ CONFIG_CC_STACKPROTECTOR_REGULAR=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_MODVERSIONS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_ARCH_MSM=y
@ -89,6 +90,7 @@ CONFIG_INET_XFRM_MODE_TUNNEL=y
CONFIG_INET_IPCOMP=y
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG_DESTROY=y
CONFIG_IPV6=y
CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
@ -241,6 +243,7 @@ CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
CONFIG_TUN=y
CONFIG_RNDIS_IPA=y
CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
CONFIG_PPP=y
CONFIG_PPP_BSDCOMP=y
CONFIG_PPP_DEFLATE=y
@ -251,14 +254,13 @@ CONFIG_PPPOPNS=y
CONFIG_PPP_ASYNC=y
CONFIG_PPP_SYNC_TTY=y
CONFIG_USB_USBNET=y
CONFIG_CNSS=y
CONFIG_BUS_AUTO_SUSPEND=y
CONFIG_WCNSS_MEM_PRE_ALLOC=y
CONFIG_CNSS_MAC_BUG=y
CONFIG_CLD_LL_CORE=y
CONFIG_ATH_CARDS=y
CONFIG_WIL6210=m
CONFIG_E1000E=y
CONFIG_CNSS_PCI=y
CONFIG_CNSS_MAC_BUG=y
CONFIG_CLD_LL_CORE=y
CONFIG_BUS_AUTO_SUSPEND=y
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_EVBUG=m
CONFIG_KEYBOARD_GPIO=y
@ -573,6 +575,5 @@ CONFIG_CONSOLE_FLUSH_ON_HOTPLUG=y
CONFIG_INPUT_JOYSTICK=y
CONFIG_JOYSTICK_XPAD=y
CONFIG_QCOM_NPA_DUMP=y
CONFIG_KERNEL_TEXT_MPU_PROT=y
CONFIG_MSM_CORE_CTL_HELPER=y
CONFIG_UID_CPUTIME=y

View File

@ -94,6 +94,7 @@ CONFIG_INET_XFRM_MODE_TRANSPORT=y
CONFIG_INET_XFRM_MODE_TUNNEL=y
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG_DESTROY=y
CONFIG_IPV6=y
CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
@ -253,6 +254,7 @@ CONFIG_DUMMY=y
CONFIG_TUN=y
CONFIG_KS8851=y
CONFIG_RNDIS_IPA=y
CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
CONFIG_PPP=y
CONFIG_PPP_BSDCOMP=y
CONFIG_PPP_DEFLATE=y
@ -266,16 +268,13 @@ CONFIG_PPPOPNS=y
CONFIG_PPP_ASYNC=y
CONFIG_PPP_SYNC_TTY=y
CONFIG_USB_USBNET=y
CONFIG_CNSS=y
CONFIG_BUS_AUTO_SUSPEND=y
CONFIG_WCNSS_MEM_PRE_ALLOC=y
CONFIG_CNSS_MAC_BUG=y
CONFIG_CLD_LL_CORE=y
CONFIG_ATH_CARDS=y
CONFIG_WIL6210=m
CONFIG_NET_VENDOR_REALTEK=y
CONFIG_R8169=y
CONFIG_E1000E=y
CONFIG_CNSS_PCI=y
CONFIG_CNSS_MAC_BUG=y
CONFIG_CLD_LL_CORE=y
CONFIG_BUS_AUTO_SUSPEND=y
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_EVBUG=m
CONFIG_KEYBOARD_GPIO=y

View File

@ -540,6 +540,7 @@ CONFIG_PANIC_ON_DATA_CORRUPTION=y
CONFIG_KEYS=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_LSM_MMAP_MIN_ADDR=4096
CONFIG_SECURITY_SELINUX=y
CONFIG_CRYPTO_NULL=y

View File

@ -143,7 +143,7 @@ static int __cpuinit os_lock_notify(struct notifier_block *self,
unsigned long action, void *data)
{
int cpu = (unsigned long)data;
if (action == CPU_ONLINE)
if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
smp_call_function_single(cpu, clear_os_lock, NULL, 1);
return NOTIFY_OK;
}

View File

@ -885,7 +885,7 @@ static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
void *hcpu)
{
int cpu = (long)hcpu;
if (action == CPU_ONLINE)
if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
smp_call_function_single(cpu, hw_breakpoint_reset, NULL, 1);
return NOTIFY_OK;
}

View File

@ -357,22 +357,31 @@ out:
}
static int
validate_event(struct pmu_hw_events *hw_events,
struct perf_event *event)
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct arm_pmu *armpmu;
struct hw_perf_event fake_event = event->hw;
struct pmu *leader_pmu = event->group_leader->pmu;
if (is_software_event(event))
return 1;
/*
* Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
* core perf code won't check that the pmu->ctx == leader->ctx
* until after pmu->event_init(event).
*/
if (event->pmu != pmu)
return 0;
if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
return 1;
if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
return 1;
armpmu = to_arm_pmu(event->pmu);
return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
}
@ -390,15 +399,15 @@ validate_group(struct perf_event *event)
memset(fake_used_mask, 0, sizeof(fake_used_mask));
fake_pmu.used_mask = fake_used_mask;
if (!validate_event(&fake_pmu, leader))
if (!validate_event(event->pmu, &fake_pmu, leader))
return -EINVAL;
list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
if (!validate_event(&fake_pmu, sibling))
if (!validate_event(event->pmu, &fake_pmu, sibling))
return -EINVAL;
}
if (!validate_event(&fake_pmu, event))
if (!validate_event(event->pmu, &fake_pmu, event))
return -EINVAL;
return 0;

View File

@ -337,8 +337,10 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
}
machine_name = of_flat_dt_get_machine_name();
if (machine_name)
if (machine_name) {
dump_stack_set_arch_desc("%s (DT)", machine_name);
pr_info("Machine: %s\n", machine_name);
}
}
/*
@ -536,6 +538,8 @@ static int c_show(struct seq_file *m, void *v)
{
int i, j;
seq_printf(m, "Processor\t: %s rev %d (%s)\n",
cpu_name, read_cpuid_id() & 15, ELF_PLATFORM);
for_each_present_cpu(i) {
struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
u32 midr = cpuinfo->reg_midr;
@ -583,6 +587,11 @@ static int c_show(struct seq_file *m, void *v)
}
#endif
if (!arch_read_hardware_id)
seq_printf(m, "Hardware\t: %s\n", machine_name);
else
seq_printf(m, "Hardware\t: %s\n", arch_read_hardware_id());
return 0;
}

View File

@ -88,6 +88,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
if (pageno < pool->nr_pages) {
bitmap_set(pool->bitmap, pageno, count);
ptr = pool->vaddr + PAGE_SIZE * pageno;
memset(ptr, 0, size);
*ret_page = pool->pages[pageno];
} else {
pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
@ -208,6 +209,7 @@ static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
page = pfn_to_page(pfn);
addr = page_address(page);
memset(addr, 0, size);
if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs) ||
dma_get_attr(DMA_ATTR_STRONGLY_ORDERED, attrs)) {

View File

@ -283,6 +283,7 @@ retry:
* starvation.
*/
mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
mm_flags |= FAULT_FLAG_TRIED;
goto retry;
}
}

View File

@ -47,22 +47,19 @@ static int mmap_is_legacy(void)
return sysctl_legacy_va_layout;
}
/*
* Since get_random_int() returns the same value within a 1 jiffy window, we
* will almost always get the same randomisation for the stack and mmap
* region. This will mean the relative distance between stack and mmap will be
* the same.
*
* To avoid this we can shift the randomness by 1 bit.
*/
static unsigned long mmap_rnd(void)
{
unsigned long rnd = 0;
if (current->flags & PF_RANDOMIZE)
rnd = (long)get_random_int() & (STACK_RND_MASK >> 1);
return rnd << (PAGE_SHIFT + 1);
if (current->flags & PF_RANDOMIZE) {
#ifdef CONFIG_COMPAT
if (test_thread_flag(TIF_32BIT))
rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
else
#endif
rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
}
return rnd << PAGE_SHIFT;
}
static unsigned long mmap_base(void)

View File

@ -36,7 +36,7 @@ void *module_alloc(unsigned long size)
}
/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
void module_memfree(void *module_region)
{
kfree(module_region);
}

View File

@ -147,7 +147,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
unsigned long random_factor = 0UL;
if (current->flags & PF_RANDOMIZE) {
random_factor = get_random_int();
random_factor = get_random_long();
random_factor = random_factor << PAGE_SHIFT;
if (TASK_IS_32BIT_ADDR)
random_factor &= 0xfffffful;
@ -166,7 +166,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
static inline unsigned long brk_rnd(void)
{
unsigned long rnd = get_random_int();
unsigned long rnd = get_random_long();
rnd = rnd << PAGE_SHIFT;
/* 8MB for 32bit, 256MB for 64bit */

View File

@ -1450,9 +1450,9 @@ static inline unsigned long brk_rnd(void)
/* 8MB for 32bit, 1GB for 64bit */
if (is_32bit_task())
rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
else
rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
return rnd << PAGE_SHIFT;
}

View File

@ -60,9 +60,9 @@ static unsigned long mmap_rnd(void)
if (current->flags & PF_RANDOMIZE) {
/* 8MB for 32bit, 1GB for 64bit */
if (is_32bit_task())
rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
rnd = get_random_long() % (1<<(23-PAGE_SHIFT));
else
rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
rnd = get_random_long() % (1UL<<(30-PAGE_SHIFT));
}
return rnd << PAGE_SHIFT;
}

View File

@ -691,11 +691,11 @@ out:
static void jit_free_defer(struct work_struct *arg)
{
module_free(NULL, arg);
module_memfree(arg);
}
/* run from softirq, we must use a work_struct to call
* module_free() from process context
* module_memfree() from process context
*/
void bpf_jit_free(struct sk_filter *fp)
{

View File

@ -265,7 +265,7 @@ static unsigned long mmap_rnd(void)
unsigned long rnd = 0UL;
if (current->flags & PF_RANDOMIZE) {
unsigned long val = get_random_int();
unsigned long val = get_random_long();
if (test_thread_flag(TIF_32BIT))
rnd = (val % (1UL << (23UL-PAGE_SHIFT)));
else

View File

@ -773,7 +773,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf];
if (unlikely(proglen + ilen > oldproglen)) {
pr_err("bpb_jit_compile fatal error\n");
kfree(addrs);
module_free(NULL, image);
module_memfree(image);
return;
}
memcpy(image + proglen, temp, ilen);
@ -819,11 +819,11 @@ out:
static void jit_free_defer(struct work_struct *arg)
{
module_free(NULL, arg);
module_memfree(arg);
}
/* run from softirq, we must use a work_struct to call
* module_free() from process context
* module_memfree() from process context
*/
void bpf_jit_free(struct sk_filter *fp)
{

View File

@ -74,7 +74,7 @@ error:
/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
void module_memfree(void *module_region)
{
vfree(module_region);

View File

@ -97,6 +97,8 @@ config X86
select DCACHE_WORD_ACCESS
select GENERIC_SMP_IDLE_THREAD
select ARCH_WANT_IPC_PARSE_VERSION if X86_32
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT
select HAVE_ARCH_SECCOMP_FILTER
select BUILDTIME_EXTABLE_SORT
select GENERIC_CMOS_UPDATE
@ -150,6 +152,20 @@ config HAVE_LATENCYTOP_SUPPORT
config MMU
def_bool y
config ARCH_MMAP_RND_BITS_MIN
default 28 if 64BIT
default 8
config ARCH_MMAP_RND_BITS_MAX
default 32 if 64BIT
default 16
config ARCH_MMAP_RND_COMPAT_BITS_MIN
default 8
config ARCH_MMAP_RND_COMPAT_BITS_MAX
default 16
config SBUS
bool

View File

@ -69,15 +69,15 @@ static unsigned long mmap_rnd(void)
{
unsigned long rnd = 0;
/*
* 8 bits of randomness in 32bit mmaps, 20 address space bits
* 28 bits of randomness in 64bit mmaps, 40 address space bits
*/
if (current->flags & PF_RANDOMIZE) {
if (mmap_is_ia32())
rnd = get_random_int() % (1<<8);
#ifdef CONFIG_COMPAT
rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
#else
rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
#endif
else
rnd = get_random_int() % (1<<28);
rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
}
return rnd << PAGE_SHIFT;
}

View File

@ -829,6 +829,7 @@ static void disk_seqf_stop(struct seq_file *seqf, void *v)
if (iter) {
class_dev_iter_exit(iter);
kfree(iter);
seqf->private = NULL;
}
}

View File

@ -717,7 +717,7 @@ void pm_get_active_wakeup_sources(char *pending_wakeup_source, size_t max)
rcu_read_lock();
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
if (ws->active) {
if (ws->active && len < max) {
if (!active)
len += scnprintf(pending_wakeup_source, max,
"Pending Wakeup Sources: ");

View File

@ -107,7 +107,6 @@ source "drivers/block/mtip32xx/Kconfig"
source "drivers/block/zram/Kconfig"
config BLK_CPQ_DA
tristate "Compaq SMART2 support"
depends on PCI && VIRT_TO_BUS

View File

@ -1,6 +1,7 @@
config ZRAM
tristate "Compressed RAM block device support"
depends on BLOCK && SYSFS && ZSMALLOC
depends on BLOCK && SYSFS
select ZPOOL
select LZO_COMPRESS
select LZO_DECOMPRESS
default n
@ -23,7 +24,7 @@ config ZRAM_LZ4_COMPRESS
default n
help
This option enables LZ4 compression algorithm support. Compression
algorithm can be changed using 'comp_algorithm' device attribute.
algorithm can be changed using `comp_algorithm' device attribute.
config ZRAM_DEBUG
bool "Compressed RAM block device debug support"

View File

@ -2,4 +2,4 @@ zram-y := zcomp_lzo.o zcomp.o zram_drv.o
zram-$(CONFIG_ZRAM_LZ4_COMPRESS) += zcomp_lz4.o
obj-$(CONFIG_ZRAM) += zram.o
obj-$(CONFIG_ZRAM) += zram.o

View File

@ -325,12 +325,14 @@ void zcomp_destroy(struct zcomp *comp)
* allocate new zcomp and initialize it. return compressing
* backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL)
* if requested algorithm is not supported, ERR_PTR(-ENOMEM) in
* case of allocation error.
* case of allocation error, or any other error potentially
* returned by functions zcomp_strm_{multi,single}_create.
*/
struct zcomp *zcomp_create(const char *compress, int max_strm)
{
struct zcomp *comp;
struct zcomp_backend *backend;
int error;
backend = find_backend(compress);
if (!backend)
@ -342,12 +344,12 @@ struct zcomp *zcomp_create(const char *compress, int max_strm)
comp->backend = backend;
if (max_strm > 1)
zcomp_strm_multi_create(comp, max_strm);
error = zcomp_strm_multi_create(comp, max_strm);
else
zcomp_strm_single_create(comp);
if (!comp->stream) {
error = zcomp_strm_single_create(comp);
if (error) {
kfree(comp);
return ERR_PTR(-ENOMEM);
return ERR_PTR(error);
}
return comp;
}

View File

@ -36,6 +36,8 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/ratelimit.h>
#include <linux/show_mem_notifier.h>
#include <linux/err.h>
#include "zram_drv.h"
@ -44,47 +46,110 @@
static int zram_major;
static struct zram *zram_devices;
static const char *default_compressor = "lzo";
#define BACKEND_PARAM_BUF_SIZE 32
static char backend_param_buf[BACKEND_PARAM_BUF_SIZE];
/*
* We don't need to see memory allocation errors more than once every 1
* second to know that a problem is occurring.
*/
#define ALLOC_ERROR_LOG_RATE_MS 1000
/* Module params (documentation at end) */
static unsigned int num_devices = 1;
static inline void deprecated_attr_warn(const char *name)
{
pr_warn_once("%d (%s) Attribute %s (and others) will be removed. %s\n",
task_pid_nr(current),
current->comm,
name,
"See zram documentation.");
}
#define ZRAM_ATTR_RO(name) \
static ssize_t name##_show(struct device *d, \
static ssize_t zram_attr_##name##_show(struct device *d, \
struct device_attribute *attr, char *b) \
{ \
struct zram *zram = dev_to_zram(d); \
\
deprecated_attr_warn(__stringify(name)); \
return scnprintf(b, PAGE_SIZE, "%llu\n", \
(u64)atomic64_read(&zram->stats.name)); \
} \
static DEVICE_ATTR_RO(name);
static struct device_attribute dev_attr_##name = \
__ATTR(name, S_IRUGO, zram_attr_##name##_show, NULL);
#define __ATTR_WO(_name) { \
.attr = { .name = __stringify(_name), .mode = S_IWUSR }, \
.store = _name##_store, \
}
#define __ATTR_RW(_name) __ATTR(_name, (S_IWUSR | S_IRUGO), \
_name##_show, _name##_store)
#define DEVICE_ATTR(_name, _mode, _show, _store) \
struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
#define DEVICE_ATTR_RW(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
#define DEVICE_ATTR_RO(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
#define DEVICE_ATTR_WO(_name) \
struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
static inline int init_done(struct zram *zram)
static inline bool init_done(struct zram *zram)
{
return zram->meta != NULL;
return zram->disksize;
}
static int zram_show_mem_notifier(struct notifier_block *nb,
unsigned long action,
void *data)
{
int i;
if (!zram_devices)
return 0;
for (i = 0; i < num_devices; i++) {
struct zram *zram = &zram_devices[i];
struct zram_meta *meta = zram->meta;
if (!down_read_trylock(&zram->init_lock))
continue;
if (init_done(zram)) {
u64 val;
u64 data_size;
val = zpool_get_total_size(meta->mem_pool);
data_size = atomic64_read(&zram->stats.compr_data_size);
pr_info("Zram[%d] mem_used_total = %llu\n", i, val);
pr_info("Zram[%d] compr_data_size = %llu\n", i,
(unsigned long long)data_size);
pr_info("Zram[%d] orig_data_size = %u\n", i,
atomic_read(&zram->stats.pages_stored));
}
up_read(&zram->init_lock);
}
return 0;
}
static struct notifier_block zram_show_mem_notifier_block = {
.notifier_call = zram_show_mem_notifier
};
static inline struct zram *dev_to_zram(struct device *dev)
{
return (struct zram *)dev_to_disk(dev)->private_data;
}
static ssize_t compact_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
unsigned long nr_migrated;
struct zram *zram = dev_to_zram(dev);
struct zram_meta *meta;
down_read(&zram->init_lock);
if (!init_done(zram)) {
up_read(&zram->init_lock);
return -EINVAL;
}
meta = zram->meta;
nr_migrated = zpool_compact(meta->mem_pool);
atomic64_add(nr_migrated, &zram->stats.num_migrated);
up_read(&zram->init_lock);
return len;
}
static ssize_t disksize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@ -111,6 +176,7 @@ static ssize_t orig_data_size_show(struct device *dev,
{
struct zram *zram = dev_to_zram(dev);
deprecated_attr_warn("orig_data_size");
return scnprintf(buf, PAGE_SIZE, "%llu\n",
(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}
@ -121,14 +187,15 @@ static ssize_t mem_used_total_show(struct device *dev,
u64 val = 0;
struct zram *zram = dev_to_zram(dev);
deprecated_attr_warn("mem_used_total");
down_read(&zram->init_lock);
if (init_done(zram)) {
struct zram_meta *meta = zram->meta;
val = zs_get_total_pages(meta->mem_pool);
val = zpool_get_total_size(meta->mem_pool);
}
up_read(&zram->init_lock);
return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
}
static ssize_t max_comp_streams_show(struct device *dev,
@ -150,6 +217,7 @@ static ssize_t mem_limit_show(struct device *dev,
u64 val;
struct zram *zram = dev_to_zram(dev);
deprecated_attr_warn("mem_limit");
down_read(&zram->init_lock);
val = zram->limit_pages;
up_read(&zram->init_lock);
@ -181,6 +249,7 @@ static ssize_t mem_used_max_show(struct device *dev,
u64 val = 0;
struct zram *zram = dev_to_zram(dev);
deprecated_attr_warn("mem_used_max");
down_read(&zram->init_lock);
if (init_done(zram))
val = atomic_long_read(&zram->stats.max_used_pages);
@ -204,7 +273,7 @@ static ssize_t mem_used_max_store(struct device *dev,
if (init_done(zram)) {
struct zram_meta *meta = zram->meta;
atomic_long_set(&zram->stats.max_used_pages,
zs_get_total_pages(meta->mem_pool));
zpool_get_total_size(meta->mem_pool) >> PAGE_SHIFT);
}
up_read(&zram->init_lock);
@ -329,42 +398,70 @@ static inline int valid_io_request(struct zram *zram,
return 1;
}
static void zram_meta_free(struct zram_meta *meta)
static void zram_meta_free(struct zram_meta *meta, u64 disksize)
{
zs_destroy_pool(meta->mem_pool);
size_t num_pages = disksize >> PAGE_SHIFT;
size_t index;
/* Free all pages that are still in this zram device */
for (index = 0; index < num_pages; index++) {
unsigned long handle = meta->table[index].handle;
if (!handle)
continue;
zpool_free(meta->mem_pool, handle);
}
zpool_destroy_pool(meta->mem_pool);
vfree(meta->table);
kfree(meta);
}
static struct zram_meta *zram_meta_alloc(u64 disksize)
static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
{
size_t num_pages;
char pool_name[8];
struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
char *backend;
if (!meta)
goto out;
return NULL;
num_pages = disksize >> PAGE_SHIFT;
meta->table = vzalloc(num_pages * sizeof(*meta->table));
if (!meta->table) {
pr_err("Error allocating zram address table\n");
goto free_meta;
goto out_error;
}
meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
backend = strlen(backend_param_buf) ? backend_param_buf : "zsmalloc";
meta->mem_pool = zpool_create_pool(backend, pool_name,
GFP_NOIO | __GFP_NOWARN, NULL);
if (!meta->mem_pool) {
pr_err("Error creating memory pool\n");
goto free_table;
goto out_error;
}
return meta;
free_table:
out_error:
vfree(meta->table);
free_meta:
kfree(meta);
meta = NULL;
out:
return meta;
return NULL;
}
static inline bool zram_meta_get(struct zram *zram)
{
if (atomic_inc_not_zero(&zram->refcount))
return true;
return false;
}
static inline void zram_meta_put(struct zram *zram)
{
atomic_dec(&zram->refcount);
}
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
@ -427,7 +524,7 @@ static void zram_free_page(struct zram *zram, size_t index)
return;
}
zs_free(meta->mem_pool, handle);
zpool_free(meta->mem_pool, handle);
atomic64_sub(zram_get_obj_size(meta, index),
&zram->stats.compr_data_size);
@ -450,17 +547,17 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
size = zram_get_obj_size(meta, index);
if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
clear_page(mem);
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
return 0;
}
cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
cmem = zpool_map_handle(meta->mem_pool, handle, ZPOOL_MM_RO);
if (size == PAGE_SIZE)
copy_page(mem, cmem);
else
ret = zcomp_decompress(zram->comp, cmem, size, mem);
zs_unmap_object(meta->mem_pool, handle);
zpool_unmap_handle(meta->mem_pool, handle);
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
/* Should NEVER happen. Return bio error if it does. */
@ -525,7 +622,7 @@ out_cleanup:
static inline void update_used_max(struct zram *zram,
const unsigned long pages)
{
int old_max, cur_max;
unsigned long old_max, cur_max;
old_max = atomic_long_read(&zram->stats.max_used_pages);
@ -546,6 +643,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
struct page *page;
unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
struct zram_meta *meta = zram->meta;
static unsigned long zram_rs_time;
struct zcomp_strm *zstrm;
bool locked = false;
unsigned long alloced_pages;
@ -593,6 +691,14 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
goto out;
}
/*
* zram_slot_free_notify could miss free so that let's
* double check.
*/
if (unlikely(meta->table[index].handle ||
zram_test_flag(meta, index, ZRAM_ZERO)))
zram_free_page(zram, index);
ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
if (!is_partial_io(bvec)) {
kunmap_atomic(user_mem);
@ -611,24 +717,26 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
src = uncmem;
}
handle = zs_malloc(meta->mem_pool, clen);
if (!handle) {
pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
index, clen);
if (zpool_malloc(meta->mem_pool, clen, __GFP_NORETRY | __GFP_NOWARN,
&handle) != 0) {
if (printk_timed_ratelimit(&zram_rs_time,
ALLOC_ERROR_LOG_RATE_MS))
pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
index, clen);
ret = -ENOMEM;
goto out;
}
alloced_pages = zs_get_total_pages(meta->mem_pool);
alloced_pages = zpool_get_total_size(meta->mem_pool) >> PAGE_SHIFT;
if (zram->limit_pages && alloced_pages > zram->limit_pages) {
zs_free(meta->mem_pool, handle);
zpool_free(meta->mem_pool, handle);
ret = -ENOMEM;
goto out;
}
update_used_max(zram, alloced_pages);
cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
cmem = zpool_map_handle(meta->mem_pool, handle, ZPOOL_MM_WO);
if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
src = kmap_atomic(page);
@ -640,7 +748,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
zcomp_strm_release(zram->comp, zstrm);
locked = false;
zs_unmap_object(meta->mem_pool, handle);
zpool_unmap_handle(meta->mem_pool, handle);
/*
* Free memory associated with this sector
@ -726,10 +834,11 @@ static void zram_bio_discard(struct zram *zram, u32 index,
}
}
static void zram_reset_device(struct zram *zram, bool reset_capacity)
static void zram_reset_device(struct zram *zram)
{
size_t index;
struct zram_meta *meta;
struct zcomp *comp;
u64 disksize;
down_write(&zram->init_lock);
@ -741,36 +850,32 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
}
meta = zram->meta;
/* Free all pages that are still in this zram device */
for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
unsigned long handle = meta->table[index].handle;
if (!handle)
continue;
comp = zram->comp;
disksize = zram->disksize;
/*
* Refcount will go down to 0 eventually and r/w handler
* cannot handle further I/O so it will bail out by
* check zram_meta_get.
*/
zram_meta_put(zram);
/*
* We want to free zram_meta in process context to avoid
* deadlock between reclaim path and any other locks.
*/
wait_event(zram->io_done, atomic_read(&zram->refcount) == 0);
zs_free(meta->mem_pool, handle);
}
zcomp_destroy(zram->comp);
zram->max_comp_streams = 1;
zram_meta_free(zram->meta);
zram->meta = NULL;
/* Reset stats */
memset(&zram->stats, 0, sizeof(zram->stats));
zram->disksize = 0;
if (reset_capacity)
set_capacity(zram->disk, 0);
zram->max_comp_streams = 1;
set_capacity(zram->disk, 0);
part_stat_set_all(&zram->disk->part0, 0);
up_write(&zram->init_lock);
/*
* Revalidate disk out of the init_lock to avoid lockdep splat.
* It's okay because disk's capacity is protected by init_lock
* so that revalidate_disk always sees up-to-date capacity.
*/
if (reset_capacity)
revalidate_disk(zram->disk);
/* I/O operation under all of CPU are done so let's free */
zram_meta_free(meta, disksize);
zcomp_destroy(comp);
}
static ssize_t disksize_store(struct device *dev,
@ -787,7 +892,7 @@ static ssize_t disksize_store(struct device *dev,
return -EINVAL;
disksize = PAGE_ALIGN(disksize);
meta = zram_meta_alloc(disksize);
meta = zram_meta_alloc(zram->disk->first_minor, disksize);
if (!meta)
return -ENOMEM;
@ -806,6 +911,8 @@ static ssize_t disksize_store(struct device *dev,
goto out_destroy_comp;
}
init_waitqueue_head(&zram->io_done);
atomic_set(&zram->refcount, 1);
zram->meta = meta;
zram->comp = comp;
zram->disksize = disksize;
@ -825,7 +932,7 @@ out_destroy_comp:
up_write(&zram->init_lock);
zcomp_destroy(comp);
out_free_meta:
zram_meta_free(meta);
zram_meta_free(meta, disksize);
return err;
}
@ -843,8 +950,9 @@ static ssize_t reset_store(struct device *dev,
if (!bdev)
return -ENOMEM;
mutex_lock(&bdev->bd_mutex);
/* Do not reset an active device! */
if (bdev->bd_holders) {
if (bdev->bd_openers) {
ret = -EBUSY;
goto out;
}
@ -860,26 +968,28 @@ static ssize_t reset_store(struct device *dev,
/* Make sure all pending I/O is finished */
fsync_bdev(bdev);
zram_reset_device(zram);
mutex_unlock(&bdev->bd_mutex);
revalidate_disk(zram->disk);
bdput(bdev);
zram_reset_device(zram, true);
return len;
out:
mutex_unlock(&bdev->bd_mutex);
bdput(bdev);
return ret;
}
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
int offset, rw;
int i, offset, rw;
u32 index;
struct bio_vec *bvec;
int iter;
index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
offset = (bio->bi_sector &
(SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
if (unlikely(bio->bi_rw & REQ_DISCARD)) {
zram_bio_discard(zram, index, offset, bio);
@ -887,9 +997,8 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
return;
}
rw = bio_data_dir(bio);
bio_for_each_segment(bvec, bio, iter) {
bio_for_each_segment(bvec, bio, i) {
int max_transfer_size = PAGE_SIZE - offset;
if (bvec->bv_len > max_transfer_size) {
@ -932,23 +1041,21 @@ static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
struct zram *zram = queue->queuedata;
down_read(&zram->init_lock);
if (unlikely(!init_done(zram)))
if (unlikely(!zram_meta_get(zram)))
goto error;
if (!valid_io_request(zram, bio->bi_sector,
bio->bi_size)) {
atomic64_inc(&zram->stats.invalid_io);
goto error;
goto put_zram;
}
__zram_make_request(zram, bio);
up_read(&zram->init_lock);
zram_meta_put(zram);
return;
put_zram:
zram_meta_put(zram);
error:
up_read(&zram->init_lock);
bio_io_error(bio);
}
@ -972,16 +1079,72 @@ static const struct block_device_operations zram_devops = {
.owner = THIS_MODULE
};
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_RO(orig_data_size);
static DEVICE_ATTR_RO(mem_used_total);
static DEVICE_ATTR_RW(mem_limit);
static DEVICE_ATTR_RW(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);
static DEVICE_ATTR(compact, S_IWUSR, NULL, compact_store);
static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
disksize_show, disksize_store);
static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
static DEVICE_ATTR(mem_limit, S_IRUGO | S_IWUSR, mem_limit_show,
mem_limit_store);
static DEVICE_ATTR(mem_used_max, S_IRUGO | S_IWUSR, mem_used_max_show,
mem_used_max_store);
static DEVICE_ATTR(max_comp_streams, S_IRUGO | S_IWUSR,
max_comp_streams_show, max_comp_streams_store);
static DEVICE_ATTR(comp_algorithm, S_IRUGO | S_IWUSR,
comp_algorithm_show, comp_algorithm_store);
static ssize_t io_stat_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct zram *zram = dev_to_zram(dev);
ssize_t ret;
down_read(&zram->init_lock);
ret = scnprintf(buf, PAGE_SIZE,
"%8llu %8llu %8llu %8llu\n",
(u64)atomic64_read(&zram->stats.failed_reads),
(u64)atomic64_read(&zram->stats.failed_writes),
(u64)atomic64_read(&zram->stats.invalid_io),
(u64)atomic64_read(&zram->stats.notify_free));
up_read(&zram->init_lock);
return ret;
}
static ssize_t mm_stat_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct zram *zram = dev_to_zram(dev);
u64 orig_size, mem_used = 0;
long max_used;
ssize_t ret;
down_read(&zram->init_lock);
if (init_done(zram))
mem_used = zpool_get_total_size(zram->meta->mem_pool)
>> PAGE_SHIFT;
orig_size = atomic64_read(&zram->stats.pages_stored);
max_used = atomic_long_read(&zram->stats.max_used_pages);
ret = scnprintf(buf, PAGE_SIZE,
"%8llu %8llu %8llu %8lu %8ld %8llu %8llu\n",
orig_size << PAGE_SHIFT,
(u64)atomic64_read(&zram->stats.compr_data_size),
mem_used << PAGE_SHIFT,
zram->limit_pages << PAGE_SHIFT,
max_used << PAGE_SHIFT,
(u64)atomic64_read(&zram->stats.zero_pages),
(u64)atomic64_read(&zram->stats.num_migrated));
up_read(&zram->init_lock);
return ret;
}
static DEVICE_ATTR(io_stat, S_IRUGO, io_stat_show, NULL);
static DEVICE_ATTR(mm_stat, S_IRUGO, mm_stat_show, NULL);
ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
@ -999,6 +1162,7 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_num_writes.attr,
&dev_attr_failed_reads.attr,
&dev_attr_failed_writes.attr,
&dev_attr_compact.attr,
&dev_attr_invalid_io.attr,
&dev_attr_notify_free.attr,
&dev_attr_zero_pages.attr,
@ -1009,6 +1173,8 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_mem_used_max.attr,
&dev_attr_max_comp_streams.attr,
&dev_attr_comp_algorithm.attr,
&dev_attr_io_stat.attr,
&dev_attr_mm_stat.attr,
NULL,
};
@ -1018,41 +1184,42 @@ static struct attribute_group zram_disk_attr_group = {
static int create_device(struct zram *zram, int device_id)
{
struct request_queue *queue;
int ret = -ENOMEM;
init_rwsem(&zram->init_lock);
zram->queue = blk_alloc_queue(GFP_KERNEL);
if (!zram->queue) {
queue = blk_alloc_queue(GFP_KERNEL);
if (!queue) {
pr_err("Error allocating disk queue for device %d\n",
device_id);
goto out;
}
blk_queue_make_request(zram->queue, zram_make_request);
zram->queue->queuedata = zram;
blk_queue_make_request(queue, zram_make_request);
/* gendisk structure */
zram->disk = alloc_disk(1);
if (!zram->disk) {
pr_warn("Error allocating disk structure for device %d\n",
device_id);
ret = -ENOMEM;
goto out_free_queue;
}
zram->disk->major = zram_major;
zram->disk->first_minor = device_id;
zram->disk->fops = &zram_devops;
zram->disk->queue = zram->queue;
zram->disk->queue = queue;
zram->disk->queue->queuedata = zram;
zram->disk->private_data = zram;
snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
__set_bit(QUEUE_FLAG_FAST, &zram->queue->queue_flags);
__set_bit(QUEUE_FLAG_FAST, &zram->disk->queue->queue_flags);
/* Actual capacity set using syfs (/sys/block/zram<id>/disksize */
set_capacity(zram->disk, 0);
/* zram devices sort of resembles non-rotational disks */
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
/*
* To ensure that we always get PAGE_SIZE aligned
* and n*PAGE_SIZED sized I/O requests.
@ -1086,7 +1253,6 @@ static int create_device(struct zram *zram, int device_id)
pr_warn("Error creating sysfs group");
goto out_free_disk;
}
strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
zram->meta = NULL;
zram->max_comp_streams = 1;
@ -1096,20 +1262,35 @@ out_free_disk:
del_gendisk(zram->disk);
put_disk(zram->disk);
out_free_queue:
blk_cleanup_queue(zram->queue);
blk_cleanup_queue(queue);
out:
return ret;
}
static void destroy_device(struct zram *zram)
static void destroy_devices(unsigned int nr)
{
sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
&zram_disk_attr_group);
struct zram *zram;
unsigned int i;
del_gendisk(zram->disk);
put_disk(zram->disk);
for (i = 0; i < nr; i++) {
zram = &zram_devices[i];
/*
* Remove sysfs first, so no one will perform a disksize
* store while we destroy the devices
*/
sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
&zram_disk_attr_group);
blk_cleanup_queue(zram->queue);
zram_reset_device(zram);
blk_cleanup_queue(zram->disk->queue);
del_gendisk(zram->disk);
put_disk(zram->disk);
}
kfree(zram_devices);
unregister_blkdev(zram_major, "zram");
pr_info("Destroyed %u device(s)\n", nr);
}
static int __init zram_init(void)
@ -1119,64 +1300,40 @@ static int __init zram_init(void)
if (num_devices > max_num_devices) {
pr_warn("Invalid value for num_devices: %u\n",
num_devices);
ret = -EINVAL;
goto out;
return -EINVAL;
}
zram_major = register_blkdev(0, "zram");
if (zram_major <= 0) {
pr_warn("Unable to get major number\n");
ret = -EBUSY;
goto out;
return -EBUSY;
}
/* Allocate the device array and initialize each one */
zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
if (!zram_devices) {
ret = -ENOMEM;
goto unregister;
unregister_blkdev(zram_major, "zram");
return -ENOMEM;
}
for (dev_id = 0; dev_id < num_devices; dev_id++) {
ret = create_device(&zram_devices[dev_id], dev_id);
if (ret)
goto free_devices;
goto out_error;
}
pr_info("Created %u device(s) ...\n", num_devices);
show_mem_notifier_register(&zram_show_mem_notifier_block);
pr_info("Created %u device(s)\n", num_devices);
return 0;
free_devices:
while (dev_id)
destroy_device(&zram_devices[--dev_id]);
kfree(zram_devices);
unregister:
unregister_blkdev(zram_major, "zram");
out:
out_error:
destroy_devices(dev_id);
return ret;
}
static void __exit zram_exit(void)
{
int i;
struct zram *zram;
for (i = 0; i < num_devices; i++) {
zram = &zram_devices[i];
destroy_device(zram);
/*
* Shouldn't access zram->disk after destroy_device
* because destroy_device already released zram->disk.
*/
zram_reset_device(zram, false);
}
unregister_blkdev(zram_major, "zram");
kfree(zram_devices);
pr_debug("Cleanup done!\n");
destroy_devices(num_devices);
}
module_init(zram_init);
@ -1184,7 +1341,10 @@ module_exit(zram_exit);
module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");
module_param_string(backend, backend_param_buf, BACKEND_PARAM_BUF_SIZE, 0);
MODULE_PARM_DESC(num_devices, "Compression storage (backend) name");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");
MODULE_ALIAS("devname:zram");

View File

@ -21,7 +21,7 @@
#define _ZRAM_DRV_H_
#include <linux/spinlock.h>
#include "../../staging/zsmalloc/zsmalloc.h"
#include <linux/zpool.h>
#include "zcomp.h"
@ -37,7 +37,7 @@ static const unsigned max_num_devices = 32;
* Pages that compress to size greater than this are stored
* uncompressed in memory.
*/
static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
static const size_t max_zpage_size = PAGE_SIZE / 10 * 9;
/*
* NOTE: max_zpage_size must be less than or equal to:
@ -89,6 +89,7 @@ struct zram_stats {
atomic64_t compr_data_size; /* compressed size of pages stored */
atomic64_t num_reads; /* failed + successful */
atomic64_t num_writes; /* --do-- */
atomic64_t num_migrated; /* no. of migrated object */
atomic64_t failed_reads; /* can happen when memory is too low */
atomic64_t failed_writes; /* can happen when memory is too low */
atomic64_t invalid_io; /* non-page-aligned I/O requests */
@ -100,30 +101,30 @@ struct zram_stats {
struct zram_meta {
struct zram_table_entry *table;
struct zs_pool *mem_pool;
struct zpool *mem_pool;
};
struct zram {
struct zram_meta *meta;
struct request_queue *queue;
struct gendisk *disk;
struct zcomp *comp;
/* Prevent concurrent execution of device init, reset and R/W request */
struct gendisk *disk;
/* Prevent concurrent execution of device init */
struct rw_semaphore init_lock;
/*
* the number of pages zram can consume for storing compressed data
*/
unsigned long limit_pages;
int max_comp_streams;
struct zram_stats stats;
atomic_t refcount; /* refcount for zram_meta */
/* wait all IO under all of cpu are done */
wait_queue_head_t io_done;
/*
* This is the limit on amount of *uncompressed* worth of data
* we can store in a disk.
*/
u64 disksize; /* bytes */
int max_comp_streams;
struct zram_stats stats;
/*
* the number of pages zram can consume for storing compressed data
*/
unsigned long limit_pages;
char compressor[10];
};
#endif

View File

@ -270,7 +270,7 @@ static const struct rfkill_ops bluetooth_power_rfkill_ops = {
.set_block = bluetooth_toggle_radio,
};
#ifdef CONFIG_CNSS
#ifdef CONFIG_CNSS_PCI
static ssize_t enable_extldo(struct device *dev, struct device_attribute *attr,
char *buf)
{

View File

@ -67,7 +67,7 @@ static int h4_open(struct hci_uart *hu)
{
struct h4_struct *h4;
BT_DBG("hu %p", hu);
BT_DBG("hu %pK", hu);
h4 = kzalloc(sizeof(*h4), GFP_KERNEL);
if (!h4)
@ -84,7 +84,7 @@ static int h4_flush(struct hci_uart *hu)
{
struct h4_struct *h4 = hu->priv;
BT_DBG("hu %p", hu);
BT_DBG("hu %pK", hu);
skb_queue_purge(&h4->txq);
@ -98,7 +98,7 @@ static int h4_close(struct hci_uart *hu)
hu->priv = NULL;
BT_DBG("hu %p", hu);
BT_DBG("hu %pK", hu);
skb_queue_purge(&h4->txq);
@ -115,7 +115,7 @@ static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
struct h4_struct *h4 = hu->priv;
BT_DBG("hu %p skb %p", hu, skb);
BT_DBG("hu %pK skb %pK", hu, skb);
/* Prepend skb with frame type */
memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);

View File

@ -231,7 +231,7 @@ static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu)
struct ibs_struct *ibs = hu->priv;
struct hci_ibs_cmd *hci_ibs_packet;
BT_DBG("hu %p cmd 0x%x", hu, cmd);
BT_DBG("hu %pK cmd 0x%x", hu, cmd);
/* allocate packet */
skb = bt_skb_alloc(1, GFP_ATOMIC);
@ -259,7 +259,7 @@ static void ibs_wq_awake_device(struct work_struct *work)
struct hci_uart *hu = (struct hci_uart *)ibs->ibs_hu;
unsigned long flags;
BT_DBG(" %p ", hu);
BT_DBG(" %pK ", hu);
/* Vote for serial clock */
ibs_msm_serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);
@ -286,7 +286,7 @@ static void ibs_wq_awake_rx(struct work_struct *work)
struct hci_uart *hu = (struct hci_uart *)ibs->ibs_hu;
unsigned long flags;
BT_DBG(" %p ", hu);
BT_DBG(" %pK ", hu);
ibs_msm_serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);
@ -314,7 +314,7 @@ static void ibs_wq_serial_rx_clock_vote_off(struct work_struct *work)
ws_rx_vote_off);
struct hci_uart *hu = (struct hci_uart *)ibs->ibs_hu;
BT_DBG(" %p ", hu);
BT_DBG(" %pK ", hu);
ibs_msm_serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
@ -326,7 +326,7 @@ static void ibs_wq_serial_tx_clock_vote_off(struct work_struct *work)
ws_tx_vote_off);
struct hci_uart *hu = (struct hci_uart *)ibs->ibs_hu;
BT_DBG(" %p ", hu);
BT_DBG(" %pK ", hu);
hci_uart_tx_wakeup(hu); /* run HCI tx handling unlocked */
@ -342,7 +342,7 @@ static void hci_ibs_tx_idle_timeout(unsigned long arg)
struct ibs_struct *ibs = hu->priv;
unsigned long flags;
BT_DBG("hu %p idle timeout in %lu state", hu, ibs->tx_ibs_state);
BT_DBG("hu %pK idle timeout in %lu state", hu, ibs->tx_ibs_state);
spin_lock_irqsave_nested(&ibs->hci_ibs_lock,
flags, SINGLE_DEPTH_NESTING);
@ -376,8 +376,8 @@ static void hci_ibs_wake_retrans_timeout(unsigned long arg)
unsigned long flags;
unsigned long retransmit = 0;
BT_DBG("hu %p wake retransmit timeout in %lu state",
hu, ibs->tx_ibs_state);
BT_DBG("hu %pK wake retransmit timeout in %lu state",
hu, ibs->tx_ibs_state);
spin_lock_irqsave_nested(&ibs->hci_ibs_lock,
flags, SINGLE_DEPTH_NESTING);
@ -409,7 +409,7 @@ static int ibs_open(struct hci_uart *hu)
{
struct ibs_struct *ibs;
BT_DBG("hu %p", hu);
BT_DBG("hu %pK", hu);
ibs = kzalloc(sizeof(*ibs), GFP_ATOMIC);
if (!ibs)
@ -505,7 +505,7 @@ static int ibs_flush(struct hci_uart *hu)
{
struct ibs_struct *ibs = hu->priv;
BT_DBG("hu %p", hu);
BT_DBG("hu %pK", hu);
skb_queue_purge(&ibs->tx_wait_q);
skb_queue_purge(&ibs->txq);
@ -518,7 +518,7 @@ static int ibs_close(struct hci_uart *hu)
{
struct ibs_struct *ibs = hu->priv;
BT_DBG("hu %p", hu);
BT_DBG("hu %pK", hu);
ibs_msm_serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu);
ibs_log_local_stats(ibs);
@ -547,7 +547,7 @@ static void ibs_device_want_to_wakeup(struct hci_uart *hu)
unsigned long flags;
struct ibs_struct *ibs = hu->priv;
BT_DBG("hu %p", hu);
BT_DBG("hu %pK", hu);
/* lock hci_ibs state */
spin_lock_irqsave(&ibs->hci_ibs_lock, flags);
@ -596,7 +596,7 @@ static void ibs_device_want_to_sleep(struct hci_uart *hu)
unsigned long flags;
struct ibs_struct *ibs = hu->priv;
BT_DBG("hu %p", hu);
BT_DBG("hu %pK", hu);
/* lock hci_ibs state */
spin_lock_irqsave(&ibs->hci_ibs_lock, flags);
@ -632,7 +632,7 @@ static void ibs_device_woke_up(struct hci_uart *hu)
struct ibs_struct *ibs = hu->priv;
struct sk_buff *skb = NULL;
BT_DBG("hu %p", hu);
BT_DBG("hu %pK", hu);
/* lock hci_ibs state */
spin_lock_irqsave(&ibs->hci_ibs_lock, flags);
@ -677,7 +677,7 @@ static int ibs_enqueue(struct hci_uart *hu, struct sk_buff *skb)
unsigned long flags = 0;
struct ibs_struct *ibs = hu->priv;
BT_DBG("hu %p skb %p", hu, skb);
BT_DBG("hu %pK skb %pK", hu, skb);
/* Prepend skb with frame type */
memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
@ -756,8 +756,8 @@ static int ibs_recv(struct hci_uart *hu, void *data, int count)
struct hci_sco_hdr *sh;
register int len, type, dlen;
BT_DBG("hu %p count %d rx_state %ld rx_count %ld",
hu, count, ibs->rx_state, ibs->rx_count);
BT_DBG("hu %pK count %d rx_state %ld rx_count %ld",
hu, count, ibs->rx_state, ibs->rx_count);
ptr = data;
while (count) {

View File

@ -200,7 +200,7 @@ int hci_uart_init_ready(struct hci_uart *hu)
/* Initialize device */
static int hci_uart_open(struct hci_dev *hdev)
{
BT_DBG("%s %p", hdev->name, hdev);
BT_DBG("%s %pK", hdev->name, hdev);
/* Nothing to do for UART driver */
@ -215,7 +215,7 @@ static int hci_uart_flush(struct hci_dev *hdev)
struct hci_uart *hu = hci_get_drvdata(hdev);
struct tty_struct *tty = hu->tty;
BT_DBG("hdev %p tty %p", hdev, tty);
BT_DBG("hdev %pK tty %pK", hdev, tty);
if (hu->tx_skb) {
kfree_skb(hu->tx_skb); hu->tx_skb = NULL;
@ -234,7 +234,7 @@ static int hci_uart_flush(struct hci_dev *hdev)
/* Close device */
static int hci_uart_close(struct hci_dev *hdev)
{
BT_DBG("hdev %p", hdev);
BT_DBG("hdev %pK", hdev);
if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
return 0;
@ -283,7 +283,7 @@ static int hci_uart_tty_open(struct tty_struct *tty)
{
struct hci_uart *hu;
BT_DBG("tty %p", tty);
BT_DBG("tty %pK", tty);
/* Error if the tty has no write op instead of leaving an exploitable
hole */
@ -326,7 +326,7 @@ static void hci_uart_tty_close(struct tty_struct *tty)
struct hci_uart *hu = (void *)tty->disc_data;
struct hci_dev *hdev;
BT_DBG("tty %p", tty);
BT_DBG("tty %pK", tty);
/* Detach from the tty */
tty->disc_data = NULL;

View File

@ -98,8 +98,9 @@ static int compat_get_fastrpc_ioctl_invoke(
if (err)
return -EFAULT;
inv->inv.pra = (union remote_arg *)(inv + 1);
err = put_user(sc, &inv->inv.sc);
pra = (union remote_arg *)(inv + 1);
err = put_user(pra, &inv->inv.pra);
err |= put_user(sc, &inv->inv.sc);
err |= get_user(u, &inv32->inv.handle);
err |= put_user(u, &inv->inv.handle);
err |= get_user(p, &inv32->inv.pra);
@ -107,12 +108,11 @@ static int compat_get_fastrpc_ioctl_invoke(
return err;
pra32 = compat_ptr(p);
pra = inv->inv.pra;
pra = (union remote_arg *)(inv + 1);
num = REMOTE_SCALARS_INBUFS(sc) + REMOTE_SCALARS_OUTBUFS(sc);
for (j = 0; j < num; j++) {
err |= get_user(p, &pra32[j].buf.pv);
pra[j].buf.pv = NULL;
err |= put_user(p, (compat_uptr_t *)&pra[j].buf.pv);
err |= put_user(p, (uintptr_t *)&pra[j].buf.pv);
err |= get_user(s, &pra32[j].buf.len);
err |= put_user(s, &pra[j].buf.len);
}
@ -121,7 +121,7 @@ static int compat_get_fastrpc_ioctl_invoke(
err |= put_user(u, &pra[num + j].h);
}
inv->fds = NULL;
err |= put_user(NULL, &inv->fds);
if (cmd == COMPAT_FASTRPC_IOCTL_INVOKE_FD) {
err |= get_user(p, &inv32->fds);
err |= put_user(p, (compat_uptr_t *)&inv->fds);
@ -173,8 +173,7 @@ static int compat_get_fastrpc_ioctl_mmap(
err |= get_user(u, &map32->flags);
err |= put_user(u, &map->flags);
err |= get_user(p, &map32->vaddrin);
map->vaddrin = NULL;
err |= put_user(p, (compat_uptr_t *)&map->vaddrin);
err |= put_user(p, (uintptr_t *)&map->vaddrin);
err |= get_user(s, &map32->size);
err |= put_user(s, &map->size);

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -409,7 +409,7 @@ static int diag_process_single_dci_pkt(unsigned char *buf, int len,
uint8_t cmd_code = 0;
if (!buf || len < 0) {
pr_err("diag: Invalid input in %s, buf: %p, len: %d\n",
pr_err("diag: Invalid input in %s, buf: %pK, len: %d\n",
__func__, buf, len);
return -EIO;
}
@ -755,7 +755,7 @@ static int diag_dci_remove_req_entry(unsigned char *buf, int len,
{
uint16_t rsp_count = 0, delayed_rsp_id = 0;
if (!buf || len <= 0 || !entry) {
pr_err("diag: In %s, invalid input buf: %p, len: %d, entry: %p\n",
pr_err("diag: In %s, invalid input buf: %pK, len: %d, entry: %pK\n",
__func__, buf, len, entry);
return -EIO;
}
@ -809,7 +809,7 @@ static void dci_process_ctrl_status(unsigned char *buf, int len, int token)
int peripheral_mask, status;
if (!buf || (len < sizeof(struct diag_ctrl_dci_status))) {
pr_err("diag: In %s, invalid buf %p or length: %d\n",
pr_err("diag: In %s, invalid buf %pK or length: %d\n",
__func__, buf, len);
return;
}
@ -1961,7 +1961,7 @@ int diag_process_dci_transaction(unsigned char *buf, int len)
mutex_unlock(&driver->dci_mutex);
return -ENOMEM;
}
pr_debug("diag: head of dci log mask %p\n", head_log_mask_ptr);
pr_debug("diag: head of dci log mask %pK\n", head_log_mask_ptr);
count = 0; /* iterator for extracting log codes */
while (count < num_codes) {
@ -1991,7 +1991,7 @@ int diag_process_dci_transaction(unsigned char *buf, int len)
while (log_mask_ptr && (offset < DCI_LOG_MASK_SIZE)) {
if (*log_mask_ptr == equip_id) {
found = 1;
pr_debug("diag: find equip id = %x at %p\n",
pr_debug("diag: find equip id = %x at %pK\n",
equip_id, log_mask_ptr);
break;
} else {
@ -2075,7 +2075,7 @@ int diag_process_dci_transaction(unsigned char *buf, int len)
mutex_unlock(&driver->dci_mutex);
return -ENOMEM;
}
pr_debug("diag: head of dci event mask %p\n", event_mask_ptr);
pr_debug("diag: head of dci event mask %pK\n", event_mask_ptr);
count = 0; /* iterator for extracting log codes */
while (count < num_codes) {
if (read_len >= USER_SPACE_DATA) {
@ -3131,7 +3131,7 @@ int diag_dci_write_proc(int peripheral, int pkt_type, char *buf, int len)
if (!buf || (peripheral < 0 || peripheral >= NUM_SMD_DCI_CHANNELS)
|| !driver->rcvd_feature_mask[peripheral] || len < 0) {
pr_err("diag: In %s, invalid data 0x%p, peripheral: %d, len: %d\n",
pr_err("diag: In %s, invalid data 0x%pK, peripheral: %d, len: %d\n",
__func__, buf, peripheral, len);
return -EINVAL;
}

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2011-2014, 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -50,26 +50,26 @@ static ssize_t diag_dbgfs_read_status(struct file *file, char __user *ubuf,
}
buf_size = ksize(buf);
ret = scnprintf(buf, buf_size,
"modem ch: 0x%p\n"
"lpass ch: 0x%p\n"
"riva ch: 0x%p\n"
"sensors ch: 0x%p\n"
"modem dci ch: 0x%p\n"
"lpass dci ch: 0x%p\n"
"riva dci ch: 0x%p\n"
"sensors dci ch: 0x%p\n"
"modem cntl_ch: 0x%p\n"
"lpass cntl_ch: 0x%p\n"
"riva cntl_ch: 0x%p\n"
"sensors cntl_ch: 0x%p\n"
"modem cmd ch: 0x%p\n"
"adsp cmd ch: 0x%p\n"
"riva cmd ch: 0x%p\n"
"sensors cmd ch: 0x%p\n"
"modem dci cmd ch: 0x%p\n"
"lpass dci cmd ch: 0x%p\n"
"riva dci cmd ch: 0x%p\n"
"sensors dci cmd ch: 0x%p\n"
"modem ch: 0x%pK\n"
"lpass ch: 0x%pK\n"
"riva ch: 0x%pK\n"
"sensors ch: 0x%pK\n"
"modem dci ch: 0x%pK\n"
"lpass dci ch: 0x%pK\n"
"riva dci ch: 0x%pK\n"
"sensors dci ch: 0x%pK\n"
"modem cntl_ch: 0x%pK\n"
"lpass cntl_ch: 0x%pK\n"
"riva cntl_ch: 0x%pK\n"
"sensors cntl_ch: 0x%pK\n"
"modem cmd ch: 0x%pK\n"
"adsp cmd ch: 0x%pK\n"
"riva cmd ch: 0x%pK\n"
"sensors cmd ch: 0x%pK\n"
"modem dci cmd ch: 0x%pK\n"
"lpass dci cmd ch: 0x%pK\n"
"riva dci cmd ch: 0x%pK\n"
"sensors dci cmd ch: 0x%pK\n"
"CPU Tools id: %d\n"
"Apps only: %d\n"
"Apps master: %d\n"
@ -723,7 +723,7 @@ static ssize_t diag_dbgfs_read_usbinfo(struct file *file, char __user *ubuf,
bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
"id: %d\n"
"name: %s\n"
"hdl: %p\n"
"hdl: %pK\n"
"connected: %d\n"
"enabled: %d\n"
"mempool: %s\n"
@ -865,9 +865,9 @@ static ssize_t diag_dbgfs_read_mhiinfo(struct file *file, char __user *ubuf,
"bridge index: %s\n"
"mempool: %s\n"
"read ch opened: %d\n"
"read ch hdl: %p\n"
"read ch hdl: %pK\n"
"write ch opened: %d\n"
"write ch hdl: %p\n"
"write ch hdl: %pK\n"
"read work pending: %d\n"
"read done work pending: %d\n"
"open work pending: %d\n"
@ -936,9 +936,9 @@ static ssize_t diag_dbgfs_read_bridge(struct file *file, char __user *ubuf,
"type: %d\n"
"inited: %d\n"
"ctxt: %d\n"
"dev_ops: %p\n"
"dci_read_buf: %p\n"
"dci_read_ptr: %p\n"
"dev_ops: %pK\n"
"dci_read_buf: %pK\n"
"dci_read_ptr: %pK\n"
"dci_read_len: %d\n\n",
info->id,
info->name,

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2008-2015 The Linux Foundation. All rights reserved.
/* Copyright (c) 2008-2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -388,7 +388,7 @@ static int diag_cmd_get_ssid_range(unsigned char *src_buf, int src_len,
struct diag_ssid_range_t ssid_range;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
pr_err("diag: Invalid input in %s, src_buf: %p, src_len: %d, dest_buf: %p, dest_len: %d",
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
__func__, src_buf, src_len, dest_buf, dest_len);
return -EINVAL;
}
@ -432,7 +432,7 @@ static int diag_cmd_get_build_mask(unsigned char *src_buf, int src_len,
struct diag_msg_build_mask_t rsp;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
pr_err("diag: Invalid input in %s, src_buf: %p, src_len: %d, dest_buf: %p, dest_len: %d",
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
__func__, src_buf, src_len, dest_buf, dest_len);
return -EINVAL;
}
@ -485,7 +485,7 @@ static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len,
struct diag_msg_build_mask_t rsp;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
pr_err("diag: Invalid input in %s, src_buf: %p, src_len: %d, dest_buf: %p, dest_len: %d",
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
__func__, src_buf, src_len, dest_buf, dest_len);
return -EINVAL;
}
@ -534,10 +534,11 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
struct diag_msg_mask_t *mask = NULL;
struct diag_msg_build_mask_t *req = NULL;
struct diag_msg_build_mask_t rsp;
struct diag_msg_mask_t *mask_next = NULL;
uint32_t *temp = NULL;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
pr_err("diag: Invalid input in %s, src_buf: %p, src_len: %d, dest_buf: %p, dest_len: %d",
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
__func__, src_buf, src_len, dest_buf, dest_len);
return -EINVAL;
}
@ -547,10 +548,18 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
mutex_lock(&msg_mask.lock);
mask = (struct diag_msg_mask_t *)msg_mask.ptr;
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
if (i < (driver->msg_mask_tbl_count - 1)) {
mask_next = mask;
mask_next++;
} else
mask_next = NULL;
if ((req->ssid_first < mask->ssid_first) ||
(req->ssid_first > mask->ssid_last_tools)) {
(req->ssid_first > mask->ssid_first + MAX_SSID_PER_RANGE) ||
(mask_next && (req->ssid_first >= mask_next->ssid_first))) {
continue;
}
mask_next = NULL;
found = 1;
mask_size = req->ssid_last - req->ssid_first + 1;
if (mask_size > MAX_SSID_PER_RANGE) {
@ -566,8 +575,10 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
pr_debug("diag: Msg SSID range mismatch\n");
if (mask_size != MAX_SSID_PER_RANGE)
mask->ssid_last_tools = req->ssid_last;
mask->range_tools =
mask->ssid_last_tools - mask->ssid_first + 1;
temp = krealloc(mask->ptr,
mask_size * sizeof(uint32_t),
mask->range_tools * sizeof(uint32_t),
GFP_KERNEL);
if (!temp) {
pr_err_ratelimited("diag: In %s, unable to allocate memory for msg mask ptr, mask_size: %d\n",
@ -575,7 +586,6 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
return -ENOMEM;
}
mask->ptr = temp;
mask->range_tools = mask_size;
}
offset = req->ssid_first - mask->ssid_first;
@ -630,7 +640,7 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
struct diag_msg_mask_t *mask = (struct diag_msg_mask_t *)msg_mask.ptr;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
pr_err("diag: Invalid input in %s, src_buf: %p, src_len: %d, dest_buf: %p, dest_len: %d",
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
__func__, src_buf, src_len, dest_buf, dest_len);
return -EINVAL;
}
@ -676,7 +686,7 @@ static int diag_cmd_get_event_mask(unsigned char *src_buf, int src_len,
struct diag_event_mask_config_t rsp;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
pr_err("diag: Invalid input in %s, src_buf: %p, src_len: %d, dest_buf: %p, dest_len: %d",
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
__func__, src_buf, src_len, dest_buf, dest_len);
return -EINVAL;
}
@ -714,7 +724,7 @@ static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len,
struct diag_event_mask_config_t *req;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
pr_err("diag: Invalid input in %s, src_buf: %p, src_len: %d, dest_buf: %p, dest_len: %d",
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
__func__, src_buf, src_len, dest_buf, dest_len);
return -EINVAL;
}
@ -761,7 +771,7 @@ static int diag_cmd_toggle_events(unsigned char *src_buf, int src_len,
struct diag_event_report_t header;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
pr_err("diag: Invalid input in %s, src_buf: %p, src_len: %d, dest_buf: %p, dest_len: %d",
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
__func__, src_buf, src_len, dest_buf, dest_len);
return -EINVAL;
}
@ -807,7 +817,7 @@ static int diag_cmd_get_log_mask(unsigned char *src_buf, int src_len,
struct diag_log_config_rsp_t rsp;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
pr_err("diag: Invalid input in %s, src_buf: %p, src_len: %d, dest_buf: %p, dest_len: %d",
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
__func__, src_buf, src_len, dest_buf, dest_len);
return -EINVAL;
}
@ -877,7 +887,7 @@ static int diag_cmd_get_log_range(unsigned char *src_buf, int src_len,
return 0;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
pr_err("diag: Invalid input in %s, src_buf: %p, src_len: %d, dest_buf: %p, dest_len: %d",
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
__func__, src_buf, src_len, dest_buf, dest_len);
return -EINVAL;
}
@ -916,7 +926,7 @@ static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len,
unsigned char *temp_buf = NULL;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
pr_err("diag: Invalid input in %s, src_buf: %p, src_len: %d, dest_buf: %p, dest_len: %d",
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
__func__, src_buf, src_len, dest_buf, dest_len);
return -EINVAL;
}
@ -1010,7 +1020,7 @@ static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len,
int i;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
pr_err("diag: Invalid input in %s, src_buf: %p, src_len: %d, dest_buf: %p, dest_len: %d",
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
__func__, src_buf, src_len, dest_buf, dest_len);
return -EINVAL;
}

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -141,7 +141,7 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx)
if (ch->tbl[i].buf != buf)
continue;
found = 1;
pr_err_ratelimited("diag: trying to write the same buffer buf: %p, ctxt: %d len: %d at i: %d back to the table, proc: %d, mode: %d\n",
pr_err_ratelimited("diag: trying to write the same buffer buf: %pK, ctxt: %d len: %d at i: %d back to the table, proc: %d, mode: %d\n",
buf, ctx, ch->tbl[i].len,
i, id, driver->logging_mode);
}

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2008-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -692,7 +692,7 @@ static int diag_process_userspace_remote(int proc, void *buf, int len)
int bridge_index = proc - 1;
if (!buf || len < 0) {
pr_err("diag: Invalid input in %s, buf: %p, len: %d\n",
pr_err("diag: Invalid input in %s, buf: %pK, len: %d\n",
__func__, buf, len);
return -EINVAL;
}

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2008-2009, 2012-2013, The Linux Foundation.
/* Copyright (c) 2008-2009, 2012-2013, 2016, The Linux Foundation.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
@ -242,7 +242,7 @@ int crc_check(uint8_t *buf, uint16_t len)
* of data and 3 bytes for CRC
*/
if (!buf || len < 4) {
pr_err_ratelimited("diag: In %s, invalid packet or length, buf: 0x%p, len: %d",
pr_err_ratelimited("diag: In %s, invalid packet or length, buf: 0x%pK, len: %d",
__func__, buf, len);
return -EIO;
}

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2008-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -810,7 +810,7 @@ void diag_update_pkt_buffer(unsigned char *buf, int type)
}
if (!ptr || length == 0) {
pr_err("diag: Invalid ptr %p and length %d in %s",
pr_err("diag: Invalid ptr %pK and length %d in %s",
ptr, length, __func__);
return;
}
@ -923,7 +923,7 @@ int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf)
int i;
if (!buf || !dest_buf) {
pr_err("diag: Invalid pointers buf: %p, dest_buf %p in %s\n",
pr_err("diag: Invalid pointers buf: %pK, dest_buf %pK in %s\n",
buf, dest_buf, __func__);
return -EIO;
}
@ -1011,7 +1011,7 @@ int diag_cmd_log_on_demand(unsigned char *src_buf, int src_len,
return 0;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
pr_err("diag: Invalid input in %s, src_buf: %p, src_len: %d, dest_buf: %p, dest_len: %d",
pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
__func__, src_buf, src_len, dest_buf, dest_len);
return -EINVAL;
}
@ -2037,7 +2037,7 @@ int diag_smd_write(struct diag_smd_info *smd_info, void *buf, int len)
int max_retries = 3;
if (!smd_info || !buf || len <= 0) {
pr_err_ratelimited("diag: In %s, invalid params, smd_info: %p, buf: %p, len: %d\n",
pr_err_ratelimited("diag: In %s, invalid params, smd_info: %pK, buf: %pK, len: %d\n",
__func__, smd_info, buf, len);
return -EINVAL;
}

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2014, 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -133,7 +133,7 @@ int diagfwd_bridge_register(int id, int ctxt, struct diag_remote_dev_ops *ops)
char wq_name[DIAG_BRIDGE_NAME_SZ + 10];
if (!ops) {
pr_err("diag: Invalid pointers ops: %p ctxt: %d\n", ops, ctxt);
pr_err("diag: Invalid pointers ops: %pK ctxt: %d\n", ops, ctxt);
return -EINVAL;
}

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
/* Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -821,7 +821,7 @@ static int __diag_send_diag_mode_update_by_smd(struct diag_smd_info *smd_info,
int err = 0;
if (!smd_info || smd_info->type != SMD_CNTL_TYPE) {
pr_err("diag: In %s, invalid channel info, smd_info: %p type: %d\n",
pr_err("diag: In %s, invalid channel info, smd_info: %pK type: %d\n",
__func__, smd_info,
((smd_info) ? smd_info->type : -1));
return -EIO;

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2014, 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -354,7 +354,7 @@ static int hsic_write(int id, unsigned char *buf, int len, int ctxt)
return -EINVAL;
}
if (!buf || len <= 0) {
pr_err_ratelimited("diag: In %s, ch %d, invalid buf %p len %d\n",
pr_err_ratelimited("diag: In %s, ch %d, invalid buf %pK len %d\n",
__func__, id, buf, len);
return -EINVAL;
}

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -133,7 +133,7 @@ static int mhi_buf_tbl_add(struct diag_mhi_info *mhi_info, int type,
item = kzalloc(sizeof(struct diag_mhi_buf_tbl_t), GFP_KERNEL);
if (!item) {
pr_err_ratelimited("diag: In %s, unable to allocate new item for buf tbl, ch: %p, type: %d, buf: %p, len: %d\n",
pr_err_ratelimited("diag: In %s, unable to allocate new item for buf tbl, ch: %pK, type: %d, buf: %pK, len: %d\n",
__func__, ch, ch->type, buf, len);
return -ENOMEM;
}
@ -187,7 +187,7 @@ static void mhi_buf_tbl_remove(struct diag_mhi_info *mhi_info, int type,
spin_unlock_irqrestore(&ch->lock, flags);
if (!found) {
pr_err_ratelimited("diag: In %s, unable to find buffer, ch: %p, type: %d, buf: %p\n",
pr_err_ratelimited("diag: In %s, unable to find buffer, ch: %pK, type: %d, buf: %pK\n",
__func__, ch, ch->type, buf);
}
}
@ -443,7 +443,7 @@ static int mhi_write(int id, unsigned char *buf, int len, int ctxt)
}
if (!buf || len <= 0) {
pr_err("diag: In %s, ch %d, invalid buf %p len %d\n",
pr_err("diag: In %s, ch %d, invalid buf %pK len %d\n",
__func__, id, buf, len);
return -EINVAL;
}
@ -473,7 +473,7 @@ static int mhi_write(int id, unsigned char *buf, int len, int ctxt)
err = mhi_queue_xfer(ch->hdl, dma_addr, len, flags);
if (err) {
pr_err_ratelimited("diag: In %s, cannot write to MHI channel %p, len %d, err: %d\n",
pr_err_ratelimited("diag: In %s, cannot write to MHI channel %pK, len %d, err: %d\n",
__func__, diag_mhi[id].name, len, err);
dma_unmap_single(NULL, (dma_addr_t)dma_addr, len,
DMA_TO_DEVICE);

View File

@ -359,6 +359,7 @@ static long smd_pkt_ioctl(struct file *file, unsigned int cmd,
{
int ret;
struct smd_pkt_dev *smd_pkt_devp;
uint32_t val;
smd_pkt_devp = file->private_data;
if (!smd_pkt_devp)
@ -372,9 +373,15 @@ static long smd_pkt_ioctl(struct file *file, unsigned int cmd,
ret = smd_tiocmget(smd_pkt_devp->ch);
break;
case TIOCMSET:
D_STATUS("%s TIOCSET command on smd_pkt_dev id:%d\n",
__func__, smd_pkt_devp->i);
ret = smd_tiocmset(smd_pkt_devp->ch, arg, ~arg);
ret = get_user(val, (uint32_t *)arg);
if (ret) {
pr_err("Error getting TIOCMSET value\n");
mutex_unlock(&smd_pkt_devp->ch_lock);
return ret;
}
D_STATUS("%s TIOCSET command on smd_pkt_dev id:%d arg[0x%x]\n",
__func__, smd_pkt_devp->i, val);
ret = smd_tiocmset(smd_pkt_devp->ch, val, ~val);
break;
case SMD_PKT_IOCTL_BLOCKING_WRITE:
ret = get_user(smd_pkt_devp->blocking_write, (int *)arg);

View File

@ -1502,6 +1502,28 @@ unsigned int get_random_int(void)
}
EXPORT_SYMBOL(get_random_int);
/*
 * Same as get_random_int(), but returns unsigned long.
 * Not cryptographically secure: reuses the per-CPU MD5 hash state of
 * get_random_int(); callers needing real entropy must use get_random_bytes().
 */
unsigned long get_random_long(void)
{
	__u32 *hash;
	unsigned long ret;

	/* Use the hardware RNG instruction (e.g. RDRAND) when the arch has one. */
	if (arch_get_random_long(&ret))
		return ret;

	/*
	 * get_cpu_var() disables preemption while we mutate the per-CPU
	 * hash state; put_cpu_var() below re-enables it.
	 */
	hash = get_cpu_var(get_random_int_hash);

	/* Perturb the state with pid, jiffies and the cycle counter, then mix. */
	hash[0] += current->pid + jiffies + get_cycles();
	md5_transform(hash, random_int_secret);
	/*
	 * NOTE(review): on 64-bit this cast reads the first two adjacent
	 * __u32 words of the hash buffer as one unsigned long.
	 */
	ret = *(unsigned long *)hash;
	put_cpu_var(get_random_int_hash);

	return ret;
}
EXPORT_SYMBOL(get_random_long);
/*
* randomize_range() returns a start address such that
*

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2013,2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -195,8 +195,6 @@ static int csr_probe(struct platform_device *pdev)
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
/* Store the driver data pointer for use in exported functions */
csrdrvdata = drvdata;
drvdata->dev = &pdev->dev;
platform_set_drvdata(pdev, drvdata);
@ -227,6 +225,9 @@ static int csr_probe(struct platform_device *pdev)
if (IS_ERR(drvdata->csdev))
return PTR_ERR(drvdata->csdev);
/* Store the driver data pointer for use in exported functions */
csrdrvdata = drvdata;
dev_info(dev, "CSR initialized\n");
return 0;
}

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -305,8 +305,6 @@ static int fuse_probe(struct platform_device *pdev)
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
/* Store the driver data pointer for use in exported functions */
fusedrvdata = drvdata;
drvdata->dev = &pdev->dev;
platform_set_drvdata(pdev, drvdata);
@ -370,6 +368,9 @@ static int fuse_probe(struct platform_device *pdev)
if (IS_ERR(drvdata->csdev))
return PTR_ERR(drvdata->csdev);
/* Store the driver data pointer for use in exported functions */
fusedrvdata = drvdata;
dev_info(dev, "Fuse initialized\n");
return 0;
}

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -868,8 +868,6 @@ static int stm_probe(struct platform_device *pdev)
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
/* Store the driver data pointer for use in exported functions */
stmdrvdata = drvdata;
drvdata->dev = &pdev->dev;
platform_set_drvdata(pdev, drvdata);
@ -948,6 +946,9 @@ static int stm_probe(struct platform_device *pdev)
if (boot_enable)
coresight_enable(drvdata->csdev);
/* Store the driver data pointer for use in exported functions */
stmdrvdata = drvdata;
return 0;
err:
coresight_unregister(drvdata->csdev);

View File

@ -2036,6 +2036,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
policy->min = new_policy->min;
policy->max = new_policy->max;
trace_cpu_frequency_limits(policy->max, policy->min, policy->cpu);
pr_debug("new min and max freqs are %u - %u kHz\n",
policy->min, policy->max);

View File

@ -1,6 +1,6 @@
/* Qualcomm Crypto Engine driver.
*
* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
* Copyright (c) 2010-2014,2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -1952,8 +1952,8 @@ int qce_aead_req(void *handle, struct qce_req *q_req)
else
q_req->cryptlen = areq->cryptlen - authsize;
if ((q_req->cryptlen > ULONG_MAX - ivsize) ||
(q_req->cryptlen + ivsize > ULONG_MAX - areq->assoclen)) {
if ((q_req->cryptlen > UINT_MAX - ivsize) ||
(q_req->cryptlen + ivsize > UINT_MAX - areq->assoclen)) {
pr_err("Integer overflow on total aead req length.\n");
return -EINVAL;
}

View File

@ -1,6 +1,6 @@
/* Qualcomm CE device driver.
*
* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -1245,44 +1245,6 @@ static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,
struct qcedev_cipher_op_req *saved_req;
struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;
/* Verify Source Address's */
for (i = 0; i < areq->cipher_op_req.entries; i++)
if (!access_ok(VERIFY_READ,
(void __user *)areq->cipher_op_req.vbuf.src[i].vaddr,
areq->cipher_op_req.vbuf.src[i].len))
return -EFAULT;
/* Verify Destination Address's */
if (creq->in_place_op != 1) {
for (i = 0, total = 0; i < QCEDEV_MAX_BUFFERS; i++) {
if ((areq->cipher_op_req.vbuf.dst[i].vaddr != 0) &&
(total < creq->data_len)) {
if (!access_ok(VERIFY_WRITE,
(void __user *)creq->vbuf.dst[i].vaddr,
creq->vbuf.dst[i].len)) {
pr_err("%s:DST WR_VERIFY err %d=0x%lx\n",
__func__, i, (uintptr_t)
creq->vbuf.dst[i].vaddr);
return -EFAULT;
}
total += creq->vbuf.dst[i].len;
}
}
} else {
for (i = 0, total = 0; i < creq->entries; i++) {
if (total < creq->data_len) {
if (!access_ok(VERIFY_WRITE,
(void __user *)creq->vbuf.src[i].vaddr,
creq->vbuf.src[i].len)) {
pr_err("%s:SRC WR_VERIFY err %d=0x%lx\n",
__func__, i, (uintptr_t)
creq->vbuf.src[i].vaddr);
return -EFAULT;
}
total += creq->vbuf.src[i].len;
}
}
}
total = 0;
if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
@ -1554,7 +1516,7 @@ static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
}
/* Check for sum of all dst length is equal to data_len */
for (i = 0; i < req->entries; i++) {
if (req->vbuf.dst[i].len >= ULONG_MAX - total) {
if (req->vbuf.dst[i].len >= U32_MAX - total) {
pr_err("%s: Integer overflow on total req dst vbuf length\n",
__func__);
goto error;
@ -1568,7 +1530,7 @@ static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
}
/* Check for sum of all src length is equal to data_len */
for (i = 0, total = 0; i < req->entries; i++) {
if (req->vbuf.src[i].len > ULONG_MAX - total) {
if (req->vbuf.src[i].len > U32_MAX - total) {
pr_err("%s: Integer overflow on total req src vbuf length\n",
__func__);
goto error;
@ -1580,6 +1542,36 @@ static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
__func__, total, req->data_len);
goto error;
}
/* Verify Source Address's */
for (i = 0, total = 0; i < req->entries; i++) {
if (total < req->data_len) {
if (!access_ok(VERIFY_READ,
(void __user *)req->vbuf.src[i].vaddr,
req->vbuf.src[i].len)) {
pr_err("%s:SRC RD_VERIFY err %d=0x%lx\n",
__func__, i, (uintptr_t)
req->vbuf.src[i].vaddr);
goto error;
}
total += req->vbuf.src[i].len;
}
}
/* Verify Destination Address's */
for (i = 0, total = 0; i < QCEDEV_MAX_BUFFERS; i++) {
if ((req->vbuf.dst[i].vaddr != 0) &&
(total < req->data_len)) {
if (!access_ok(VERIFY_WRITE,
(void __user *)req->vbuf.dst[i].vaddr,
req->vbuf.dst[i].len)) {
pr_err("%s:DST WR_VERIFY err %d=0x%lx\n",
__func__, i, (uintptr_t)
req->vbuf.dst[i].vaddr);
goto error;
}
total += req->vbuf.dst[i].len;
}
}
return 0;
error:
return -EINVAL;
@ -1630,7 +1622,7 @@ static int qcedev_check_sha_params(struct qcedev_sha_op_req *req,
/* Check for sum of all src length is equal to data_len */
for (i = 0, total = 0; i < req->entries; i++) {
if (req->data[i].len > ULONG_MAX - total) {
if (req->data[i].len > U32_MAX - total) {
pr_err("%s: Integer overflow on total req buf length\n",
__func__);
goto sha_error;

View File

@ -1885,12 +1885,12 @@ static int _qcrypto_process_aead(struct crypto_engine *pengine,
* include assoicated data, ciphering data stream,
* generated MAC, and CCM padding.
*/
if ((MAX_ALIGN_SIZE * 2 > ULONG_MAX - req->assoclen) ||
if ((MAX_ALIGN_SIZE * 2 > UINT_MAX - req->assoclen) ||
((MAX_ALIGN_SIZE * 2 + req->assoclen) >
ULONG_MAX - qreq.ivsize) ||
UINT_MAX - qreq.ivsize) ||
((MAX_ALIGN_SIZE * 2 + req->assoclen
+ qreq.ivsize)
> ULONG_MAX - req->cryptlen)) {
> UINT_MAX - req->cryptlen)) {
pr_err("Integer overflow on aead req length.\n");
return -EINVAL;
}

Some files were not shown because too many files have changed in this diff Show More