32.2.A.0.224

This commit is contained in:
Olivier Karasangabo 2016-06-03 01:02:53 +02:00
parent 7d32cb1e14
commit 9c2e3b167d
47 changed files with 270 additions and 1775 deletions

View File

@ -408,6 +408,11 @@
15 01 00 00 00 00 02 FB 01
15 01 00 00 00 00 02 35 00
39 01 00 00 00 00 03 44 00 00
15 01 00 00 00 00 02 FF 05
15 01 00 00 00 00 02 FB 01
15 01 00 00 00 00 02 0B 68
15 01 00 00 00 00 02 FF 00
15 01 00 00 00 00 02 FB 01
05 01 00 00 64 00 01 11];
qcom,mdss-dsi-on-command = [05 01 00 00 00 00 01 29];
qcom,mdss-dsi-off-command = [05 01 00 00 14 00 01 28

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
* Copyright (c) 2011-2012, 2014,2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -208,9 +208,6 @@ static unsigned int get_krait_evtinfo(unsigned int krait_evt_type,
code = (krait_evt_type & 0x00FF0) >> 4;
group = krait_evt_type & 0x0000F;
if ((group > 3) || (reg > KRAIT_MAX_L1_REG))
return -EINVAL;
if (prefix != KRAIT_EVT_PREFIX && prefix != KRAIT_VENUMEVT_PREFIX)
return -EINVAL;
@ -221,6 +218,9 @@ static unsigned int get_krait_evtinfo(unsigned int krait_evt_type,
reg += VENUM_BASE_OFFSET;
}
if ((group > 3) || (reg > KRAIT_MAX_L1_REG))
return -EINVAL;
evtinfo->group_setval = 0x80000000 | (code << (group * 8));
evtinfo->groupcode = reg;
evtinfo->armv7_evt_type = evt_type_base[reg] | group;

View File

@ -14,8 +14,6 @@ CONFIG_CIFS=y
# CONFIG_CORESIGHT is not set
CONFIG_CRASH_NOTES=y
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEVKMEM is not set
# CONFIG_DEVMEM is not set
CONFIG_DYNAMIC_DEBUG=y
# CONFIG_EXT2_FS is not set
# CONFIG_EXT3_FS is not set
@ -73,7 +71,6 @@ CONFIG_SONY_CAM_V4L2=y
CONFIG_SONY_SSM=y
CONFIG_STRICT_DEVMEM=y
CONFIG_TOUCHSCREEN_CLEARPAD=y
CONFIG_UID_CPUTIME=y
# CONFIG_USB_ACM is not set
# CONFIG_USB_CCID_BRIDGE is not set
# CONFIG_USB_CI13XXX_MSM is not set

View File

@ -90,6 +90,7 @@ extern pgprot_t pgprot_default;
#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
#define __PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN)
#endif /* __ASSEMBLY__ */
@ -97,7 +98,7 @@ extern pgprot_t pgprot_default;
#define __P001 __PAGE_READONLY
#define __P010 __PAGE_COPY
#define __P011 __PAGE_COPY
#define __P100 __PAGE_READONLY_EXEC
#define __P100 __PAGE_EXECONLY
#define __P101 __PAGE_READONLY_EXEC
#define __P110 __PAGE_COPY_EXEC
#define __P111 __PAGE_COPY_EXEC
@ -106,7 +107,7 @@ extern pgprot_t pgprot_default;
#define __S001 __PAGE_READONLY
#define __S010 __PAGE_SHARED
#define __S011 __PAGE_SHARED
#define __S100 __PAGE_READONLY_EXEC
#define __S100 __PAGE_EXECONLY
#define __S101 __PAGE_READONLY_EXEC
#define __S110 __PAGE_SHARED_EXEC
#define __S111 __PAGE_SHARED_EXEC
@ -143,6 +144,8 @@ extern struct page *empty_zero_page;
#define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE))
#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
#define pte_valid_ng(pte) \
((pte_val(pte) & (PTE_VALID | PTE_NG)) == (PTE_VALID | PTE_NG))
#define pte_valid_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
#define pte_valid_not_user(pte) \
@ -221,7 +224,7 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
if (pte_valid_user(pte)) {
if (pte_valid_ng(pte)) {
if (!pte_special(pte) && pte_exec(pte))
__sync_icache_dcache(pte, addr);
if (pte_dirty(pte) && pte_write(pte))

View File

@ -178,8 +178,7 @@ static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
good_area:
/*
* Check that the permissions on the VMA allow for the fault which
* occurred. If we encountered a write or exec fault, we must have
* appropriate permissions, otherwise we allow any permission.
* occurred.
*/
if (!(vma->vm_flags & vm_flags)) {
fault = VM_FAULT_BADACCESS;
@ -201,7 +200,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
struct task_struct *tsk;
struct mm_struct *mm;
int fault, sig, code;
unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
unsigned long vm_flags = VM_READ | VM_WRITE;
unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
tsk = current;

View File

@ -342,6 +342,7 @@ void diag_dci_wakeup_clients()
struct list_head *start, *temp;
struct diag_dci_client_tbl *entry = NULL;
mutex_lock(&driver->dci_mutex);
list_for_each_safe(start, temp, &driver->dci_client_list) {
entry = list_entry(start, struct diag_dci_client_tbl, track);
@ -357,6 +358,7 @@ void diag_dci_wakeup_clients()
DCI_DATA_TYPE);
}
}
mutex_unlock(&driver->dci_mutex);
}
void dci_data_drain_work_fn(struct work_struct *work)
@ -367,6 +369,7 @@ void dci_data_drain_work_fn(struct work_struct *work)
struct diag_dci_buf_peripheral_t *proc_buf = NULL;
struct diag_dci_buffer_t *buf_temp = NULL;
mutex_lock(&driver->dci_mutex);
list_for_each_safe(start, temp, &driver->dci_client_list) {
entry = list_entry(start, struct diag_dci_client_tbl, track);
for (i = 0; i < entry->num_buffers; i++) {
@ -396,6 +399,7 @@ void dci_data_drain_work_fn(struct work_struct *work)
DCI_DATA_TYPE);
}
}
mutex_unlock(&driver->dci_mutex);
dci_timer_in_progress = 0;
}
@ -561,6 +565,8 @@ start:
buf += header_len + dci_pkt_len; /* advance to next DCI pkt */
}
end:
if (err)
return err;
/* wake up all sleeping DCI clients which have some data */
diag_dci_wakeup_clients();
dci_check_drain_timer();
@ -621,6 +627,8 @@ int diag_process_smd_dci_read_data(struct diag_smd_info *smd_info, void *buf,
buf += 5 + dci_pkt_len; /* advance to next DCI pkt */
}
if (err)
return err;
/* wake up all sleeping DCI clients which have some data */
diag_dci_wakeup_clients();
dci_check_drain_timer();
@ -719,7 +727,6 @@ static struct dci_pkt_req_entry_t *diag_register_dci_transaction(int uid,
if (!entry)
return NULL;
mutex_lock(&driver->dci_mutex);
driver->dci_tag++;
entry->client_id = client_id;
entry->uid = uid;
@ -727,7 +734,6 @@ static struct dci_pkt_req_entry_t *diag_register_dci_transaction(int uid,
pr_debug("diag: Registering DCI cmd req, client_id: %d, uid: %d, tag:%d\n",
entry->client_id, entry->uid, entry->tag);
list_add_tail(&entry->track, &driver->dci_req_list);
mutex_unlock(&driver->dci_mutex);
return entry;
}
@ -967,9 +973,11 @@ void extract_dci_pkt_rsp(unsigned char *buf, int len, int data_source,
return;
}
mutex_lock(&driver->dci_mutex);
req_entry = diag_dci_get_request_entry(tag);
if (!req_entry) {
pr_err("diag: No matching client for DCI data\n");
pr_err_ratelimited("diag: No matching client for DCI data\n");
mutex_unlock(&driver->dci_mutex);
return;
}
@ -977,18 +985,17 @@ void extract_dci_pkt_rsp(unsigned char *buf, int len, int data_source,
if (!entry) {
pr_err("diag: In %s, couldn't find client entry, id:%d\n",
__func__, req_entry->client_id);
mutex_unlock(&driver->dci_mutex);
return;
}
save_req_uid = req_entry->uid;
/* Remove the headers and send only the response to this function */
mutex_lock(&driver->dci_mutex);
delete_flag = diag_dci_remove_req_entry(temp, rsp_len, req_entry);
if (delete_flag < 0) {
mutex_unlock(&driver->dci_mutex);
return;
}
mutex_unlock(&driver->dci_mutex);
rsp_buf = entry->buffers[data_source].buf_cmd;
@ -1006,6 +1013,7 @@ void extract_dci_pkt_rsp(unsigned char *buf, int len, int data_source,
if (!temp_buf) {
pr_err("diag: DCI realloc failed\n");
mutex_unlock(&rsp_buf->data_mutex);
mutex_unlock(&driver->dci_mutex);
return;
} else {
rsp_buf->data = temp_buf;
@ -1041,6 +1049,7 @@ void extract_dci_pkt_rsp(unsigned char *buf, int len, int data_source,
* for log and event buffers to be full
*/
dci_add_buffer_to_list(entry, rsp_buf);
mutex_unlock(&driver->dci_mutex);
}
static void copy_dci_event(unsigned char *buf, int len,
@ -1176,6 +1185,7 @@ void extract_dci_events(unsigned char *buf, int len, int data_source, int token)
the event data */
total_event_len = 2 + 10 + payload_len_field + payload_len;
/* parse through event mask tbl of each client and check mask */
mutex_lock(&driver->dci_mutex);
list_for_each_safe(start, temp, &driver->dci_client_list) {
entry = list_entry(start, struct diag_dci_client_tbl,
track);
@ -1187,6 +1197,7 @@ void extract_dci_events(unsigned char *buf, int len, int data_source, int token)
entry, data_source);
}
}
mutex_unlock(&driver->dci_mutex);
}
}
@ -1278,6 +1289,7 @@ void extract_dci_log(unsigned char *buf, int len, int data_source, int token)
}
/* parse through log mask table of each client and check mask */
mutex_lock(&driver->dci_mutex);
list_for_each_safe(start, temp, &driver->dci_client_list) {
entry = list_entry(start, struct diag_dci_client_tbl, track);
if (entry->client_info.token != token)
@ -1289,6 +1301,7 @@ void extract_dci_log(unsigned char *buf, int len, int data_source, int token)
copy_dci_log(buf, len, entry, data_source);
}
}
mutex_unlock(&driver->dci_mutex);
}
void diag_update_smd_dci_work_fn(struct work_struct *work)
@ -1768,9 +1781,11 @@ static int diag_process_dci_pkt_rsp(unsigned char *buf, int len)
return -EIO;
}
mutex_lock(&driver->dci_mutex);
dci_entry = diag_dci_get_client_entry(client_id);
if (!dci_entry) {
pr_err("diag: Invalid client %d in %s\n", client_id, __func__);
mutex_unlock(&driver->dci_mutex);
return DIAG_DCI_NO_REG;
}
@ -1779,6 +1794,7 @@ static int diag_process_dci_pkt_rsp(unsigned char *buf, int len)
pr_debug("diag: command not supported %d %d %d",
header->cmd_code, header->subsys_id,
header->subsys_cmd_code);
mutex_unlock(&driver->dci_mutex);
return DIAG_DCI_SEND_DATA_FAIL;
}
@ -1786,6 +1802,7 @@ static int diag_process_dci_pkt_rsp(unsigned char *buf, int len)
if (common_cmd < 0) {
pr_debug("diag: error in checking common command, %d\n",
common_cmd);
mutex_unlock(&driver->dci_mutex);
return DIAG_DCI_SEND_DATA_FAIL;
}
@ -1804,6 +1821,7 @@ static int diag_process_dci_pkt_rsp(unsigned char *buf, int len)
if (driver->in_busy_dcipktdata) {
pr_err("diag: In %s, apps dci buffer is still busy. Dropping packet\n",
__func__);
mutex_unlock(&driver->dci_mutex);
return -EAGAIN;
}
@ -1811,8 +1829,10 @@ static int diag_process_dci_pkt_rsp(unsigned char *buf, int len)
req_entry = diag_register_dci_transaction(req_uid, client_id);
if (!req_entry) {
pr_alert("diag: registering new DCI transaction failed\n");
mutex_unlock(&driver->dci_mutex);
return DIAG_DCI_NO_REG;
}
mutex_unlock(&driver->dci_mutex);
/*
* If the client has registered for remote data, route the packet to the
@ -1918,9 +1938,11 @@ int diag_process_dci_transaction(unsigned char *buf, int len)
read_len += sizeof(int);
/* find client table entry */
mutex_lock(&driver->dci_mutex);
dci_entry = diag_dci_get_client_entry(client_id);
if (!dci_entry) {
pr_err("diag: In %s, invalid client\n", __func__);
mutex_unlock(&driver->dci_mutex);
return ret;
}
client_token = dci_entry->client_info.token;
@ -1928,6 +1950,7 @@ int diag_process_dci_transaction(unsigned char *buf, int len)
if (num_codes == 0 || (num_codes >= (USER_SPACE_DATA - 8)/2)) {
pr_err("diag: dci: Invalid number of log codes %d\n",
num_codes);
mutex_unlock(&driver->dci_mutex);
return -EIO;
}
@ -1935,6 +1958,7 @@ int diag_process_dci_transaction(unsigned char *buf, int len)
if (!head_log_mask_ptr) {
pr_err("diag: dci: Invalid Log mask pointer in %s\n",
__func__);
mutex_unlock(&driver->dci_mutex);
return -ENOMEM;
}
pr_debug("diag: head of dci log mask %p\n", head_log_mask_ptr);
@ -1944,6 +1968,7 @@ int diag_process_dci_transaction(unsigned char *buf, int len)
if (read_len >= USER_SPACE_DATA) {
pr_err("diag: dci: Invalid length for log type in %s",
__func__);
mutex_unlock(&driver->dci_mutex);
return -EIO;
}
log_code = *(uint16_t *)temp;
@ -1952,6 +1977,7 @@ int diag_process_dci_transaction(unsigned char *buf, int len)
byte_index = item_num/8 + 2;
if (byte_index >= (DCI_MAX_ITEMS_PER_LOG_CODE+2)) {
pr_err("diag: dci: Log type, invalid byte index\n");
mutex_unlock(&driver->dci_mutex);
return ret;
}
byte_mask = 0x01 << (item_num % 8);
@ -1977,6 +2003,7 @@ int diag_process_dci_transaction(unsigned char *buf, int len)
}
if (!found) {
pr_err("diag: dci equip id not found\n");
mutex_unlock(&driver->dci_mutex);
return ret;
}
*(log_mask_ptr+1) = 1; /* set the dirty byte */
@ -1999,6 +2026,7 @@ int diag_process_dci_transaction(unsigned char *buf, int len)
diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
/* send updated mask to peripherals */
ret = dci_ops_tbl[client_token].send_log_mask(client_token);
mutex_unlock(&driver->dci_mutex);
} else if (*(int *)temp == DCI_EVENT_TYPE) {
/* Minimum length of a event mask config is 12 + 4 bytes for
atleast one event id to be set or reset. */
@ -2021,9 +2049,11 @@ int diag_process_dci_transaction(unsigned char *buf, int len)
read_len += sizeof(int);
/* find client table entry */
mutex_lock(&driver->dci_mutex);
dci_entry = diag_dci_get_client_entry(client_id);
if (!dci_entry) {
pr_err("diag: In %s, invalid client\n", __func__);
mutex_unlock(&driver->dci_mutex);
return ret;
}
client_token = dci_entry->client_info.token;
@ -2034,6 +2064,7 @@ int diag_process_dci_transaction(unsigned char *buf, int len)
if (num_codes == 0 || (num_codes >= (USER_SPACE_DATA - 8)/2)) {
pr_err("diag: dci: Invalid number of event ids %d\n",
num_codes);
mutex_unlock(&driver->dci_mutex);
return -EIO;
}
@ -2041,6 +2072,7 @@ int diag_process_dci_transaction(unsigned char *buf, int len)
if (!event_mask_ptr) {
pr_err("diag: dci: Invalid event mask pointer in %s\n",
__func__);
mutex_unlock(&driver->dci_mutex);
return -ENOMEM;
}
pr_debug("diag: head of dci event mask %p\n", event_mask_ptr);
@ -2049,12 +2081,14 @@ int diag_process_dci_transaction(unsigned char *buf, int len)
if (read_len >= USER_SPACE_DATA) {
pr_err("diag: dci: Invalid length for event type in %s",
__func__);
mutex_unlock(&driver->dci_mutex);
return -EIO;
}
event_id = *(int *)temp;
byte_index = event_id/8;
if (byte_index >= DCI_EVENT_MASK_SIZE) {
pr_err("diag: dci: Event type, invalid byte index\n");
mutex_unlock(&driver->dci_mutex);
return ret;
}
bit_index = event_id % 8;
@ -2080,6 +2114,7 @@ int diag_process_dci_transaction(unsigned char *buf, int len)
diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
/* send updated mask to peripherals */
ret = dci_ops_tbl[client_token].send_event_mask(client_token);
mutex_unlock(&driver->dci_mutex);
} else {
pr_alert("diag: Incorrect DCI transaction\n");
}

View File

@ -1547,7 +1547,10 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
goto exit;
}
exit:
mutex_unlock(&driver->diagchar_mutex);
if (driver->data_ready[index] & DCI_DATA_TYPE) {
mutex_lock(&driver->dci_mutex);
/* Copy the type of data being passed */
data_type = driver->data_ready[index] & DCI_DATA_TYPE;
list_for_each_safe(start, temp, &driver->dci_client_list) {
@ -1557,15 +1560,26 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
continue;
if (!entry->in_service)
continue;
COPY_USER_SPACE_OR_EXIT(buf + ret, data_type,
sizeof(int));
COPY_USER_SPACE_OR_EXIT(buf + ret,
entry->client_info.token, sizeof(int));
if (copy_to_user(buf + ret, &data_type, sizeof(int))) {
mutex_unlock(&driver->dci_mutex);
goto end;
}
ret += sizeof(int);
if (copy_to_user(buf + ret, &entry->client_info.token,
sizeof(int))) {
mutex_unlock(&driver->dci_mutex);
goto end;
}
ret += sizeof(int);
copy_dci_data = 1;
exit_stat = diag_copy_dci(buf, count, entry, &ret);
mutex_lock(&driver->diagchar_mutex);
driver->data_ready[index] ^= DCI_DATA_TYPE;
if (exit_stat == 1)
goto exit;
mutex_unlock(&driver->diagchar_mutex);
if (exit_stat == 1) {
mutex_unlock(&driver->dci_mutex);
goto end;
}
}
for (i = 0; i < NUM_SMD_DCI_CHANNELS; i++) {
if (driver->smd_dci[i].ch) {
@ -1584,10 +1598,10 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
}
}
}
goto exit;
mutex_unlock(&driver->dci_mutex);
goto end;
}
exit:
mutex_unlock(&driver->diagchar_mutex);
end:
/*
* Flush any read that is currently pending on DCI data and
* command channnels. This will ensure that the next read is not

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2002,2007-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2002,2007-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -451,7 +451,7 @@ int adreno_perfcounter_query_group(struct adreno_device *adreno_dev,
return 0;
}
t = min_t(unsigned int, group->reg_count, count);
t = min_t(int, group->reg_count, count);
buf = kmalloc(t * sizeof(unsigned int), GFP_KERNEL);
if (buf == NULL) {

View File

@ -1793,7 +1793,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },

View File

@ -755,6 +755,7 @@
#define USB_DEVICE_ID_SONY_PS3_BDREMOTE 0x0306
#define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268
#define USB_DEVICE_ID_SONY_PS4_CONTROLLER 0x05c4
#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2 0x09cc
#define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f
#define USB_VENDOR_ID_SOUNDGRAPH 0x15c2

View File

@ -271,8 +271,8 @@ static const struct hid_device_id sony_devices[] = {
.driver_data = VAIO_RDESC_CONSTANT },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
.driver_data = DUALSHOCK4_CONTROLLER_USB },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
.driver_data = DUALSHOCK4_CONTROLLER_BT },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
.driver_data = DUALSHOCK4_CONTROLLER_USB },
{ }
};
MODULE_DEVICE_TABLE(hid, sony_devices);

View File

@ -36,7 +36,7 @@
#include <media/msmb_camera.h>
#if defined(CONFIG_SONY_CAM_V4L2)
#define MSM_POST_EVT_TIMEOUT 4000
#define MSM_POST_EVT_TIMEOUT 14000
#else
#define MSM_POST_EVT_TIMEOUT 5000
#endif

View File

@ -726,20 +726,25 @@ int output_buffer_cache_invalidate(struct msm_vidc_inst *inst,
return 0;
}
static bool valid_v4l2_buffer(struct v4l2_buffer *b,
struct msm_vidc_inst *inst) {
enum vidc_ports port =
!V4L2_TYPE_IS_MULTIPLANAR(b->type) ? MAX_PORT_NUM :
b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ? CAPTURE_PORT :
b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ? OUTPUT_PORT :
MAX_PORT_NUM;
return port != MAX_PORT_NUM &&
inst->fmts[port]->num_planes == b->length;
}
int msm_vidc_prepare_buf(void *instance, struct v4l2_buffer *b)
{
struct msm_vidc_inst *inst = instance;
if (!inst || !b)
if (!inst || !b || !valid_v4l2_buffer(b, inst))
return -EINVAL;
if (!V4L2_TYPE_IS_MULTIPLANAR(b->type) || !b->length ||
(b->length > VIDEO_MAX_PLANES)) {
dprintk(VIDC_ERR, "%s: wrong input params\n",
__func__);
return -EINVAL;
}
if (is_dynamic_output_buffer_mode(b, inst)) {
dprintk(VIDC_ERR, "%s: not supported in dynamic buffer mode\n",
__func__);
@ -889,16 +894,9 @@ int msm_vidc_qbuf(void *instance, struct v4l2_buffer *b)
int rc = 0;
int i;
if (!inst || !b)
if (!inst || !b || !valid_v4l2_buffer(b, inst))
return -EINVAL;
if (!V4L2_TYPE_IS_MULTIPLANAR(b->type) || !b->length ||
(b->length > VIDEO_MAX_PLANES)) {
dprintk(VIDC_ERR, "%s: wrong input params\n",
__func__);
return -EINVAL;
}
if (is_dynamic_output_buffer_mode(b, inst)) {
if (b->m.planes[0].reserved[0])
inst->map_output_buffer = true;
@ -974,16 +972,9 @@ int msm_vidc_dqbuf(void *instance, struct v4l2_buffer *b)
struct buffer_info *buffer_info = NULL;
int i = 0, rc = 0;
if (!inst || !b)
if (!inst || !b || !valid_v4l2_buffer(b, inst))
return -EINVAL;
if (!V4L2_TYPE_IS_MULTIPLANAR(b->type) || !b->length ||
(b->length > VIDEO_MAX_PLANES)) {
dprintk(VIDC_ERR, "%s: wrong input params\n",
__func__);
return -EINVAL;
}
if (inst->session_type == MSM_VIDC_DECODER)
rc = msm_vdec_dqbuf(instance, b);
if (inst->session_type == MSM_VIDC_ENCODER)

View File

@ -1308,6 +1308,9 @@ int buf_ref_get(struct msm_vidc_inst *inst, struct buffer_info *binfo)
dprintk(VIDC_DBG, "%s: invalid ref_cnt: %d\n", __func__, cnt);
cnt = -EINVAL;
}
if (cnt == 2)
inst->buffers_held_in_driver++;
dprintk(VIDC_DBG, "REF_GET[%d] fd[0] = %d\n", cnt, binfo->fd[0]);
return cnt;
@ -1355,6 +1358,7 @@ int buf_ref_put(struct msm_vidc_inst *inst, struct buffer_info *binfo)
binfo->fd[0]);
binfo->pending_deletion = true;
} else if (qbuf_again) {
inst->buffers_held_in_driver--;
rc = qbuf_dynamic_buf(inst, binfo);
if (!rc)
return rc;

View File

@ -136,17 +136,8 @@ static inline int get_pending_bufs_fw(struct msm_vidc_inst *inst)
if (inst->state >= MSM_VIDC_OPEN_DONE &&
inst->state < MSM_VIDC_STOP_DONE) {
struct buffer_info *temp = NULL;
fw_out_qsize = inst->count.ftb - inst->count.fbd;
list_for_each_entry(temp, &inst->registeredbufs.list, list) {
if (temp->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
!temp->inactive &&
atomic_read(&temp->ref_count) == 2) {
buffers_in_driver++;
}
}
buffers_in_driver = inst->buffers_held_in_driver;
}
return fw_out_qsize + buffers_in_driver;

View File

@ -313,6 +313,7 @@ struct msm_vidc_inst {
atomic_t seq_hdr_reqs;
struct v4l2_ctrl **ctrls;
bool dcvs_mode;
u32 buffers_held_in_driver;
};
extern struct msm_vidc_drv *vidc_driver;

View File

@ -1191,8 +1191,8 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
WARN_ON(host->cmd);
/* Wait max 10 ms */
timeout = 10000;
/* Wait max 100 ms */
timeout = 100000;
mask = SDHCI_CMD_INHIBIT;
if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))

View File

@ -993,7 +993,7 @@ union IpaHwMhiDlUlSyncCmdData_t {
*/
struct ipa_uc_ctx {
bool uc_inited;
atomic_t uc_loaded;
bool uc_loaded;
bool uc_failed;
struct mutex uc_lock;
struct completion uc_completion;

View File

@ -242,7 +242,7 @@ int ipa_uc_state_check(void)
return -EFAULT;
}
if (!atomic_read(&ipa_ctx->uc_ctx.uc_loaded)) {
if (!ipa_ctx->uc_ctx.uc_loaded) {
IPAERR("uC is not loaded\n");
return -EFAULT;
}
@ -396,7 +396,7 @@ static void ipa_uc_response_hdlr(enum ipa_irq_type interrupt,
/* General handling */
if (ipa_ctx->uc_ctx.uc_sram_mmio->responseOp ==
IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED) {
atomic_set(&ipa_ctx->uc_ctx.uc_loaded, 1);
ipa_ctx->uc_ctx.uc_loaded = true;
IPAERR("IPA uC loaded\n");
/*
* The proxy vote is held until uC is loaded to ensure that

View File

@ -13,9 +13,7 @@
* drops below 4096 pages and kill processes with a oom_score_adj value of 0 or
* higher when the free memory drops below 1024 pages.
*
* The driver considers memory used for caches to be free, but if a large
* percentage of the cached memory is locked this can be very inaccurate
* and processes may not get killed until the normal oom killer is triggered.
* The driver considers memory used for caches to be free.
*
* Copyright (C) 2007-2008 Google, Inc.
*
@ -348,10 +346,11 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
other_free = global_page_state(NR_FREE_PAGES);
if (global_page_state(NR_SHMEM) + total_swapcache_pages() <
global_page_state(NR_FILE_PAGES))
if (global_page_state(NR_SHMEM) + global_page_state(NR_MLOCK_FILE) +
total_swapcache_pages() < global_page_state(NR_FILE_PAGES))
other_file = global_page_state(NR_FILE_PAGES) -
global_page_state(NR_SHMEM) -
global_page_state(NR_MLOCK_FILE) -
total_swapcache_pages();
else
other_file = 0;

View File

@ -1,25 +0,0 @@
config ZRAM
tristate "Compressed RAM block device support"
depends on BLOCK && SYSFS && ZSMALLOC
select LZO_COMPRESS
select LZO_DECOMPRESS
default n
help
Creates virtual block devices called /dev/zramX (X = 0, 1, ...).
Pages written to these disks are compressed and stored in memory
itself. These disks allow very fast I/O and compression provides
good amounts of memory savings.
It has several use cases, for example: /tmp storage, use as swap
disks and maybe many more.
See zram.txt for more information.
Project home: <https://compcache.googlecode.com/>
config ZRAM_DEBUG
bool "Compressed RAM block device debug support"
depends on ZRAM
default n
help
This option adds additional debugging code to the compressed
RAM block device driver.

View File

@ -1,3 +0,0 @@
zram-y := zram_drv.o
obj-$(CONFIG_ZRAM) += zram.o

View File

@ -1,77 +0,0 @@
zram: Compressed RAM based block devices
----------------------------------------
Project home: http://compcache.googlecode.com/
* Introduction
The zram module creates RAM based block devices named /dev/zram<id>
(<id> = 0, 1, ...). Pages written to these disks are compressed and stored
in memory itself. These disks allow very fast I/O and compression provides
good amounts of memory savings. Some of the usecases include /tmp storage,
use as swap disks, various caches under /var and maybe many more :)
Statistics for individual zram devices are exported through sysfs nodes at
/sys/block/zram<id>/
* Usage
Following shows a typical sequence of steps for using zram.
1) Load Module:
modprobe zram num_devices=4
This creates 4 devices: /dev/zram{0,1,2,3}
(num_devices parameter is optional. Default: 1)
2) Set Disksize
Set disk size by writing the value to sysfs node 'disksize'.
The value can be either in bytes or you can use mem suffixes.
Examples:
# Initialize /dev/zram0 with 50MB disksize
echo $((50*1024*1024)) > /sys/block/zram0/disksize
# Using mem suffixes
echo 256K > /sys/block/zram0/disksize
echo 512M > /sys/block/zram0/disksize
echo 1G > /sys/block/zram0/disksize
3) Activate:
mkswap /dev/zram0
swapon /dev/zram0
mkfs.ext4 /dev/zram1
mount /dev/zram1 /tmp
4) Stats:
Per-device statistics are exported as various nodes under
/sys/block/zram<id>/
disksize
num_reads
num_writes
invalid_io
notify_free
discard
zero_pages
orig_data_size
compr_data_size
mem_used_total
5) Deactivate:
swapoff /dev/zram0
umount /dev/zram1
6) Reset:
Write any positive value to 'reset' sysfs node
echo 1 > /sys/block/zram0/reset
echo 1 > /sys/block/zram1/reset
This frees all the memory allocated for the given device and
resets the disksize to zero. You must set the disksize again
before reusing the device.
Please report any problems at:
- Mailing list: linux-mm-cc at laptop dot org
- Issue tracker: http://code.google.com/p/compcache/issues/list
Nitin Gupta
ngupta@vflare.org

File diff suppressed because it is too large Load Diff

View File

@ -1,125 +0,0 @@
/*
* Compressed RAM block device
*
* Copyright (C) 2008, 2009, 2010 Nitin Gupta
*
* This code is released using a dual license strategy: BSD/GPL
* You can choose the licence that better fits your requirements.
*
* Released under the terms of 3-clause BSD License
* Released under the terms of GNU General Public License Version 2.0
*
* Project home: http://compcache.googlecode.com
*/
#ifndef _ZRAM_DRV_H_
#define _ZRAM_DRV_H_
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include "../zsmalloc/zsmalloc.h"
/*
* Some arbitrary value. This is just to catch
* invalid value for num_devices module parameter.
*/
static const unsigned max_num_devices = 32;
/*-- Configurable parameters */
/*
* Pages that compress to size greater than this are stored
* uncompressed in memory.
*/
static const size_t max_zpage_size = PAGE_SIZE / 10 * 9;
/*
* NOTE: max_zpage_size must be less than or equal to:
* ZS_MAX_ALLOC_SIZE. Otherwise, zs_malloc() would
* always return failure.
*/
/*-- End of configurable params */
#define SECTOR_SHIFT 9
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#define SECTORS_PER_PAGE_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
#define SECTORS_PER_PAGE (1 << SECTORS_PER_PAGE_SHIFT)
#define ZRAM_LOGICAL_BLOCK_SHIFT 12
#define ZRAM_LOGICAL_BLOCK_SIZE (1 << ZRAM_LOGICAL_BLOCK_SHIFT)
#define ZRAM_SECTOR_PER_LOGICAL_BLOCK \
(1 << (ZRAM_LOGICAL_BLOCK_SHIFT - SECTOR_SHIFT))
/* Flags for zram pages (table[page_no].flags) */
enum zram_pageflags {
/* Page consists entirely of zeros */
ZRAM_ZERO,
__NR_ZRAM_PAGEFLAGS,
};
/*-- Data structures */
/* Allocated for each disk page */
struct table {
unsigned long handle;
u16 size; /* object size (excluding header) */
u8 count; /* object ref count (not yet used) */
u8 flags;
} __aligned(4);
/*
* All 64bit fields should only be manipulated by 64bit atomic accessors.
* All modifications to 32bit counter should be protected by zram->lock.
*/
struct zram_stats {
atomic64_t compr_size; /* compressed size of pages stored */
atomic64_t num_reads; /* failed + successful */
atomic64_t num_writes; /* --do-- */
atomic64_t failed_reads; /* should NEVER! happen */
atomic64_t failed_writes; /* can happen when memory is too low */
atomic64_t invalid_io; /* non-page-aligned I/O requests */
atomic64_t notify_free; /* no. of swap slot free notifications */
u32 pages_zero; /* no. of zero filled pages */
u32 pages_stored; /* no. of pages currently stored */
u32 good_compress; /* % of pages with compression ratio<=50% */
u32 bad_compress; /* % of pages with compression ratio>=75% */
};
struct zram_meta {
void *compress_workmem;
void *compress_buffer;
struct table *table;
struct zs_pool *mem_pool;
};
struct zram_slot_free {
unsigned long index;
struct zram_slot_free *next;
};
struct zram {
struct zram_meta *meta;
struct rw_semaphore lock; /* protect compression buffers, table,
* 32bit stat counters against concurrent
* notifications, reads and writes */
struct work_struct free_work; /* handle pending free request */
struct zram_slot_free *slot_free_rq; /* list head of free request */
struct request_queue *queue;
struct gendisk *disk;
int init_done;
/* Prevent concurrent execution of device init, reset and R/W request */
struct rw_semaphore init_lock;
/*
* This is the limit on amount of *uncompressed* worth of data
* we can store in a disk.
*/
u64 disksize; /* bytes */
spinlock_t slot_free_lock;
struct zram_stats stats;
};
#endif

View File

@ -99,6 +99,16 @@ static long validate_and_copy(unsigned int *cmd, unsigned long *arg,
goto validate_exit;
}
break;
case MSM_THERMAL_GET_CLUSTER_FREQUENCY_PLAN:
if (query->clock_freq.cluster_num >= NR_CPUS) {
ret = -EINVAL;
goto validate_exit;
}
case MSM_THERMAL_GET_CLUSTER_VOLTAGE_PLAN:
if (query->voltage.cluster_num >= NR_CPUS) {
ret = -EINVAL;
goto validate_exit;
}
default:
break;
}

View File

@ -413,13 +413,22 @@ static void msm_hs_resource_unvote(struct msm_hs_port *msm_uport)
atomic_dec(&msm_uport->clk_count);
pm_runtime_mark_last_busy(uport->dev);
pm_runtime_put_autosuspend(uport->dev);
__pm_relax(&msm_uport->ws);
}
/* Vote for resources before accessing them */
static void msm_hs_resource_vote(struct msm_hs_port *msm_uport)
{
int ret;
struct uart_port *uport = &(msm_uport->uport);
pm_runtime_get_sync(uport->dev);
__pm_stay_awake(&msm_uport->ws);
ret = pm_runtime_get_sync(uport->dev);
if (ret < 0 || msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
MSM_HS_WARN("%s(): %p runtime PM callback not invoked",
__func__, uport->dev);
msm_hs_pm_resume(uport->dev);
}
atomic_inc(&msm_uport->clk_count);
}
@ -692,6 +701,7 @@ static int msm_hs_remove(struct platform_device *pdev)
msm_uport->rx.buffer = NULL;
msm_uport->rx.rbuffer = 0;
wakeup_source_trash(&msm_uport->ws);
destroy_workqueue(msm_uport->hsuart_wq);
mutex_destroy(&msm_uport->mtx);
@ -2248,7 +2258,6 @@ void msm_hs_request_clock_off(struct uart_port *uport)
if (msm_uport->obs)
atomic_set(&msm_uport->client_req_state, 1);
msm_hs_resource_unvote(msm_uport);
__pm_relax(&msm_uport->ws);
}
EXPORT_SYMBOL(msm_hs_request_clock_off);
@ -2257,13 +2266,6 @@ void msm_hs_request_clock_on(struct uart_port *uport)
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
msm_hs_resource_vote(UARTDM_TO_MSM(uport));
__pm_stay_awake(&msm_uport->ws);
if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
MSM_HS_WARN("%s(): %p runtime PM callback not invoked",
__func__, uport->dev);
msm_hs_pm_resume(uport->dev);
}
/* Clear the flag */
if (msm_uport->obs)
atomic_set(&msm_uport->client_req_state, 0);
@ -2506,7 +2508,6 @@ static int msm_hs_startup(struct uart_port *uport)
struct msm_hs_rx *rx = &msm_uport->rx;
struct sps_pipe *sps_pipe_handle_tx = tx->cons.pipe_handle;
struct sps_pipe *sps_pipe_handle_rx = rx->prod.pipe_handle;
struct tty_struct *tty = msm_uport->uport.state->port.tty;
rfr_level = uport->fifosize;
if (rfr_level > 16)
@ -2540,7 +2541,6 @@ static int msm_hs_startup(struct uart_port *uport)
}
}
wakeup_source_init(&msm_uport->ws, tty->name);
ret = msm_hs_config_uart_gpios(uport);
if (ret) {
MSM_HS_ERR("Uart GPIO request failed\n");
@ -3196,6 +3196,7 @@ static int msm_hs_probe(struct platform_device *pdev)
int core_irqres, bam_irqres, wakeup_irqres;
struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
unsigned long data;
struct tty_struct *tty;
if (pdev->dev.of_node) {
dev_dbg(&pdev->dev, "device tree enabled\n");
@ -3410,6 +3411,8 @@ static int msm_hs_probe(struct platform_device *pdev)
uport->line = pdata->userid;
ret = uart_add_one_port(&msm_hs_driver, uport);
if (!ret) {
tty = msm_uport->uport.state->port.tty;
wakeup_source_init(&msm_uport->ws, tty->name);
msm_hs_clk_bus_unvote(msm_uport);
msm_serial_hs_rt_init(uport);
return ret;
@ -3488,8 +3491,8 @@ static void msm_hs_shutdown(struct uart_port *uport)
else
disable_irq(uport->irq);
wakeup_source_trash(&msm_uport->ws);
msm_uport->wakeup.enabled = false;
/* make sure tx lh finishes */
flush_kthread_worker(&msm_uport->tx.kworker);
ret = wait_event_timeout(msm_uport->tx.wait,

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -3264,8 +3264,7 @@ static int mdss_mdp_hw_cursor_pipe_update(struct msm_fb_data_type *mfd,
if ((size != mfd->cursor_buf_size) || (pre_img_data != img->data)) {
pre_img_data = (char *)(img->data);
#else
if ((size != mfd->cursor_buf_size) ||
(cursor->set & FB_CUR_SETIMAGE)) {
if (size != mfd->cursor_buf_size) {
#endif /* CONFIG_FB_MSM_MDSS_SPECIFIC_PANEL */
pr_debug("allocating cursor mem size:%zd\n", size);

View File

@ -157,13 +157,11 @@ static int ext4_readdir(struct file *filp,
&filp->f_ra, filp,
index, 1);
filp->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
bh = ext4_bread(NULL, inode, map.m_lblk, 0, &err);
bh = ext4_bread(NULL, inode, map.m_lblk, 0);
if (IS_ERR(bh))
return PTR_ERR(bh);
}
/*
* We ignore I/O errors on directories so users have a chance
* of recovering data when there's a bad sector
*/
if (!bh) {
if (!dir_has_error) {
EXT4_ERROR_FILE(filp, 0,

View File

@ -2068,9 +2068,8 @@ extern int ext4_trim_fs(struct super_block *, struct fstrim_range *,
/* inode.c */
struct buffer_head *ext4_getblk(handle_t *, struct inode *,
ext4_lblk_t, int, int *);
struct buffer_head *ext4_bread(handle_t *, struct inode *,
ext4_lblk_t, int, int *);
ext4_lblk_t, int);
struct buffer_head *ext4_bread(handle_t *, struct inode *, ext4_lblk_t, int);
int ext4_get_block_write(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
int ext4_get_block(struct inode *inode, sector_t iblock,

View File

@ -811,7 +811,7 @@ int ext4_get_block(struct inode *inode, sector_t iblock,
* `handle' can be NULL if create is zero
*/
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
ext4_lblk_t block, int create, int *errp)
ext4_lblk_t block, int create)
{
struct ext4_map_blocks map;
struct buffer_head *bh;
@ -825,19 +825,14 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
create ? EXT4_GET_BLOCKS_CREATE : 0);
/* ensure we send some value back into *errp */
*errp = 0;
if (create && err == 0)
err = -ENOSPC; /* should never happen */
if (err == 0)
return create ? ERR_PTR(-ENOSPC) : NULL;
if (err < 0)
*errp = err;
if (err <= 0)
return NULL;
return ERR_PTR(err);
bh = sb_getblk(inode->i_sb, map.m_pblk);
if (unlikely(!bh)) {
*errp = -ENOMEM;
return NULL;
return ERR_PTR(-ENOMEM);
}
if (map.m_flags & EXT4_MAP_NEW) {
J_ASSERT(create != 0);
@ -866,7 +861,6 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
BUFFER_TRACE(bh, "not a new buffer");
}
if (fatal) {
*errp = fatal;
brelse(bh);
bh = NULL;
}
@ -874,22 +868,21 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
}
struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
ext4_lblk_t block, int create, int *err)
ext4_lblk_t block, int create)
{
struct buffer_head *bh;
bh = ext4_getblk(handle, inode, block, create, err);
if (!bh)
bh = ext4_getblk(handle, inode, block, create);
if (IS_ERR(bh))
return bh;
if (buffer_uptodate(bh))
if (!bh || buffer_uptodate(bh))
return bh;
ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
wait_on_buffer(bh);
if (buffer_uptodate(bh))
return bh;
put_bh(bh);
*err = -EIO;
return NULL;
return ERR_PTR(-EIO);
}
int ext4_walk_page_buffers(handle_t *handle,

View File

@ -53,7 +53,7 @@ static struct buffer_head *ext4_append(handle_t *handle,
ext4_lblk_t *block)
{
struct buffer_head *bh;
int err = 0;
int err;
if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
((inode->i_size >> 10) >=
@ -62,9 +62,9 @@ static struct buffer_head *ext4_append(handle_t *handle,
*block = inode->i_size >> inode->i_sb->s_blocksize_bits;
bh = ext4_bread(handle, inode, *block, 1, &err);
if (!bh)
return ERR_PTR(err);
bh = ext4_bread(handle, inode, *block, 1);
if (IS_ERR(bh))
return bh;
inode->i_size += inode->i_sb->s_blocksize;
EXT4_I(inode)->i_disksize = inode->i_size;
err = ext4_journal_get_write_access(handle, bh);
@ -93,21 +93,23 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
{
struct buffer_head *bh;
struct ext4_dir_entry *dirent;
int err = 0, is_dx_block = 0;
int is_dx_block = 0;
bh = ext4_bread(NULL, inode, block, 0, &err);
if (!bh) {
if (err == 0) {
ext4_error_inode(inode, __func__, line, block,
"Directory hole found");
return ERR_PTR(-EIO);
}
bh = ext4_bread(NULL, inode, block, 0);
if (IS_ERR(bh)) {
__ext4_warning(inode->i_sb, __func__, line,
"error reading directory block "
"(ino %lu, block %lu)", inode->i_ino,
(unsigned long) block);
return ERR_PTR(err);
"error %ld reading directory block(ino %lu, block %lu)",
PTR_ERR(bh), inode->i_ino,
(unsigned long) block);
return bh;
}
if (!bh) {
ext4_error_inode(inode, __func__, line, block,
"Directory hole found");
return ERR_PTR(-EIO);
}
dirent = (struct ext4_dir_entry *) bh->b_data;
/* Determine whether or not we have an index block */
if (is_dx(inode)) {
@ -640,7 +642,9 @@ struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash;
struct stats stats;
printk("%s%3u:%03u hash %8x/%8x ",levels?"":" ", i, block, hash, range);
if (!(bh = ext4_bread (NULL,dir, block, 0,&err))) continue;
bh = ext4_bread(NULL, dir, block, 0);
if (!bh || IS_ERR(bh))
continue;
stats = levels?
dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1):
dx_show_leaf(hinfo, (struct ext4_dir_entry_2 *) bh->b_data, blocksize, 0);
@ -1293,7 +1297,7 @@ restart:
break;
}
num++;
bh = ext4_getblk(NULL, dir, b++, 0, &err);
bh = ext4_getblk(NULL, dir, b++, 0);
bh_use[ra_max] = bh;
if (bh)
ll_rw_block(READ | REQ_META | REQ_PRIO,

View File

@ -5141,7 +5141,6 @@ static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
{
struct inode *inode = sb_dqopt(sb)->files[type];
ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
int err = 0;
int offset = off & (sb->s_blocksize - 1);
int tocopy;
size_t toread;
@ -5156,9 +5155,9 @@ static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
while (toread > 0) {
tocopy = sb->s_blocksize - offset < toread ?
sb->s_blocksize - offset : toread;
bh = ext4_bread(NULL, inode, blk, 0, &err);
if (err)
return err;
bh = ext4_bread(NULL, inode, blk, 0);
if (IS_ERR(bh))
return PTR_ERR(bh);
if (!bh) /* A hole? */
memset(data, 0, tocopy);
else
@ -5179,8 +5178,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
{
struct inode *inode = sb_dqopt(sb)->files[type];
ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
int err = 0;
int offset = off & (sb->s_blocksize - 1);
int err, offset = off & (sb->s_blocksize - 1);
struct buffer_head *bh;
handle_t *handle = journal_current_handle();
@ -5201,13 +5199,15 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
return -EIO;
}
bh = ext4_bread(handle, inode, blk, 1, &err);
bh = ext4_bread(handle, inode, blk, 1);
if (IS_ERR(bh))
return PTR_ERR(bh);
if (!bh)
goto out;
err = ext4_journal_get_write_access(handle, bh);
if (err) {
brelse(bh);
goto out;
return err;
}
lock_buffer(bh);
memcpy(bh->b_data+offset, data, len);
@ -5216,8 +5216,6 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
err = ext4_handle_dirty_metadata(handle, NULL, bh);
brelse(bh);
out:
if (err)
return err;
if (inode->i_size < off + len) {
i_size_write(inode, off + len);
EXT4_I(inode)->i_disksize = inode->i_size;

View File

@ -65,6 +65,9 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
return NULL;
ff->rw_lower_file = NULL;
ff->shortcircuit_enabled = 0;
if (fc->shortcircuit_io)
ff->shortcircuit_enabled = 1;
ff->fc = fc;
ff->reserved_req = fuse_request_alloc(0);
if (unlikely(!ff->reserved_req)) {
@ -987,7 +990,7 @@ static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
return err;
}
if (ff && ff->rw_lower_file)
if (ff && ff->shortcircuit_enabled && ff->rw_lower_file)
ret_val = fuse_shortcircuit_aio_read(iocb, iov, nr_segs, pos);
else
ret_val = generic_file_aio_read(iocb, iov, nr_segs, pos);
@ -1277,7 +1280,7 @@ static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
if (err)
goto out;
if (ff && ff->rw_lower_file) {
if (ff && ff->shortcircuit_enabled && ff->rw_lower_file) {
/* Use iocb->ki_pos instead of pos to handle the cases of files
* that are opened with O_APPEND. For example if multiple
* processes open the same file with O_APPEND then the
@ -1875,6 +1878,9 @@ static const struct vm_operations_struct fuse_file_vm_ops = {
static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
struct fuse_file *ff = file->private_data;
ff->shortcircuit_enabled = 0;
if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
fuse_link_write_file(file);
@ -1885,6 +1891,10 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
{
struct fuse_file *ff = file->private_data;
ff->shortcircuit_enabled = 0;
/* Can't provide the coherency needed for MAP_SHARED */
if (vma->vm_flags & VM_MAYSHARE)
return -ENODEV;

View File

@ -170,6 +170,7 @@ struct fuse_file {
/* the read write file */
struct file *rw_lower_file;
bool shortcircuit_enabled;
};
/** One input argument of a request */

View File

@ -1,358 +0,0 @@
/* drivers/input/touchscreen/maxim_sti.c
*
* Maxim SmartTouch Imager Touchscreen Driver
*
* Copyright (c)2013 Maxim Integrated Products, Inc.
* Copyright (C) 2013, NVIDIA Corporation. All Rights Reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
/*
* Copyright (C) 2014 Sony Mobile Communications Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2, as
* published by the Free Software Foundation.
*/
#ifndef __MAXIM_STI_H__
#define __MAXIM_STI_H__
#ifdef __KERNEL__
#include <net/genetlink.h>
#include <net/sock.h>
#else
#include <stdlib.h>
#include "genetlink.h"
#endif
#define DRV_VER_MAJOR 6
#define DRV_VER_MINOR 7
#define DRIVER_VERSION ((DRV_VER_MAJOR << 8) | DRV_VER_MINOR)
#define DRIVER_RELEASE "July 21, 2015"
#define DRIVER_PROTOCOL 0x0103
/****************************************************************************\
* Netlink: common kernel/user space macros *
\****************************************************************************/
#define NL_BUF_SIZE 30720
#define NL_ATTR_FIRST(nptr) \
((struct nlattr *)((void *)nptr + NLMSG_HDRLEN + GENL_HDRLEN))
#define NL_ATTR_LAST(nptr) \
((struct nlattr *)((void *)nptr + \
NLMSG_ALIGN(((struct nlmsghdr *)nptr)->nlmsg_len)))
#define NL_SIZE(nptr) NLMSG_ALIGN(((struct nlmsghdr *)nptr)->nlmsg_len)
#define NL_TYPE(nptr) (((struct nlmsghdr *)nptr)->nlmsg_type)
#define NL_SEQ(nptr) (((struct nlmsghdr *)nptr)->nlmsg_seq)
#define NL_OK(nptr) (NL_TYPE(nptr) >= NLMSG_MIN_TYPE)
#define NL_ATTR_VAL(aptr, type) ((type *)((void *)aptr + NLA_HDRLEN))
#define NL_ATTR_NEXT(aptr) \
((struct nlattr *)((void *)aptr + \
NLA_ALIGN(((struct nlattr *)aptr)->nla_len)))
#define GENL_CMP(name1, name2) strncmp(name1, name2, GENL_NAMSIZ)
#define GENL_COPY(name1, name2) strlcpy(name1, name2, GENL_NAMSIZ)
#define GENL_CHK(name) (strlen(name) > (GENL_NAMSIZ - 1))
#define MSG_TYPE(nptr) NL_ATTR_FIRST(nptr)->nla_type
#define MSG_PAYLOAD(nptr) NL_ATTR_VAL(NL_ATTR_FIRST(nptr), void)
/****************************************************************************\
* Netlink: common kernel/user space inline functions *
\****************************************************************************/
/*
 * Prepare a generic-netlink request header in @buf: zero both the netlink
 * and the genl header, then fill in the family id, the NLM_F_REQUEST flag,
 * the sequence number and the destination command.  The message starts out
 * with an empty payload (nlmsg_len covers exactly the two headers).
 */
static inline void
nl_msg_init(void *buf, __u16 family_id, __u32 sequence, __u8 dst)
{
	struct nlmsghdr *hdr = (struct nlmsghdr *)buf;
	struct genlmsghdr *ghdr =
		(struct genlmsghdr *)((char *)buf + NLMSG_HDRLEN);

	memset(buf, 0, NLMSG_HDRLEN + GENL_HDRLEN);

	hdr->nlmsg_len = NLMSG_HDRLEN + GENL_HDRLEN;
	hdr->nlmsg_type = family_id;
	hdr->nlmsg_flags = NLM_F_REQUEST;
	hdr->nlmsg_seq = sequence;

	ghdr->cmd = dst;
}
/*
 * Reserve space for one attribute at the tail of the netlink message in
 * @buf.  On success the attribute header is filled in, nlmsg_len grows by
 * the aligned attribute size, and a pointer to the payload area is
 * returned.  Returns NULL when the attribute would overflow NL_BUF_SIZE.
 */
static inline void
*nl_alloc_attr(void *buf, __u16 type, __u16 len)
{
	struct nlmsghdr *hdr = (struct nlmsghdr *)buf;
	struct nlattr *tail = NL_ATTR_LAST(hdr);
	__u32 needed = NLMSG_ALIGN(NLA_HDRLEN + len);

	/* refuse the attribute rather than overrun the fixed-size buffer */
	if (NL_SIZE(buf) + needed > NL_BUF_SIZE)
		return NULL;

	tail->nla_len = NLA_HDRLEN + len;
	tail->nla_type = type;
	hdr->nlmsg_len += needed;

	return NL_ATTR_VAL(tail, void);
}
/*
 * Append one attribute of @len bytes copied from @ptr to the netlink
 * message in @buf.  Returns 0 on success, or -EPERM when the attribute
 * does not fit in the message buffer.
 */
static inline int
nl_add_attr(void *buf, __u16 type, void *ptr, __u16 len)
{
	void *payload = nl_alloc_attr(buf, type, len);

	if (payload == NULL)
		return -EPERM;

	memcpy(payload, ptr, len);
	return 0;
}
/****************************************************************************\
* Netlink: multicast groups enum and name strings *
\****************************************************************************/
enum {
MC_DRIVER,
MC_FUSION,
MC_REQUIRED_GROUPS,
};
#define MC_DRIVER_NAME "driver"
#define MC_FUSION_NAME "fusion"
#define NL_FAMILY_VERSION 1
#define TF_FAMILY_NAME "touch_fusion"
/****************************************************************************\
* Netlink: common parameter and message definitions *
\****************************************************************************/
enum {
DR_STATE_BASIC,
DR_STATE_ACTIVE,
DR_STATE_SUSPEND,
DR_STATE_RESUME,
DR_STATE_FAULT,
};
enum {
DR_INPUT_FINGER,
DR_INPUT_STYLUS,
DR_INPUT_ERASER,
};
enum {
DR_IRQ_FALLING_EDGE,
DR_IRQ_RISING_EDGE,
};
enum {
DR_ADD_MC_GROUP,
DR_ECHO_REQUEST,
DR_CHIP_READ,
DR_CHIP_WRITE,
DR_CHIP_RESET,
DR_GET_IRQLINE,
DR_DELAY,
DR_CHIP_ACCESS_METHOD,
DR_CONFIG_IRQ,
DR_CONFIG_INPUT,
DR_CONFIG_WATCHDOG,
DR_DECONFIG,
DR_INPUT,
DR_RESUME_ACK,
DR_LEGACY_FWDL,
DR_LEGACY_ACCELERATION,
DR_HANDSHAKE,
DR_CONFIG_FW,
DR_IDLE,
DR_SYSFS_ACK,
DR_TF_STATUS,
};
#define DR_SYSFS_UPDATE_NONE 0x0000
#define DR_SYSFS_UPDATE_BIT_GLOVE 0
#define DR_SYSFS_UPDATE_BIT_CHARGER 1
#define DR_SYSFS_UPDATE_BIT_LCD_FPS 2
#define DR_SYSFS_ACK_GLOVE 0x5A5A5A5A
#define DR_SYSFS_ACK_CHARGER 0xA5A5A5A5
#define DR_SYSFS_ACK_LCD_FPS 0xC3C3C3C3
#define TF_STATUS_DEFAULT_LOADED (1 << 0)
#define TF_STATUS_BUSY (1 << 1)
enum {
DR_NO_CHARGER,
DR_WIRED_CHARGER,
DR_WIRELESS_CHARGER,
};
struct __attribute__ ((__packed__)) dr_add_mc_group {
__u8 number;
char name[GENL_NAMSIZ];
};
struct __attribute__ ((__packed__)) dr_echo_request {
__u32 cookie;
};
struct __attribute__ ((__packed__)) dr_chip_read {
__u16 address;
__u16 length;
};
struct __attribute__ ((__packed__)) dr_chip_write {
__u16 address;
__u16 length;
__u8 data[0];
};
struct __attribute__ ((__packed__)) dr_chip_reset {
__u8 state;
};
struct __attribute__ ((__packed__)) dr_delay {
__u32 period;
};
struct __attribute__ ((__packed__)) dr_chip_access_method {
__u8 method;
};
#define OLD_NIRQ_PARAMS 27
#define MAX_IRQ_PARAMS 37
struct __attribute__ ((__packed__)) dr_config_irq {
__u8 irq_params;
__u16 irq_param[MAX_IRQ_PARAMS];
__u8 irq_method;
__u8 irq_edge;
};
struct __attribute__ ((__packed__)) dr_config_input {
__u16 x_range;
__u16 y_range;
};
struct __attribute__ ((__packed__)) dr_config_watchdog {
__u32 pid;
};
struct __attribute__ ((__packed__)) dr_input_event {
__u8 id;
__u8 tool_type;
__u16 x;
__u16 y;
__u8 z;
};
#define MAX_INPUT_EVENTS 10
struct __attribute__ ((__packed__)) dr_input {
struct dr_input_event event[MAX_INPUT_EVENTS];
__u8 events;
};
struct __attribute__ ((__packed__)) dr_legacy_acceleration {
__u8 enable;
};
struct __attribute__ ((__packed__)) dr_handshake {
__u16 tf_ver;
__u16 chip_id;
};
struct __attribute__ ((__packed__)) dr_sysfs_ack {
__u32 type;
};
struct __attribute__ ((__packed__)) dr_config_fw {
__u16 fw_ver;
__u16 fw_protocol;
};
struct __attribute__ ((__packed__)) dr_idle {
__u8 idle;
};
struct __attribute__ ((__packed__)) dr_tf_status {
__u32 tf_status;
};
enum {
FU_ECHO_RESPONSE,
FU_CHIP_READ_RESULT,
FU_IRQLINE_STATUS,
FU_ASYNC_DATA,
FU_RESUME,
FU_HANDSHAKE_RESPONSE,
FU_SYSFS_INFO,
};
struct __attribute__ ((__packed__)) fu_echo_response {
__u32 cookie;
__u8 driver_state;
};
struct __attribute__ ((__packed__)) fu_chip_read_result {
__u16 address;
__u16 length;
__u8 data[0];
};
struct __attribute__ ((__packed__)) fu_irqline_status {
__u8 status;
};
struct __attribute__ ((__packed__)) fu_async_data {
__u16 address;
__u16 length;
__u16 status;
__u8 data[0];
};
struct __attribute__ ((__packed__)) fu_handshake_response {
__u16 driver_ver;
__u16 panel_id;
__u16 driver_protocol;
};
struct __attribute__ ((__packed__)) fu_sysfs_info {
__u8 type;
__u16 glove_value;
__u16 charger_value;
__u16 lcd_fps_value;
};
#ifdef __KERNEL__
/****************************************************************************\
* Kernel platform data structure *
\****************************************************************************/
#define MAXIM_STI_NAME "maxim_sti"
/*
 * Board/platform configuration for the maxim_sti touchscreen driver.
 * NOTE(review): member meanings below are inferred from names and nearby
 * definitions in this header — confirm against the consuming driver.
 */
struct maxim_sti_pdata {
char *touch_fusion;		/* presumably the touch_fusion user-space binary — confirm */
char *config_file;		/* presumably chip configuration file name — confirm */
char *nl_family;		/* netlink family name (cf. TF_FAMILY_NAME above) */
char *fw_name;			/* firmware image name */
u32 nl_mc_groups;		/* netlink multicast group count (cf. MC_REQUIRED_GROUPS) */
u32 chip_access_method;		/* cf. DR_CHIP_ACCESS_METHOD message */
u32 default_reset_state;	/* initial reset-line state (cf. dr_chip_reset) */
u32 tx_buf_size;
u32 rx_buf_size;
u32 gpio_reset;			/* GPIO wired to the chip reset line */
u32 gpio_irq;			/* GPIO wired to the interrupt line */
/* board hooks: init/teardown, reset-line control, IRQ-line readback */
int (*init)(struct device *dev,
struct maxim_sti_pdata *pdata, bool init);
void (*reset)(struct maxim_sti_pdata *pdata, int value);
int (*irq)(struct maxim_sti_pdata *pdata);
u32 stylus_support;
u32 wakeup_gesture_support;
};
#endif
#endif

View File

@ -122,6 +122,7 @@ enum zone_stat_item {
NR_ACTIVE_FILE, /* " " " " " */
NR_UNEVICTABLE, /* " " " " " */
NR_MLOCK, /* mlock()ed pages found and moved off LRU */
NR_MLOCK_FILE, /* mlock()ed pages residing in filecache */
NR_ANON_PAGES, /* Mapped anonymous pages */
NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
only modified from process context */

View File

@ -19,8 +19,8 @@
* under normal circumstances, used to verify that nobody uses
* non-initialized list entries.
*/
#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA)
#define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA)
/********** include/linux/timer.h **********/
/*

View File

@ -1948,6 +1948,8 @@ struct cfg80211_qos_map {
* the driver, and will be valid until passed to cfg80211_scan_done().
* For scan results, call cfg80211_inform_bss(); you can call this outside
* the scan/scan_done bracket too.
* @abort_scan: Tell the driver to abort an ongoing scan. The driver shall
* indicate the status of the scan through cfg80211_scan_done().
*
* @auth: Request to authenticate with the specified peer
* @assoc: Request to (re)associate with the specified peer
@ -2177,6 +2179,7 @@ struct cfg80211_ops {
int (*scan)(struct wiphy *wiphy,
struct cfg80211_scan_request *request);
void (*abort_scan)(struct wiphy *wiphy, struct wireless_dev *wdev);
int (*auth)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_auth_request *req);

View File

@ -689,6 +689,10 @@
* QoS mapping is relevant for IP packets, it is only valid during an
* association. This is cleared on disassociation and AP restart.
*
* @NL80211_CMD_ABORT_SCAN: Stop an ongoing scan. Returns -ENOENT if a scan is
* not running. The driver indicates the status of the scan through
* cfg80211_scan_done().
*
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@ -860,6 +864,8 @@ enum nl80211_commands {
NL80211_CMD_SET_QOS_MAP,
NL80211_CMD_ABORT_SCAN = NL80211_CMD_SET_QOS_MAP + 10,
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */

View File

@ -6830,6 +6830,9 @@ SYSCALL_DEFINE5(perf_event_open,
if (err)
return err;
if (attr.constraint_duplicate || attr.__reserved_1)
return -EINVAL;
if (!attr.exclude_kernel) {
if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
return -EACCES;

View File

@ -17,6 +17,7 @@
#define __MM_INTERNAL_H
#include <linux/mm.h>
#include <linux/mm_inline.h>
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
unsigned long floor, unsigned long ceiling);
@ -219,6 +220,9 @@ static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
if (!TestSetPageMlocked(page)) {
mod_zone_page_state(page_zone(page), NR_MLOCK,
hpage_nr_pages(page));
if (page_is_file_cache(page))
mod_zone_page_state(page_zone(page), NR_MLOCK_FILE,
hpage_nr_pages(page));
count_vm_event(UNEVICTABLE_PGMLOCKED);
}
return 1;
@ -253,8 +257,12 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
local_irq_save(flags);
__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
if (page_is_file_cache(page))
__mod_zone_page_state(page_zone(page), NR_MLOCK_FILE, -nr_pages);
SetPageMlocked(newpage);
__mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
if (page_is_file_cache(page))
__mod_zone_page_state(page_zone(newpage), NR_MLOCK_FILE, nr_pages);
local_irq_restore(flags);
}
}

View File

@ -18,6 +18,7 @@
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>
#include <linux/mm_inline.h>
#include "internal.h"
@ -58,6 +59,9 @@ void clear_page_mlock(struct page *page)
mod_zone_page_state(page_zone(page), NR_MLOCK,
-hpage_nr_pages(page));
if (page_is_file_cache(page))
mod_zone_page_state(page_zone(page), NR_MLOCK_FILE,
-hpage_nr_pages(page));
count_vm_event(UNEVICTABLE_PGCLEARED);
if (!isolate_lru_page(page)) {
putback_lru_page(page);
@ -82,6 +86,9 @@ void mlock_vma_page(struct page *page)
if (!TestSetPageMlocked(page)) {
mod_zone_page_state(page_zone(page), NR_MLOCK,
hpage_nr_pages(page));
if (page_is_file_cache(page))
mod_zone_page_state(page_zone(page), NR_MLOCK_FILE,
hpage_nr_pages(page));
count_vm_event(UNEVICTABLE_PGMLOCKED);
if (!isolate_lru_page(page))
putback_lru_page(page);
@ -113,6 +120,8 @@ unsigned int munlock_vma_page(struct page *page)
if (TestClearPageMlocked(page)) {
unsigned int nr_pages = hpage_nr_pages(page);
mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
if (page_is_file_cache(page))
mod_zone_page_state(page_zone(page), NR_MLOCK_FILE, -nr_pages);
page_mask = nr_pages - 1;
if (!isolate_lru_page(page)) {
int ret = SWAP_AGAIN;

View File

@ -712,6 +712,7 @@ const char * const vmstat_text[] = {
"nr_active_file",
"nr_unevictable",
"nr_mlock",
"nr_mlock_file",
"nr_anon_pages",
"nr_mapped",
"nr_file_pages",

View File

@ -5408,6 +5408,21 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
return err;
}
/*
 * NL80211_CMD_ABORT_SCAN handler: ask the driver to abort the scan
 * currently in flight on this wdev.  Completion is asynchronous — per the
 * @abort_scan op documentation, the driver reports the aborted scan via
 * cfg80211_scan_done(); this handler only dispatches the request.
 */
static int nl80211_abort_scan(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct wireless_dev *wdev = info->user_ptr[1];
/* a driver without an abort_scan op cannot service the request */
if (!rdev->ops->abort_scan)
return -EOPNOTSUPP;
/* no scan outstanding: nothing to abort */
if (!rdev->scan_req)
return -ENOENT;
rdev_abort_scan(rdev, wdev);
return 0;
}
static int nl80211_start_sched_scan(struct sk_buff *skb,
struct genl_info *info)
{
@ -9009,6 +9024,14 @@ static struct genl_ops nl80211_ops[] = {
.internal_flags = NL80211_FLAG_NEED_WDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
.cmd = NL80211_CMD_ABORT_SCAN,
.doit = nl80211_abort_scan,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL80211_FLAG_NEED_WDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
.cmd = NL80211_CMD_GET_SCAN,
.policy = nl80211_policy,

View File

@ -378,6 +378,14 @@ static inline int rdev_scan(struct cfg80211_registered_device *rdev,
return ret;
}
/*
 * Trace-wrapped dispatch to the driver's abort_scan op.  Callers must
 * have verified rdev->ops->abort_scan is non-NULL (as nl80211_abort_scan
 * does) — it is invoked unconditionally here.
 */
static inline void rdev_abort_scan(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev)
{
trace_rdev_abort_scan(&rdev->wiphy, wdev);
rdev->ops->abort_scan(&rdev->wiphy, wdev);
trace_rdev_return_void(&rdev->wiphy);
}
static inline int rdev_auth(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct cfg80211_auth_request *req)

View File

@ -2562,6 +2562,10 @@ TRACE_EVENT(cfg80211_ft_event,
WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(target_ap))
);
DEFINE_EVENT(wiphy_wdev_evt, rdev_abort_scan,
TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
TP_ARGS(wiphy, wdev)
);
#endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */
#undef TRACE_INCLUDE_PATH