Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/padovan/bluetooth-next
commit 9662cbc712
@@ -210,6 +210,7 @@ enum {

#define LMP_EV4 0x01
#define LMP_EV5 0x02
#define LMP_NO_BREDR 0x20
#define LMP_LE 0x40

#define LMP_SNIFF_SUBR 0x02
@@ -745,6 +746,14 @@ struct hci_rp_read_bd_addr {
bdaddr_t bdaddr;
} __packed;

#define HCI_OP_READ_DATA_BLOCK_SIZE 0x100a
struct hci_rp_read_data_block_size {
__u8 status;
__le16 max_acl_len;
__le16 block_len;
__le16 num_blocks;
} __packed;

#define HCI_OP_WRITE_PAGE_SCAN_ACTIVITY 0x0c1c
struct hci_cp_write_page_scan_activity {
__le16 interval;
@ -61,18 +61,11 @@ struct inquiry_cache {
|
||||
|
||||
struct hci_conn_hash {
|
||||
struct list_head list;
|
||||
spinlock_t lock;
|
||||
unsigned int acl_num;
|
||||
unsigned int sco_num;
|
||||
unsigned int le_num;
|
||||
};
|
||||
|
||||
struct hci_chan_hash {
|
||||
struct list_head list;
|
||||
spinlock_t lock;
|
||||
unsigned int num;
|
||||
};
|
||||
|
||||
struct bdaddr_list {
|
||||
struct list_head list;
|
||||
bdaddr_t bdaddr;
|
||||
@ -124,7 +117,7 @@ struct adv_entry {
|
||||
#define NUM_REASSEMBLY 4
|
||||
struct hci_dev {
|
||||
struct list_head list;
|
||||
spinlock_t lock;
|
||||
struct mutex lock;
|
||||
atomic_t refcnt;
|
||||
|
||||
char name[8];
|
||||
@ -188,6 +181,11 @@ struct hci_dev {
|
||||
unsigned int sco_pkts;
|
||||
unsigned int le_pkts;
|
||||
|
||||
__u16 block_len;
|
||||
__u16 block_mtu;
|
||||
__u16 num_blocks;
|
||||
__u16 block_cnt;
|
||||
|
||||
unsigned long acl_last_tx;
|
||||
unsigned long sco_last_tx;
|
||||
unsigned long le_last_tx;
|
||||
@ -200,10 +198,13 @@ struct hci_dev {
|
||||
__u16 discov_timeout;
|
||||
struct delayed_work discov_off;
|
||||
|
||||
struct delayed_work service_cache;
|
||||
|
||||
struct timer_list cmd_timer;
|
||||
struct tasklet_struct cmd_task;
|
||||
struct tasklet_struct rx_task;
|
||||
struct tasklet_struct tx_task;
|
||||
|
||||
struct work_struct rx_work;
|
||||
struct work_struct cmd_work;
|
||||
struct work_struct tx_work;
|
||||
|
||||
struct sk_buff_head rx_q;
|
||||
struct sk_buff_head raw_q;
|
||||
@ -232,7 +233,7 @@ struct hci_dev {
|
||||
struct list_head remote_oob_data;
|
||||
|
||||
struct list_head adv_entries;
|
||||
struct timer_list adv_timer;
|
||||
struct delayed_work adv_work;
|
||||
|
||||
struct hci_dev_stats stat;
|
||||
|
||||
@ -301,15 +302,12 @@ struct hci_conn {
|
||||
unsigned int sent;
|
||||
|
||||
struct sk_buff_head data_q;
|
||||
struct hci_chan_hash chan_hash;
|
||||
struct list_head chan_list;
|
||||
|
||||
struct timer_list disc_timer;
|
||||
struct delayed_work disc_work;
|
||||
struct timer_list idle_timer;
|
||||
struct timer_list auto_accept_timer;
|
||||
|
||||
struct work_struct work_add;
|
||||
struct work_struct work_del;
|
||||
|
||||
struct device dev;
|
||||
atomic_t devref;
|
||||
|
||||
@ -390,15 +388,15 @@ static inline void hci_conn_hash_init(struct hci_dev *hdev)
|
||||
{
|
||||
struct hci_conn_hash *h = &hdev->conn_hash;
|
||||
INIT_LIST_HEAD(&h->list);
|
||||
spin_lock_init(&h->lock);
|
||||
h->acl_num = 0;
|
||||
h->sco_num = 0;
|
||||
h->le_num = 0;
|
||||
}
|
||||
|
||||
static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
|
||||
{
|
||||
struct hci_conn_hash *h = &hdev->conn_hash;
|
||||
list_add(&c->list, &h->list);
|
||||
list_add_rcu(&c->list, &h->list);
|
||||
switch (c->type) {
|
||||
case ACL_LINK:
|
||||
h->acl_num++;
|
||||
@ -416,7 +414,10 @@ static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
|
||||
static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
|
||||
{
|
||||
struct hci_conn_hash *h = &hdev->conn_hash;
|
||||
list_del(&c->list);
|
||||
|
||||
list_del_rcu(&c->list);
|
||||
synchronize_rcu();
|
||||
|
||||
switch (c->type) {
|
||||
case ACL_LINK:
|
||||
h->acl_num--;
|
||||
@ -451,14 +452,18 @@ static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
|
||||
__u16 handle)
|
||||
{
|
||||
struct hci_conn_hash *h = &hdev->conn_hash;
|
||||
struct list_head *p;
|
||||
struct hci_conn *c;
|
||||
|
||||
list_for_each(p, &h->list) {
|
||||
c = list_entry(p, struct hci_conn, list);
|
||||
if (c->handle == handle)
|
||||
rcu_read_lock();
|
||||
|
||||
list_for_each_entry_rcu(c, &h->list, list) {
|
||||
if (c->handle == handle) {
|
||||
rcu_read_unlock();
|
||||
return c;
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -466,14 +471,19 @@ static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
|
||||
__u8 type, bdaddr_t *ba)
|
||||
{
|
||||
struct hci_conn_hash *h = &hdev->conn_hash;
|
||||
struct list_head *p;
|
||||
struct hci_conn *c;
|
||||
|
||||
list_for_each(p, &h->list) {
|
||||
c = list_entry(p, struct hci_conn, list);
|
||||
if (c->type == type && !bacmp(&c->dst, ba))
|
||||
rcu_read_lock();
|
||||
|
||||
list_for_each_entry_rcu(c, &h->list, list) {
|
||||
if (c->type == type && !bacmp(&c->dst, ba)) {
|
||||
rcu_read_unlock();
|
||||
return c;
|
||||
}
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -481,39 +491,22 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
|
||||
__u8 type, __u16 state)
|
||||
{
|
||||
struct hci_conn_hash *h = &hdev->conn_hash;
|
||||
struct list_head *p;
|
||||
struct hci_conn *c;
|
||||
|
||||
list_for_each(p, &h->list) {
|
||||
c = list_entry(p, struct hci_conn, list);
|
||||
if (c->type == type && c->state == state)
|
||||
rcu_read_lock();
|
||||
|
||||
list_for_each_entry_rcu(c, &h->list, list) {
|
||||
if (c->type == type && c->state == state) {
|
||||
rcu_read_unlock();
|
||||
return c;
|
||||
}
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
|
||||
return NULL;
|
||||
}
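The lookup helpers above drop the caller-held list lock in favour of RCU: connections are added with list_add_rcu(), removed with list_del_rcu() plus synchronize_rcu(), and readers walk the hash under rcu_read_lock(). The sketch below shows that pattern in isolation; the type and function names are invented for illustration, and, as in the helpers above, a returned entry is assumed to be kept alive by higher-level locking such as hdev->lock.

/* Illustrative sketch only -- not part of the diff. */
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_conn {			/* toy stand-in for struct hci_conn */
	struct list_head list;
	u16 handle;
};

static LIST_HEAD(demo_conn_list);

/* Writers still serialize against each other (e.g. under hdev->lock);
 * RCU only lets readers walk the list without taking that lock. */
static void demo_conn_add(struct demo_conn *c)
{
	list_add_rcu(&c->list, &demo_conn_list);
}

static void demo_conn_del(struct demo_conn *c)
{
	list_del_rcu(&c->list);
	synchronize_rcu();		/* wait for in-flight readers */
	kfree(c);
}

static struct demo_conn *demo_conn_lookup(u16 handle)
{
	struct demo_conn *c;

	rcu_read_lock();
	list_for_each_entry_rcu(c, &demo_conn_list, list) {
		if (c->handle == handle) {
			rcu_read_unlock();
			return c;	/* kept alive by the caller's locking */
		}
	}
	rcu_read_unlock();

	return NULL;
}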
static inline void hci_chan_hash_init(struct hci_conn *c)
|
||||
{
|
||||
struct hci_chan_hash *h = &c->chan_hash;
|
||||
INIT_LIST_HEAD(&h->list);
|
||||
spin_lock_init(&h->lock);
|
||||
h->num = 0;
|
||||
}
|
||||
|
||||
static inline void hci_chan_hash_add(struct hci_conn *c, struct hci_chan *chan)
|
||||
{
|
||||
struct hci_chan_hash *h = &c->chan_hash;
|
||||
list_add(&chan->list, &h->list);
|
||||
h->num++;
|
||||
}
|
||||
|
||||
static inline void hci_chan_hash_del(struct hci_conn *c, struct hci_chan *chan)
|
||||
{
|
||||
struct hci_chan_hash *h = &c->chan_hash;
|
||||
list_del(&chan->list);
|
||||
h->num--;
|
||||
}
|
||||
|
||||
void hci_acl_connect(struct hci_conn *conn);
|
||||
void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
|
||||
void hci_add_sco(struct hci_conn *conn, __u16 handle);
|
||||
@ -527,7 +520,7 @@ void hci_conn_check_pending(struct hci_dev *hdev);
|
||||
|
||||
struct hci_chan *hci_chan_create(struct hci_conn *conn);
|
||||
int hci_chan_del(struct hci_chan *chan);
|
||||
void hci_chan_hash_flush(struct hci_conn *conn);
|
||||
void hci_chan_list_flush(struct hci_conn *conn);
|
||||
|
||||
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
|
||||
__u8 sec_level, __u8 auth_type);
|
||||
@ -538,7 +531,6 @@ int hci_conn_change_link_key(struct hci_conn *conn);
|
||||
int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
|
||||
|
||||
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
|
||||
void hci_conn_enter_sniff_mode(struct hci_conn *conn);
|
||||
|
||||
void hci_conn_hold_device(struct hci_conn *conn);
|
||||
void hci_conn_put_device(struct hci_conn *conn);
|
||||
@ -546,7 +538,7 @@ void hci_conn_put_device(struct hci_conn *conn);
|
||||
static inline void hci_conn_hold(struct hci_conn *conn)
|
||||
{
|
||||
atomic_inc(&conn->refcnt);
|
||||
del_timer(&conn->disc_timer);
|
||||
cancel_delayed_work_sync(&conn->disc_work);
|
||||
}
|
||||
|
||||
static inline void hci_conn_put(struct hci_conn *conn)
|
||||
@ -565,7 +557,9 @@ static inline void hci_conn_put(struct hci_conn *conn)
|
||||
} else {
|
||||
timeo = msecs_to_jiffies(10);
|
||||
}
|
||||
mod_timer(&conn->disc_timer, jiffies + timeo);
|
||||
cancel_delayed_work_sync(&conn->disc_work);
|
||||
queue_delayed_work(conn->hdev->workqueue,
|
||||
&conn->disc_work, jiffies + timeo);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -597,10 +591,8 @@ static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
try_module_get(d->owner) ? __hci_dev_hold(d) : NULL; \
})

#define hci_dev_lock(d) spin_lock(&d->lock)
#define hci_dev_unlock(d) spin_unlock(&d->lock)
#define hci_dev_lock_bh(d) spin_lock_bh(&d->lock)
#define hci_dev_unlock_bh(d) spin_unlock_bh(&d->lock)
#define hci_dev_lock(d) mutex_lock(&d->lock)
#define hci_dev_unlock(d) mutex_unlock(&d->lock)

struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
@@ -960,12 +952,16 @@ int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr);
/* HCI info for socket */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

/* HCI socket flags */
#define HCI_PI_MGMT_INIT 0

struct hci_pinfo {
struct bt_sock bt;
struct hci_dev *hdev;
struct hci_filter filter;
__u32 cmsg_mask;
unsigned short channel;
unsigned long flags;
};

/* HCI security filter */
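The hunk above replaces the spinlock-backed hci_dev_lock()/hci_dev_unlock() pair (and their _bh variants) with mutex-based macros, which goes hand in hand with moving HCI processing from tasklets to workqueues later in the series. A minimal usage sketch follows; the struct and names are stand-ins, not the real header.

/* Illustrative sketch only -- not part of the diff. */
#include <linux/mutex.h>
#include <linux/list.h>

struct demo_hdev {			/* reduced stand-in for struct hci_dev */
	struct mutex lock;		/* was spinlock_t before this series */
	struct list_head uuids;
};

#define demo_hdev_lock(d)	mutex_lock(&(d)->lock)
#define demo_hdev_unlock(d)	mutex_unlock(&(d)->lock)

/* Callers now run in sleepable process context (work items), so the
 * bottom-half-disabling _bh variants are no longer needed. */
static void demo_touch_hdev_state(struct demo_hdev *hdev)
{
	demo_hdev_lock(hdev);
	/* ... modify state that the spinlock used to guard ... */
	demo_hdev_unlock(hdev);
}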
@ -482,10 +482,11 @@ struct l2cap_chan {
|
||||
__u32 remote_acc_lat;
|
||||
__u32 remote_flush_to;
|
||||
|
||||
struct timer_list chan_timer;
|
||||
struct timer_list retrans_timer;
|
||||
struct timer_list monitor_timer;
|
||||
struct timer_list ack_timer;
|
||||
struct delayed_work chan_timer;
|
||||
struct delayed_work retrans_timer;
|
||||
struct delayed_work monitor_timer;
|
||||
struct delayed_work ack_timer;
|
||||
|
||||
struct sk_buff *tx_send_head;
|
||||
struct sk_buff_head tx_q;
|
||||
struct sk_buff_head srej_q;
|
||||
@ -521,7 +522,7 @@ struct l2cap_conn {
|
||||
__u8 info_state;
|
||||
__u8 info_ident;
|
||||
|
||||
struct timer_list info_timer;
|
||||
struct delayed_work info_work;
|
||||
|
||||
spinlock_t lock;
|
||||
|
||||
@ -535,7 +536,7 @@ struct l2cap_conn {
|
||||
struct smp_chan *smp_chan;
|
||||
|
||||
struct list_head chan_l;
|
||||
rwlock_t chan_lock;
|
||||
struct mutex chan_lock;
|
||||
};
|
||||
|
||||
#define L2CAP_INFO_CL_MTU_REQ_SENT 0x01
|
||||
@ -595,16 +596,16 @@ enum {
|
||||
};
|
||||
|
||||
#define __set_chan_timer(c, t) l2cap_set_timer(c, &c->chan_timer, (t))
|
||||
#define __clear_chan_timer(c) l2cap_clear_timer(c, &c->chan_timer)
|
||||
#define __clear_chan_timer(c) l2cap_clear_timer(&c->chan_timer)
|
||||
#define __set_retrans_timer(c) l2cap_set_timer(c, &c->retrans_timer, \
|
||||
L2CAP_DEFAULT_RETRANS_TO);
|
||||
#define __clear_retrans_timer(c) l2cap_clear_timer(c, &c->retrans_timer)
|
||||
#define __clear_retrans_timer(c) l2cap_clear_timer(&c->retrans_timer)
|
||||
#define __set_monitor_timer(c) l2cap_set_timer(c, &c->monitor_timer, \
|
||||
L2CAP_DEFAULT_MONITOR_TO);
|
||||
#define __clear_monitor_timer(c) l2cap_clear_timer(c, &c->monitor_timer)
|
||||
#define __clear_monitor_timer(c) l2cap_clear_timer(&c->monitor_timer)
|
||||
#define __set_ack_timer(c) l2cap_set_timer(c, &chan->ack_timer, \
|
||||
L2CAP_DEFAULT_ACK_TO);
|
||||
#define __clear_ack_timer(c) l2cap_clear_timer(c, &c->ack_timer)
|
||||
#define __clear_ack_timer(c) l2cap_clear_timer(&c->ack_timer)
|
||||
|
||||
static inline int __seq_offset(struct l2cap_chan *chan, __u16 seq1, __u16 seq2)
|
||||
{
|
||||
@ -805,7 +806,8 @@ int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid);
|
||||
struct l2cap_chan *l2cap_chan_create(struct sock *sk);
|
||||
void l2cap_chan_close(struct l2cap_chan *chan, int reason);
|
||||
void l2cap_chan_destroy(struct l2cap_chan *chan);
|
||||
int l2cap_chan_connect(struct l2cap_chan *chan);
|
||||
inline int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
|
||||
bdaddr_t *dst);
|
||||
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
|
||||
u32 priority);
|
||||
void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
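The l2cap_chan and l2cap_conn timers above change from struct timer_list to struct delayed_work, and the __clear_*_timer() macros lose their channel argument accordingly. Below is a hedged sketch of what the reworked set/clear helpers could look like; the real implementation may additionally take a reference on the channel or queue the work on a dedicated workqueue.

/* Illustrative sketch only -- not part of the diff. */
#include <linux/workqueue.h>

struct demo_chan {			/* reduced stand-in for struct l2cap_chan */
	struct delayed_work chan_timer;	/* was struct timer_list */
};

/* 'timeout' is a relative delay in jiffies. */
static inline void demo_set_timer(struct demo_chan *chan,
				  struct delayed_work *work, long timeout)
{
	schedule_delayed_work(work, timeout);
}

static inline void demo_clear_timer(struct delayed_work *work)
{
	cancel_delayed_work(work);
}

#define __set_demo_chan_timer(c, t)	demo_set_timer(c, &(c)->chan_timer, (t))
#define __clear_demo_chan_timer(c)	demo_clear_timer(&(c)->chan_timer)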
@ -61,22 +61,29 @@ struct mgmt_rp_read_index_list {
|
||||
/* Reserve one extra byte for names in management messages so that they
|
||||
* are always guaranteed to be nul-terminated */
|
||||
#define MGMT_MAX_NAME_LENGTH (HCI_MAX_NAME_LENGTH + 1)
|
||||
#define MGMT_MAX_SHORT_NAME_LENGTH (10 + 1)
|
||||
|
||||
#define MGMT_SETTING_POWERED 0x00000001
|
||||
#define MGMT_SETTING_CONNECTABLE 0x00000002
|
||||
#define MGMT_SETTING_FAST_CONNECTABLE 0x00000004
|
||||
#define MGMT_SETTING_DISCOVERABLE 0x00000008
|
||||
#define MGMT_SETTING_PAIRABLE 0x00000010
|
||||
#define MGMT_SETTING_LINK_SECURITY 0x00000020
|
||||
#define MGMT_SETTING_SSP 0x00000040
|
||||
#define MGMT_SETTING_BREDR 0x00000080
|
||||
#define MGMT_SETTING_HS 0x00000100
|
||||
#define MGMT_SETTING_LE 0x00000200
|
||||
|
||||
#define MGMT_OP_READ_INFO 0x0004
|
||||
struct mgmt_rp_read_info {
|
||||
__u8 type;
|
||||
__u8 powered;
|
||||
__u8 connectable;
|
||||
__u8 discoverable;
|
||||
__u8 pairable;
|
||||
__u8 sec_mode;
|
||||
bdaddr_t bdaddr;
|
||||
__u8 version;
|
||||
__le16 manufacturer;
|
||||
__le32 supported_settings;
|
||||
__le32 current_settings;
|
||||
__u8 dev_class[3];
|
||||
__u8 features[8];
|
||||
__u16 manufacturer;
|
||||
__u8 hci_ver;
|
||||
__u16 hci_rev;
|
||||
__u8 name[MGMT_MAX_NAME_LENGTH];
|
||||
__u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH];
|
||||
} __packed;
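The new MGMT_SETTING_* constants above turn what used to be individual mode bytes in the read_info reply into little-endian 32-bit bitmasks (supported_settings / current_settings); the MGMT_EV_NEW_SETTINGS event defined further down presumably reports the same mask. A small decoding sketch, as a user-space management client might do it; the helper name is hypothetical.

/* Illustrative sketch only -- not part of the diff. */
#include <stdint.h>
#include <stdio.h>
#include <endian.h>

#define MGMT_SETTING_POWERED	0x00000001
#define MGMT_SETTING_LE		0x00000200

/* 'settings_le' is the raw current_settings (or supported_settings) field
 * as read from a read_info reply. */
static void demo_print_settings(uint32_t settings_le)
{
	uint32_t settings = le32toh(settings_le);

	printf("powered: %s\n",
	       (settings & MGMT_SETTING_POWERED) ? "yes" : "no");
	printf("low energy: %s\n",
	       (settings & MGMT_SETTING_LE) ? "yes" : "no");
}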
struct mgmt_mode {
|
||||
@ -93,28 +100,38 @@ struct mgmt_cp_set_discoverable {
|
||||
|
||||
#define MGMT_OP_SET_CONNECTABLE 0x0007
|
||||
|
||||
#define MGMT_OP_SET_PAIRABLE 0x0008
|
||||
#define MGMT_OP_SET_FAST_CONNECTABLE 0x0008
|
||||
|
||||
#define MGMT_OP_ADD_UUID 0x0009
|
||||
struct mgmt_cp_add_uuid {
|
||||
__u8 uuid[16];
|
||||
__u8 svc_hint;
|
||||
} __packed;
|
||||
#define MGMT_OP_SET_PAIRABLE 0x0009
|
||||
|
||||
#define MGMT_OP_REMOVE_UUID 0x000A
|
||||
struct mgmt_cp_remove_uuid {
|
||||
__u8 uuid[16];
|
||||
} __packed;
|
||||
#define MGMT_OP_SET_LINK_SECURITY 0x000A
|
||||
|
||||
#define MGMT_OP_SET_DEV_CLASS 0x000B
|
||||
#define MGMT_OP_SET_SSP 0x000B
|
||||
|
||||
#define MGMT_OP_SET_HS 0x000C
|
||||
|
||||
#define MGMT_OP_SET_LE 0x000D
|
||||
|
||||
#define MGMT_OP_SET_DEV_CLASS 0x000E
|
||||
struct mgmt_cp_set_dev_class {
|
||||
__u8 major;
|
||||
__u8 minor;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_OP_SET_SERVICE_CACHE 0x000C
|
||||
struct mgmt_cp_set_service_cache {
|
||||
__u8 enable;
|
||||
#define MGMT_OP_SET_LOCAL_NAME 0x000F
|
||||
struct mgmt_cp_set_local_name {
|
||||
__u8 name[MGMT_MAX_NAME_LENGTH];
|
||||
} __packed;
|
||||
|
||||
#define MGMT_OP_ADD_UUID 0x0010
|
||||
struct mgmt_cp_add_uuid {
|
||||
__u8 uuid[16];
|
||||
__u8 svc_hint;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_OP_REMOVE_UUID 0x0011
|
||||
struct mgmt_cp_remove_uuid {
|
||||
__u8 uuid[16];
|
||||
} __packed;
|
||||
|
||||
struct mgmt_link_key_info {
|
||||
@ -124,14 +141,14 @@ struct mgmt_link_key_info {
|
||||
u8 pin_len;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_OP_LOAD_LINK_KEYS 0x000D
|
||||
#define MGMT_OP_LOAD_LINK_KEYS 0x0012
|
||||
struct mgmt_cp_load_link_keys {
|
||||
__u8 debug_keys;
|
||||
__le16 key_count;
|
||||
struct mgmt_link_key_info keys[0];
|
||||
} __packed;
|
||||
|
||||
#define MGMT_OP_REMOVE_KEYS 0x000E
|
||||
#define MGMT_OP_REMOVE_KEYS 0x0013
|
||||
struct mgmt_cp_remove_keys {
|
||||
bdaddr_t bdaddr;
|
||||
__u8 disconnect;
|
||||
@ -141,7 +158,7 @@ struct mgmt_rp_remove_keys {
|
||||
__u8 status;
|
||||
};
|
||||
|
||||
#define MGMT_OP_DISCONNECT 0x000F
|
||||
#define MGMT_OP_DISCONNECT 0x0014
|
||||
struct mgmt_cp_disconnect {
|
||||
bdaddr_t bdaddr;
|
||||
} __packed;
|
||||
@ -160,13 +177,13 @@ struct mgmt_addr_info {
|
||||
__u8 type;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_OP_GET_CONNECTIONS 0x0010
|
||||
#define MGMT_OP_GET_CONNECTIONS 0x0015
|
||||
struct mgmt_rp_get_connections {
|
||||
__le16 conn_count;
|
||||
struct mgmt_addr_info addr[0];
|
||||
} __packed;
|
||||
|
||||
#define MGMT_OP_PIN_CODE_REPLY 0x0011
|
||||
#define MGMT_OP_PIN_CODE_REPLY 0x0016
|
||||
struct mgmt_cp_pin_code_reply {
|
||||
bdaddr_t bdaddr;
|
||||
__u8 pin_len;
|
||||
@ -177,17 +194,17 @@ struct mgmt_rp_pin_code_reply {
|
||||
uint8_t status;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_OP_PIN_CODE_NEG_REPLY 0x0012
|
||||
#define MGMT_OP_PIN_CODE_NEG_REPLY 0x0017
|
||||
struct mgmt_cp_pin_code_neg_reply {
|
||||
bdaddr_t bdaddr;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_OP_SET_IO_CAPABILITY 0x0013
|
||||
#define MGMT_OP_SET_IO_CAPABILITY 0x0018
|
||||
struct mgmt_cp_set_io_capability {
|
||||
__u8 io_capability;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_OP_PAIR_DEVICE 0x0014
|
||||
#define MGMT_OP_PAIR_DEVICE 0x0019
|
||||
struct mgmt_cp_pair_device {
|
||||
struct mgmt_addr_info addr;
|
||||
__u8 io_cap;
|
||||
@ -197,7 +214,7 @@ struct mgmt_rp_pair_device {
|
||||
__u8 status;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_OP_USER_CONFIRM_REPLY 0x0015
|
||||
#define MGMT_OP_USER_CONFIRM_REPLY 0x001A
|
||||
struct mgmt_cp_user_confirm_reply {
|
||||
bdaddr_t bdaddr;
|
||||
} __packed;
|
||||
@ -206,64 +223,71 @@ struct mgmt_rp_user_confirm_reply {
|
||||
__u8 status;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_OP_USER_CONFIRM_NEG_REPLY 0x0016
|
||||
|
||||
#define MGMT_OP_SET_LOCAL_NAME 0x0017
|
||||
struct mgmt_cp_set_local_name {
|
||||
__u8 name[MGMT_MAX_NAME_LENGTH];
|
||||
#define MGMT_OP_USER_CONFIRM_NEG_REPLY 0x001B
|
||||
struct mgmt_cp_user_confirm_neg_reply {
|
||||
bdaddr_t bdaddr;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_OP_READ_LOCAL_OOB_DATA 0x0018
|
||||
#define MGMT_OP_USER_PASSKEY_REPLY 0x001C
|
||||
struct mgmt_cp_user_passkey_reply {
|
||||
bdaddr_t bdaddr;
|
||||
__le32 passkey;
|
||||
} __packed;
|
||||
struct mgmt_rp_user_passkey_reply {
|
||||
bdaddr_t bdaddr;
|
||||
__u8 status;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_OP_USER_PASSKEY_NEG_REPLY 0x001D
|
||||
struct mgmt_cp_user_passkey_neg_reply {
|
||||
bdaddr_t bdaddr;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_OP_READ_LOCAL_OOB_DATA 0x001E
|
||||
struct mgmt_rp_read_local_oob_data {
|
||||
__u8 hash[16];
|
||||
__u8 randomizer[16];
|
||||
} __packed;
|
||||
|
||||
#define MGMT_OP_ADD_REMOTE_OOB_DATA 0x0019
|
||||
#define MGMT_OP_ADD_REMOTE_OOB_DATA 0x001F
|
||||
struct mgmt_cp_add_remote_oob_data {
|
||||
bdaddr_t bdaddr;
|
||||
__u8 hash[16];
|
||||
__u8 randomizer[16];
|
||||
} __packed;
|
||||
|
||||
#define MGMT_OP_REMOVE_REMOTE_OOB_DATA 0x001A
|
||||
#define MGMT_OP_REMOVE_REMOTE_OOB_DATA 0x0020
|
||||
struct mgmt_cp_remove_remote_oob_data {
|
||||
bdaddr_t bdaddr;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_OP_START_DISCOVERY 0x001B
|
||||
#define MGMT_OP_START_DISCOVERY 0x0021
|
||||
struct mgmt_cp_start_discovery {
|
||||
__u8 type;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_OP_STOP_DISCOVERY 0x001C
|
||||
#define MGMT_OP_STOP_DISCOVERY 0x0022
|
||||
|
||||
#define MGMT_OP_BLOCK_DEVICE 0x001D
|
||||
#define MGMT_OP_CONFIRM_NAME 0x0023
|
||||
struct mgmt_cp_confirm_name {
|
||||
bdaddr_t bdaddr;
|
||||
__u8 name_known;
|
||||
} __packed;
|
||||
struct mgmt_rp_confirm_name {
|
||||
bdaddr_t bdaddr;
|
||||
__u8 status;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_OP_BLOCK_DEVICE 0x0024
|
||||
struct mgmt_cp_block_device {
|
||||
bdaddr_t bdaddr;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_OP_UNBLOCK_DEVICE 0x001E
|
||||
#define MGMT_OP_UNBLOCK_DEVICE 0x0025
|
||||
struct mgmt_cp_unblock_device {
|
||||
bdaddr_t bdaddr;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_OP_SET_FAST_CONNECTABLE 0x001F
|
||||
struct mgmt_cp_set_fast_connectable {
|
||||
__u8 enable;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_OP_USER_PASSKEY_REPLY 0x0020
|
||||
struct mgmt_cp_user_passkey_reply {
|
||||
bdaddr_t bdaddr;
|
||||
__le32 passkey;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_OP_USER_PASSKEY_NEG_REPLY 0x0021
|
||||
struct mgmt_cp_user_passkey_neg_reply {
|
||||
bdaddr_t bdaddr;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_EV_CMD_COMPLETE 0x0001
|
||||
struct mgmt_ev_cmd_complete {
|
||||
__le16 opcode;
|
||||
@ -285,81 +309,82 @@ struct mgmt_ev_controller_error {
|
||||
|
||||
#define MGMT_EV_INDEX_REMOVED 0x0005
|
||||
|
||||
#define MGMT_EV_POWERED 0x0006
|
||||
#define MGMT_EV_NEW_SETTINGS 0x0006
|
||||
|
||||
#define MGMT_EV_DISCOVERABLE 0x0007
|
||||
#define MGMT_EV_CLASS_OF_DEV_CHANGED 0x0007
|
||||
struct mgmt_ev_class_of_dev_changed {
|
||||
__u8 dev_class[3];
|
||||
};
|
||||
|
||||
#define MGMT_EV_CONNECTABLE 0x0008
|
||||
#define MGMT_EV_LOCAL_NAME_CHANGED 0x0008
|
||||
struct mgmt_ev_local_name_changed {
|
||||
__u8 name[MGMT_MAX_NAME_LENGTH];
|
||||
__u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH];
|
||||
} __packed;
|
||||
|
||||
#define MGMT_EV_PAIRABLE 0x0009
|
||||
|
||||
#define MGMT_EV_NEW_LINK_KEY 0x000A
|
||||
#define MGMT_EV_NEW_LINK_KEY 0x0009
|
||||
struct mgmt_ev_new_link_key {
|
||||
__u8 store_hint;
|
||||
struct mgmt_link_key_info key;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_EV_CONNECTED 0x000B
|
||||
#define MGMT_EV_CONNECTED 0x000A
|
||||
|
||||
#define MGMT_EV_DISCONNECTED 0x000C
|
||||
#define MGMT_EV_DISCONNECTED 0x000B
|
||||
|
||||
#define MGMT_EV_CONNECT_FAILED 0x000D
|
||||
#define MGMT_EV_CONNECT_FAILED 0x000C
|
||||
struct mgmt_ev_connect_failed {
|
||||
struct mgmt_addr_info addr;
|
||||
__u8 status;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_EV_PIN_CODE_REQUEST 0x000E
|
||||
#define MGMT_EV_PIN_CODE_REQUEST 0x000D
|
||||
struct mgmt_ev_pin_code_request {
|
||||
bdaddr_t bdaddr;
|
||||
__u8 secure;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_EV_USER_CONFIRM_REQUEST 0x000F
|
||||
#define MGMT_EV_USER_CONFIRM_REQUEST 0x000E
|
||||
struct mgmt_ev_user_confirm_request {
|
||||
bdaddr_t bdaddr;
|
||||
__u8 confirm_hint;
|
||||
__le32 value;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_EV_USER_PASSKEY_REQUEST 0x000F
|
||||
struct mgmt_ev_user_passkey_request {
|
||||
bdaddr_t bdaddr;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_EV_AUTH_FAILED 0x0010
|
||||
struct mgmt_ev_auth_failed {
|
||||
bdaddr_t bdaddr;
|
||||
__u8 status;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_EV_LOCAL_NAME_CHANGED 0x0011
|
||||
struct mgmt_ev_local_name_changed {
|
||||
__u8 name[MGMT_MAX_NAME_LENGTH];
|
||||
} __packed;
|
||||
|
||||
#define MGMT_EV_DEVICE_FOUND 0x0012
|
||||
#define MGMT_EV_DEVICE_FOUND 0x0011
|
||||
struct mgmt_ev_device_found {
|
||||
struct mgmt_addr_info addr;
|
||||
__u8 dev_class[3];
|
||||
__s8 rssi;
|
||||
__u8 confirm_name;
|
||||
__u8 eir[HCI_MAX_EIR_LENGTH];
|
||||
} __packed;
|
||||
|
||||
#define MGMT_EV_REMOTE_NAME 0x0013
|
||||
#define MGMT_EV_REMOTE_NAME 0x0012
|
||||
struct mgmt_ev_remote_name {
|
||||
bdaddr_t bdaddr;
|
||||
__u8 name[MGMT_MAX_NAME_LENGTH];
|
||||
} __packed;
|
||||
|
||||
#define MGMT_EV_DISCOVERING 0x0014
|
||||
#define MGMT_EV_DISCOVERING 0x0013
|
||||
|
||||
#define MGMT_EV_DEVICE_BLOCKED 0x0015
|
||||
#define MGMT_EV_DEVICE_BLOCKED 0x0014
|
||||
struct mgmt_ev_device_blocked {
|
||||
bdaddr_t bdaddr;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_EV_DEVICE_UNBLOCKED 0x0016
|
||||
#define MGMT_EV_DEVICE_UNBLOCKED 0x0015
|
||||
struct mgmt_ev_device_unblocked {
|
||||
bdaddr_t bdaddr;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_EV_USER_PASSKEY_REQUEST 0x0017
|
||||
struct mgmt_ev_user_passkey_request {
|
||||
bdaddr_t bdaddr;
|
||||
} __packed;
|
||||
|
@ -275,9 +275,10 @@ void hci_sco_setup(struct hci_conn *conn, __u8 status)
|
||||
}
|
||||
}
|
||||
|
||||
static void hci_conn_timeout(unsigned long arg)
|
||||
static void hci_conn_timeout(struct work_struct *work)
|
||||
{
|
||||
struct hci_conn *conn = (void *) arg;
|
||||
struct hci_conn *conn = container_of(work, struct hci_conn,
|
||||
disc_work.work);
|
||||
struct hci_dev *hdev = conn->hdev;
|
||||
__u8 reason;
|
||||
|
||||
@ -311,6 +312,42 @@ static void hci_conn_timeout(unsigned long arg)
|
||||
hci_dev_unlock(hdev);
|
||||
}
|
||||
|
||||
/* Enter sniff mode */
|
||||
static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
|
||||
{
|
||||
struct hci_dev *hdev = conn->hdev;
|
||||
|
||||
BT_DBG("conn %p mode %d", conn, conn->mode);
|
||||
|
||||
if (test_bit(HCI_RAW, &hdev->flags))
|
||||
return;
|
||||
|
||||
if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
|
||||
return;
|
||||
|
||||
if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
|
||||
return;
|
||||
|
||||
if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
|
||||
struct hci_cp_sniff_subrate cp;
|
||||
cp.handle = cpu_to_le16(conn->handle);
|
||||
cp.max_latency = cpu_to_le16(0);
|
||||
cp.min_remote_timeout = cpu_to_le16(0);
|
||||
cp.min_local_timeout = cpu_to_le16(0);
|
||||
hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
|
||||
}
|
||||
|
||||
if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
|
||||
struct hci_cp_sniff_mode cp;
|
||||
cp.handle = cpu_to_le16(conn->handle);
|
||||
cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
|
||||
cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
|
||||
cp.attempt = cpu_to_le16(4);
|
||||
cp.timeout = cpu_to_le16(1);
|
||||
hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
|
||||
}
|
||||
}
|
||||
|
||||
static void hci_conn_idle(unsigned long arg)
|
||||
{
|
||||
struct hci_conn *conn = (void *) arg;
|
||||
@ -325,12 +362,8 @@ static void hci_conn_auto_accept(unsigned long arg)
|
||||
struct hci_conn *conn = (void *) arg;
|
||||
struct hci_dev *hdev = conn->hdev;
|
||||
|
||||
hci_dev_lock(hdev);
|
||||
|
||||
hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
|
||||
&conn->dst);
|
||||
|
||||
hci_dev_unlock(hdev);
|
||||
}
|
||||
|
||||
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
|
||||
@ -374,9 +407,9 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
|
||||
|
||||
skb_queue_head_init(&conn->data_q);
|
||||
|
||||
hci_chan_hash_init(conn);
|
||||
INIT_LIST_HEAD(&conn->chan_list);;
|
||||
|
||||
setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
|
||||
INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
|
||||
setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
|
||||
setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
|
||||
(unsigned long) conn);
|
||||
@ -385,8 +418,6 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
|
||||
|
||||
hci_dev_hold(hdev);
|
||||
|
||||
tasklet_disable(&hdev->tx_task);
|
||||
|
||||
hci_conn_hash_add(hdev, conn);
|
||||
if (hdev->notify)
|
||||
hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
|
||||
@ -395,8 +426,6 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
|
||||
|
||||
hci_conn_init_sysfs(conn);
|
||||
|
||||
tasklet_enable(&hdev->tx_task);
|
||||
|
||||
return conn;
|
||||
}
|
||||
|
||||
@ -408,7 +437,7 @@ int hci_conn_del(struct hci_conn *conn)
|
||||
|
||||
del_timer(&conn->idle_timer);
|
||||
|
||||
del_timer(&conn->disc_timer);
|
||||
cancel_delayed_work_sync(&conn->disc_work);
|
||||
|
||||
del_timer(&conn->auto_accept_timer);
|
||||
|
||||
@ -432,16 +461,13 @@ int hci_conn_del(struct hci_conn *conn)
|
||||
}
|
||||
}
|
||||
|
||||
tasklet_disable(&hdev->tx_task);
|
||||
|
||||
hci_chan_hash_flush(conn);
|
||||
hci_chan_list_flush(conn);
|
||||
|
||||
hci_conn_hash_del(hdev, conn);
|
||||
if (hdev->notify)
|
||||
hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
|
||||
|
||||
tasklet_enable(&hdev->tx_task);
|
||||
|
||||
skb_queue_purge(&conn->data_q);
|
||||
|
||||
hci_conn_put_device(conn);
|
||||
@ -674,7 +700,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
|
||||
goto encrypt;
|
||||
|
||||
auth:
|
||||
if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
|
||||
if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
|
||||
return 0;
|
||||
|
||||
if (!hci_conn_auth(conn, sec_level, auth_type))
|
||||
@ -767,57 +793,15 @@ timer:
|
||||
jiffies + msecs_to_jiffies(hdev->idle_timeout));
|
||||
}
|
||||
|
||||
/* Enter sniff mode */
|
||||
void hci_conn_enter_sniff_mode(struct hci_conn *conn)
|
||||
{
|
||||
struct hci_dev *hdev = conn->hdev;
|
||||
|
||||
BT_DBG("conn %p mode %d", conn, conn->mode);
|
||||
|
||||
if (test_bit(HCI_RAW, &hdev->flags))
|
||||
return;
|
||||
|
||||
if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
|
||||
return;
|
||||
|
||||
if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
|
||||
return;
|
||||
|
||||
if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
|
||||
struct hci_cp_sniff_subrate cp;
|
||||
cp.handle = cpu_to_le16(conn->handle);
|
||||
cp.max_latency = cpu_to_le16(0);
|
||||
cp.min_remote_timeout = cpu_to_le16(0);
|
||||
cp.min_local_timeout = cpu_to_le16(0);
|
||||
hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
|
||||
}
|
||||
|
||||
if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
|
||||
struct hci_cp_sniff_mode cp;
|
||||
cp.handle = cpu_to_le16(conn->handle);
|
||||
cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
|
||||
cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
|
||||
cp.attempt = cpu_to_le16(4);
|
||||
cp.timeout = cpu_to_le16(1);
|
||||
hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
|
||||
}
|
||||
}
|
||||
|
||||
/* Drop all connection on the device */
|
||||
void hci_conn_hash_flush(struct hci_dev *hdev)
|
||||
{
|
||||
struct hci_conn_hash *h = &hdev->conn_hash;
|
||||
struct list_head *p;
|
||||
struct hci_conn *c;
|
||||
|
||||
BT_DBG("hdev %s", hdev->name);
|
||||
|
||||
p = h->list.next;
|
||||
while (p != &h->list) {
|
||||
struct hci_conn *c;
|
||||
|
||||
c = list_entry(p, struct hci_conn, list);
|
||||
p = p->next;
|
||||
|
||||
list_for_each_entry_rcu(c, &h->list, list) {
|
||||
c->state = BT_CLOSED;
|
||||
|
||||
hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
|
||||
@ -882,7 +866,7 @@ int hci_get_conn_list(void __user *arg)
|
||||
|
||||
ci = cl->conn_info;
|
||||
|
||||
hci_dev_lock_bh(hdev);
|
||||
hci_dev_lock(hdev);
|
||||
list_for_each_entry(c, &hdev->conn_hash.list, list) {
|
||||
bacpy(&(ci + n)->bdaddr, &c->dst);
|
||||
(ci + n)->handle = c->handle;
|
||||
@ -893,7 +877,7 @@ int hci_get_conn_list(void __user *arg)
|
||||
if (++n >= req.conn_num)
|
||||
break;
|
||||
}
|
||||
hci_dev_unlock_bh(hdev);
|
||||
hci_dev_unlock(hdev);
|
||||
|
||||
cl->dev_id = hdev->id;
|
||||
cl->conn_num = n;
|
||||
@ -917,7 +901,7 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
|
||||
if (copy_from_user(&req, arg, sizeof(req)))
|
||||
return -EFAULT;
|
||||
|
||||
hci_dev_lock_bh(hdev);
|
||||
hci_dev_lock(hdev);
|
||||
conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
|
||||
if (conn) {
|
||||
bacpy(&ci.bdaddr, &conn->dst);
|
||||
@ -927,7 +911,7 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
|
||||
ci.state = conn->state;
|
||||
ci.link_mode = conn->link_mode;
|
||||
}
|
||||
hci_dev_unlock_bh(hdev);
|
||||
hci_dev_unlock(hdev);
|
||||
|
||||
if (!conn)
|
||||
return -ENOENT;
|
||||
@ -943,11 +927,11 @@ int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
|
||||
if (copy_from_user(&req, arg, sizeof(req)))
|
||||
return -EFAULT;
|
||||
|
||||
hci_dev_lock_bh(hdev);
|
||||
hci_dev_lock(hdev);
|
||||
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
|
||||
if (conn)
|
||||
req.type = conn->auth_type;
|
||||
hci_dev_unlock_bh(hdev);
|
||||
hci_dev_unlock(hdev);
|
||||
|
||||
if (!conn)
|
||||
return -ENOENT;
|
||||
@ -969,9 +953,7 @@ struct hci_chan *hci_chan_create(struct hci_conn *conn)
|
||||
chan->conn = conn;
|
||||
skb_queue_head_init(&chan->data_q);
|
||||
|
||||
tasklet_disable(&hdev->tx_task);
|
||||
hci_chan_hash_add(conn, chan);
|
||||
tasklet_enable(&hdev->tx_task);
|
||||
list_add_rcu(&chan->list, &conn->chan_list);
|
||||
|
||||
return chan;
|
||||
}
|
||||
@ -983,9 +965,9 @@ int hci_chan_del(struct hci_chan *chan)
|
||||
|
||||
BT_DBG("%s conn %p chan %p", hdev->name, conn, chan);
|
||||
|
||||
tasklet_disable(&hdev->tx_task);
|
||||
hci_chan_hash_del(conn, chan);
|
||||
tasklet_enable(&hdev->tx_task);
|
||||
list_del_rcu(&chan->list);
|
||||
|
||||
synchronize_rcu();
|
||||
|
||||
skb_queue_purge(&chan->data_q);
|
||||
kfree(chan);
|
||||
@ -993,13 +975,12 @@ int hci_chan_del(struct hci_chan *chan)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void hci_chan_hash_flush(struct hci_conn *conn)
|
||||
void hci_chan_list_flush(struct hci_conn *conn)
|
||||
{
|
||||
struct hci_chan_hash *h = &conn->chan_hash;
|
||||
struct hci_chan *chan, *tmp;
|
||||
struct hci_chan *chan;
|
||||
|
||||
BT_DBG("conn %p", conn);
|
||||
|
||||
list_for_each_entry_safe(chan, tmp, &h->list, list)
|
||||
list_for_each_entry_rcu(chan, &conn->chan_list, list)
|
||||
hci_chan_del(chan);
|
||||
}
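In the hci_conn.c changes above, the timer callback hci_conn_timeout(), which received the connection through an unsigned long argument, becomes a work handler that recovers its object with container_of(), and arming/cancelling moves from mod_timer()/del_timer() to queue_delayed_work()/cancel_delayed_work_sync(). A generic sketch of that conversion, with invented names:

/* Illustrative sketch only -- not part of the diff. */
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_conn {			/* toy stand-in for struct hci_conn */
	struct delayed_work disc_work;	/* replaces struct timer_list disc_timer */
	struct workqueue_struct *wq;
};

/* New-style handler: the object is recovered from the work item itself
 * instead of being smuggled through an unsigned long cookie. */
static void demo_conn_timeout(struct work_struct *work)
{
	struct demo_conn *conn = container_of(work, struct demo_conn,
					      disc_work.work);
	/* ... runs in process context, may sleep, may take mutexes ... */
	(void)conn;
}

static void demo_conn_init(struct demo_conn *conn)
{
	INIT_DELAYED_WORK(&conn->disc_work, demo_conn_timeout);
}

static void demo_conn_arm(struct demo_conn *conn, unsigned long msecs)
{
	/* was: mod_timer(&conn->disc_timer, jiffies + timeo) */
	queue_delayed_work(conn->wq, &conn->disc_work, msecs_to_jiffies(msecs));
}

static void demo_conn_disarm(struct demo_conn *conn)
{
	/* was: del_timer(&conn->disc_timer) */
	cancel_delayed_work_sync(&conn->disc_work);
}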
@ -1,6 +1,7 @@
|
||||
/*
|
||||
BlueZ - Bluetooth protocol stack for Linux
|
||||
Copyright (C) 2000-2001 Qualcomm Incorporated
|
||||
Copyright (C) 2011 ProFUSION Embedded Systems
|
||||
|
||||
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
|
||||
|
||||
@ -56,11 +57,11 @@
|
||||
|
||||
int enable_hs;
|
||||
|
||||
static void hci_cmd_task(unsigned long arg);
|
||||
static void hci_rx_task(unsigned long arg);
|
||||
static void hci_tx_task(unsigned long arg);
|
||||
static void hci_rx_work(struct work_struct *work);
|
||||
static void hci_cmd_work(struct work_struct *work);
|
||||
static void hci_tx_work(struct work_struct *work);
|
||||
|
||||
static DEFINE_RWLOCK(hci_task_lock);
|
||||
static DEFINE_MUTEX(hci_task_lock);
|
||||
|
||||
/* HCI device list */
|
||||
LIST_HEAD(hci_dev_list);
|
||||
@ -209,7 +210,7 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
|
||||
skb->dev = (void *) hdev;
|
||||
|
||||
skb_queue_tail(&hdev->cmd_q, skb);
|
||||
tasklet_schedule(&hdev->cmd_task);
|
||||
queue_work(hdev->workqueue, &hdev->cmd_work);
|
||||
}
|
||||
skb_queue_purge(&hdev->driver_init);
|
||||
|
||||
@ -433,14 +434,14 @@ int hci_inquiry(void __user *arg)
|
||||
if (!hdev)
|
||||
return -ENODEV;
|
||||
|
||||
hci_dev_lock_bh(hdev);
|
||||
hci_dev_lock(hdev);
|
||||
if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
|
||||
inquiry_cache_empty(hdev) ||
|
||||
ir.flags & IREQ_CACHE_FLUSH) {
|
||||
inquiry_cache_flush(hdev);
|
||||
do_inquiry = 1;
|
||||
}
|
||||
hci_dev_unlock_bh(hdev);
|
||||
hci_dev_unlock(hdev);
|
||||
|
||||
timeo = ir.length * msecs_to_jiffies(2000);
|
||||
|
||||
@ -462,9 +463,9 @@ int hci_inquiry(void __user *arg)
|
||||
goto done;
|
||||
}
|
||||
|
||||
hci_dev_lock_bh(hdev);
|
||||
hci_dev_lock(hdev);
|
||||
ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
|
||||
hci_dev_unlock_bh(hdev);
|
||||
hci_dev_unlock(hdev);
|
||||
|
||||
BT_DBG("num_rsp %d", ir.num_rsp);
|
||||
|
||||
@ -541,15 +542,15 @@ int hci_dev_open(__u16 dev)
|
||||
set_bit(HCI_UP, &hdev->flags);
|
||||
hci_notify(hdev, HCI_DEV_UP);
|
||||
if (!test_bit(HCI_SETUP, &hdev->flags)) {
|
||||
hci_dev_lock_bh(hdev);
|
||||
hci_dev_lock(hdev);
|
||||
mgmt_powered(hdev, 1);
|
||||
hci_dev_unlock_bh(hdev);
|
||||
hci_dev_unlock(hdev);
|
||||
}
|
||||
} else {
|
||||
/* Init failed, cleanup */
|
||||
tasklet_kill(&hdev->rx_task);
|
||||
tasklet_kill(&hdev->tx_task);
|
||||
tasklet_kill(&hdev->cmd_task);
|
||||
flush_work(&hdev->tx_work);
|
||||
flush_work(&hdev->cmd_work);
|
||||
flush_work(&hdev->rx_work);
|
||||
|
||||
skb_queue_purge(&hdev->cmd_q);
|
||||
skb_queue_purge(&hdev->rx_q);
|
||||
@ -585,9 +586,9 @@ static int hci_dev_do_close(struct hci_dev *hdev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Kill RX and TX tasks */
|
||||
tasklet_kill(&hdev->rx_task);
|
||||
tasklet_kill(&hdev->tx_task);
|
||||
/* Flush RX and TX works */
|
||||
flush_work(&hdev->tx_work);
|
||||
flush_work(&hdev->rx_work);
|
||||
|
||||
if (hdev->discov_timeout > 0) {
|
||||
cancel_delayed_work(&hdev->discov_off);
|
||||
@ -597,10 +598,13 @@ static int hci_dev_do_close(struct hci_dev *hdev)
|
||||
if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
|
||||
cancel_delayed_work(&hdev->power_off);
|
||||
|
||||
hci_dev_lock_bh(hdev);
|
||||
if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
|
||||
cancel_delayed_work(&hdev->service_cache);
|
||||
|
||||
hci_dev_lock(hdev);
|
||||
inquiry_cache_flush(hdev);
|
||||
hci_conn_hash_flush(hdev);
|
||||
hci_dev_unlock_bh(hdev);
|
||||
hci_dev_unlock(hdev);
|
||||
|
||||
hci_notify(hdev, HCI_DEV_DOWN);
|
||||
|
||||
@ -617,8 +621,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
|
||||
clear_bit(HCI_INIT, &hdev->flags);
|
||||
}
|
||||
|
||||
/* Kill cmd task */
|
||||
tasklet_kill(&hdev->cmd_task);
|
||||
/* flush cmd work */
|
||||
flush_work(&hdev->cmd_work);
|
||||
|
||||
/* Drop queues */
|
||||
skb_queue_purge(&hdev->rx_q);
|
||||
@ -636,9 +640,9 @@ static int hci_dev_do_close(struct hci_dev *hdev)
|
||||
* and no tasks are scheduled. */
|
||||
hdev->close(hdev);
|
||||
|
||||
hci_dev_lock_bh(hdev);
|
||||
hci_dev_lock(hdev);
|
||||
mgmt_powered(hdev, 0);
|
||||
hci_dev_unlock_bh(hdev);
|
||||
hci_dev_unlock(hdev);
|
||||
|
||||
/* Clear flags */
|
||||
hdev->flags = 0;
|
||||
@ -672,7 +676,6 @@ int hci_dev_reset(__u16 dev)
|
||||
return -ENODEV;
|
||||
|
||||
hci_req_lock(hdev);
|
||||
tasklet_disable(&hdev->tx_task);
|
||||
|
||||
if (!test_bit(HCI_UP, &hdev->flags))
|
||||
goto done;
|
||||
@ -681,10 +684,10 @@ int hci_dev_reset(__u16 dev)
|
||||
skb_queue_purge(&hdev->rx_q);
|
||||
skb_queue_purge(&hdev->cmd_q);
|
||||
|
||||
hci_dev_lock_bh(hdev);
|
||||
hci_dev_lock(hdev);
|
||||
inquiry_cache_flush(hdev);
|
||||
hci_conn_hash_flush(hdev);
|
||||
hci_dev_unlock_bh(hdev);
|
||||
hci_dev_unlock(hdev);
|
||||
|
||||
if (hdev->flush)
|
||||
hdev->flush(hdev);
|
||||
@ -697,7 +700,6 @@ int hci_dev_reset(__u16 dev)
|
||||
msecs_to_jiffies(HCI_INIT_TIMEOUT));
|
||||
|
||||
done:
|
||||
tasklet_enable(&hdev->tx_task);
|
||||
hci_req_unlock(hdev);
|
||||
hci_dev_put(hdev);
|
||||
return ret;
|
||||
@ -939,7 +941,7 @@ static void hci_power_on(struct work_struct *work)
|
||||
return;
|
||||
|
||||
if (test_bit(HCI_AUTO_OFF, &hdev->flags))
|
||||
queue_delayed_work(hdev->workqueue, &hdev->power_off,
|
||||
schedule_delayed_work(&hdev->power_off,
|
||||
msecs_to_jiffies(AUTO_OFF_TIMEOUT));
|
||||
|
||||
if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
|
||||
@ -967,13 +969,13 @@ static void hci_discov_off(struct work_struct *work)
|
||||
|
||||
BT_DBG("%s", hdev->name);
|
||||
|
||||
hci_dev_lock_bh(hdev);
|
||||
hci_dev_lock(hdev);
|
||||
|
||||
hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
|
||||
|
||||
hdev->discov_timeout = 0;
|
||||
|
||||
hci_dev_unlock_bh(hdev);
|
||||
hci_dev_unlock(hdev);
|
||||
}
|
||||
|
||||
int hci_uuids_clear(struct hci_dev *hdev)
|
||||
@ -1207,7 +1209,7 @@ static void hci_cmd_timer(unsigned long arg)
|
||||
|
||||
BT_ERR("%s command tx timeout", hdev->name);
|
||||
atomic_set(&hdev->cmd_cnt, 1);
|
||||
tasklet_schedule(&hdev->cmd_task);
|
||||
queue_work(hdev->workqueue, &hdev->cmd_work);
|
||||
}
|
||||
|
||||
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
|
||||
@ -1340,9 +1342,10 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
|
||||
return mgmt_device_unblocked(hdev, bdaddr);
|
||||
}
|
||||
|
||||
static void hci_clear_adv_cache(unsigned long arg)
|
||||
static void hci_clear_adv_cache(struct work_struct *work)
|
||||
{
|
||||
struct hci_dev *hdev = (void *) arg;
|
||||
struct hci_dev *hdev = container_of(work, struct hci_dev,
|
||||
adv_work.work);
|
||||
|
||||
hci_dev_lock(hdev);
|
||||
|
||||
@ -1443,7 +1446,7 @@ int hci_register_dev(struct hci_dev *hdev)
|
||||
list_add_tail(&hdev->list, head);
|
||||
|
||||
atomic_set(&hdev->refcnt, 1);
|
||||
spin_lock_init(&hdev->lock);
|
||||
mutex_init(&hdev->lock);
|
||||
|
||||
hdev->flags = 0;
|
||||
hdev->dev_flags = 0;
|
||||
@ -1456,9 +1459,10 @@ int hci_register_dev(struct hci_dev *hdev)
|
||||
hdev->sniff_max_interval = 800;
|
||||
hdev->sniff_min_interval = 80;
|
||||
|
||||
tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
|
||||
tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
|
||||
tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
|
||||
INIT_WORK(&hdev->rx_work, hci_rx_work);
|
||||
INIT_WORK(&hdev->cmd_work, hci_cmd_work);
|
||||
INIT_WORK(&hdev->tx_work, hci_tx_work);
|
||||
|
||||
|
||||
skb_queue_head_init(&hdev->rx_q);
|
||||
skb_queue_head_init(&hdev->cmd_q);
|
||||
@ -1487,9 +1491,8 @@ int hci_register_dev(struct hci_dev *hdev)
|
||||
INIT_LIST_HEAD(&hdev->remote_oob_data);
|
||||
|
||||
INIT_LIST_HEAD(&hdev->adv_entries);
|
||||
setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
|
||||
(unsigned long) hdev);
|
||||
|
||||
INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
|
||||
INIT_WORK(&hdev->power_on, hci_power_on);
|
||||
INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
|
||||
|
||||
@ -1501,7 +1504,8 @@ int hci_register_dev(struct hci_dev *hdev)
|
||||
|
||||
write_unlock_bh(&hci_dev_list_lock);
|
||||
|
||||
hdev->workqueue = create_singlethread_workqueue(hdev->name);
|
||||
hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
|
||||
WQ_MEM_RECLAIM, 1);
|
||||
if (!hdev->workqueue) {
|
||||
error = -ENOMEM;
|
||||
goto err;
|
||||
@ -1522,7 +1526,7 @@ int hci_register_dev(struct hci_dev *hdev)
|
||||
|
||||
set_bit(HCI_AUTO_OFF, &hdev->flags);
|
||||
set_bit(HCI_SETUP, &hdev->flags);
|
||||
queue_work(hdev->workqueue, &hdev->power_on);
|
||||
schedule_work(&hdev->power_on);
|
||||
|
||||
hci_notify(hdev, HCI_DEV_REG);
|
||||
|
||||
@ -1557,9 +1561,9 @@ void hci_unregister_dev(struct hci_dev *hdev)
|
||||
|
||||
if (!test_bit(HCI_INIT, &hdev->flags) &&
|
||||
!test_bit(HCI_SETUP, &hdev->flags)) {
|
||||
hci_dev_lock_bh(hdev);
|
||||
hci_dev_lock(hdev);
|
||||
mgmt_index_removed(hdev);
|
||||
hci_dev_unlock_bh(hdev);
|
||||
hci_dev_unlock(hdev);
|
||||
}
|
||||
|
||||
/* mgmt_index_removed should take care of emptying the
|
||||
@ -1575,17 +1579,17 @@ void hci_unregister_dev(struct hci_dev *hdev)
|
||||
|
||||
hci_del_sysfs(hdev);
|
||||
|
||||
del_timer(&hdev->adv_timer);
|
||||
cancel_delayed_work_sync(&hdev->adv_work);
|
||||
|
||||
destroy_workqueue(hdev->workqueue);
|
||||
|
||||
hci_dev_lock_bh(hdev);
|
||||
hci_dev_lock(hdev);
|
||||
hci_blacklist_clear(hdev);
|
||||
hci_uuids_clear(hdev);
|
||||
hci_link_keys_clear(hdev);
|
||||
hci_remote_oob_data_clear(hdev);
|
||||
hci_adv_entries_clear(hdev);
|
||||
hci_dev_unlock_bh(hdev);
|
||||
hci_dev_unlock(hdev);
|
||||
|
||||
__hci_dev_put(hdev);
|
||||
}
|
||||
@ -1623,9 +1627,8 @@ int hci_recv_frame(struct sk_buff *skb)
|
||||
/* Time stamp */
|
||||
__net_timestamp(skb);
|
||||
|
||||
/* Queue frame for rx task */
|
||||
skb_queue_tail(&hdev->rx_q, skb);
|
||||
tasklet_schedule(&hdev->rx_task);
|
||||
queue_work(hdev->workqueue, &hdev->rx_work);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1808,14 +1811,14 @@ int hci_register_proto(struct hci_proto *hp)
|
||||
if (hp->id >= HCI_MAX_PROTO)
|
||||
return -EINVAL;
|
||||
|
||||
write_lock_bh(&hci_task_lock);
|
||||
mutex_lock(&hci_task_lock);
|
||||
|
||||
if (!hci_proto[hp->id])
|
||||
hci_proto[hp->id] = hp;
|
||||
else
|
||||
err = -EEXIST;
|
||||
|
||||
write_unlock_bh(&hci_task_lock);
|
||||
mutex_unlock(&hci_task_lock);
|
||||
|
||||
return err;
|
||||
}
|
||||
@ -1830,14 +1833,14 @@ int hci_unregister_proto(struct hci_proto *hp)
|
||||
if (hp->id >= HCI_MAX_PROTO)
|
||||
return -EINVAL;
|
||||
|
||||
write_lock_bh(&hci_task_lock);
|
||||
mutex_lock(&hci_task_lock);
|
||||
|
||||
if (hci_proto[hp->id])
|
||||
hci_proto[hp->id] = NULL;
|
||||
else
|
||||
err = -ENOENT;
|
||||
|
||||
write_unlock_bh(&hci_task_lock);
|
||||
mutex_unlock(&hci_task_lock);
|
||||
|
||||
return err;
|
||||
}
|
||||
@ -1922,7 +1925,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
|
||||
hdev->init_last_cmd = opcode;
|
||||
|
||||
skb_queue_tail(&hdev->cmd_q, skb);
|
||||
tasklet_schedule(&hdev->cmd_task);
|
||||
queue_work(hdev->workqueue, &hdev->cmd_work);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -2012,7 +2015,7 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
|
||||
|
||||
hci_queue_acl(conn, &chan->data_q, skb, flags);
|
||||
|
||||
tasklet_schedule(&hdev->tx_task);
|
||||
queue_work(hdev->workqueue, &hdev->tx_work);
|
||||
}
|
||||
EXPORT_SYMBOL(hci_send_acl);
|
||||
|
||||
@ -2035,7 +2038,7 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
|
||||
bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
|
||||
|
||||
skb_queue_tail(&conn->data_q, skb);
|
||||
tasklet_schedule(&hdev->tx_task);
|
||||
queue_work(hdev->workqueue, &hdev->tx_work);
|
||||
}
|
||||
EXPORT_SYMBOL(hci_send_sco);
|
||||
|
||||
@ -2050,7 +2053,10 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
|
||||
|
||||
/* We don't have to lock device here. Connections are always
|
||||
* added and removed with TX task disabled. */
|
||||
list_for_each_entry(c, &h->list, list) {
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
list_for_each_entry_rcu(c, &h->list, list) {
|
||||
if (c->type != type || skb_queue_empty(&c->data_q))
|
||||
continue;
|
||||
|
||||
@ -2068,6 +2074,8 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
|
||||
break;
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
|
||||
if (conn) {
|
||||
int cnt, q;
|
||||
|
||||
@ -2103,14 +2111,18 @@ static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
|
||||
|
||||
BT_ERR("%s link tx timeout", hdev->name);
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
/* Kill stalled connections */
|
||||
list_for_each_entry(c, &h->list, list) {
|
||||
list_for_each_entry_rcu(c, &h->list, list) {
|
||||
if (c->type == type && c->sent) {
|
||||
BT_ERR("%s killing stalled connection %s",
|
||||
hdev->name, batostr(&c->dst));
|
||||
hci_acl_disconn(c, 0x13);
|
||||
}
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
|
||||
@ -2124,8 +2136,9 @@ static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
|
||||
|
||||
BT_DBG("%s", hdev->name);
|
||||
|
||||
list_for_each_entry(conn, &h->list, list) {
|
||||
struct hci_chan_hash *ch;
|
||||
rcu_read_lock();
|
||||
|
||||
list_for_each_entry_rcu(conn, &h->list, list) {
|
||||
struct hci_chan *tmp;
|
||||
|
||||
if (conn->type != type)
|
||||
@ -2136,9 +2149,7 @@ static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
|
||||
|
||||
conn_num++;
|
||||
|
||||
ch = &conn->chan_hash;
|
||||
|
||||
list_for_each_entry(tmp, &ch->list, list) {
|
||||
list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (skb_queue_empty(&tmp->data_q))
|
||||
@ -2166,6 +2177,8 @@ static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
|
||||
break;
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
|
||||
if (!chan)
|
||||
return NULL;
|
||||
|
||||
@ -2199,8 +2212,9 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
|
||||
|
||||
BT_DBG("%s", hdev->name);
|
||||
|
||||
list_for_each_entry(conn, &h->list, list) {
|
||||
struct hci_chan_hash *ch;
|
||||
rcu_read_lock();
|
||||
|
||||
list_for_each_entry_rcu(conn, &h->list, list) {
|
||||
struct hci_chan *chan;
|
||||
|
||||
if (conn->type != type)
|
||||
@ -2211,8 +2225,7 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
|
||||
|
||||
num++;
|
||||
|
||||
ch = &conn->chan_hash;
|
||||
list_for_each_entry(chan, &ch->list, list) {
|
||||
list_for_each_entry_rcu(chan, &conn->chan_list, list) {
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (chan->sent) {
|
||||
@ -2236,6 +2249,9 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
|
||||
if (hci_conn_num(hdev, type) == num)
|
||||
break;
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
|
||||
}
|
||||
|
||||
static inline void hci_sched_acl(struct hci_dev *hdev)
|
||||
@ -2386,12 +2402,12 @@ static inline void hci_sched_le(struct hci_dev *hdev)
|
||||
hci_prio_recalculate(hdev, LE_LINK);
|
||||
}
|
||||
|
||||
static void hci_tx_task(unsigned long arg)
|
||||
static void hci_tx_work(struct work_struct *work)
|
||||
{
|
||||
struct hci_dev *hdev = (struct hci_dev *) arg;
|
||||
struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
|
||||
struct sk_buff *skb;
|
||||
|
||||
read_lock(&hci_task_lock);
|
||||
mutex_lock(&hci_task_lock);
|
||||
|
||||
BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
|
||||
hdev->sco_cnt, hdev->le_cnt);
|
||||
@ -2410,7 +2426,7 @@ static void hci_tx_task(unsigned long arg)
|
||||
while ((skb = skb_dequeue(&hdev->raw_q)))
|
||||
hci_send_frame(skb);
|
||||
|
||||
read_unlock(&hci_task_lock);
|
||||
mutex_unlock(&hci_task_lock);
|
||||
}
|
||||
|
||||
/* ----- HCI RX task (incoming data processing) ----- */
|
||||
@ -2439,7 +2455,7 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
if (conn) {
|
||||
register struct hci_proto *hp;
|
||||
|
||||
hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
|
||||
hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
|
||||
|
||||
/* Send to upper protocol */
|
||||
hp = hci_proto[HCI_PROTO_L2CAP];
|
||||
@ -2491,14 +2507,14 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
kfree_skb(skb);
|
||||
}
|
||||
|
||||
static void hci_rx_task(unsigned long arg)
|
||||
static void hci_rx_work(struct work_struct *work)
|
||||
{
|
||||
struct hci_dev *hdev = (struct hci_dev *) arg;
|
||||
struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
|
||||
struct sk_buff *skb;
|
||||
|
||||
BT_DBG("%s", hdev->name);
|
||||
|
||||
read_lock(&hci_task_lock);
|
||||
mutex_lock(&hci_task_lock);
|
||||
|
||||
while ((skb = skb_dequeue(&hdev->rx_q))) {
|
||||
if (atomic_read(&hdev->promisc)) {
|
||||
@ -2524,6 +2540,7 @@ static void hci_rx_task(unsigned long arg)
|
||||
/* Process frame */
|
||||
switch (bt_cb(skb)->pkt_type) {
|
||||
case HCI_EVENT_PKT:
|
||||
BT_DBG("%s Event packet", hdev->name);
|
||||
hci_event_packet(hdev, skb);
|
||||
break;
|
||||
|
||||
@ -2543,12 +2560,12 @@ static void hci_rx_task(unsigned long arg)
|
||||
}
|
||||
}
|
||||
|
||||
read_unlock(&hci_task_lock);
|
||||
mutex_unlock(&hci_task_lock);
|
||||
}
|
||||
|
||||
static void hci_cmd_task(unsigned long arg)
|
||||
static void hci_cmd_work(struct work_struct *work)
|
||||
{
|
||||
struct hci_dev *hdev = (struct hci_dev *) arg;
|
||||
struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
|
||||
struct sk_buff *skb;
|
||||
|
||||
BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
|
||||
@ -2572,7 +2589,7 @@ static void hci_cmd_task(unsigned long arg)
|
||||
jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
|
||||
} else {
|
||||
skb_queue_head(&hdev->cmd_q, skb);
|
||||
tasklet_schedule(&hdev->cmd_task);
|
||||
queue_work(hdev->workqueue, &hdev->cmd_work);
|
||||
}
|
||||
}
|
||||
}
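hci_core.c drops the RX/TX/command tasklets in favour of work items driven by a per-device workqueue (allocated with WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM in hci_register_dev() above), and hci_task_lock becomes a mutex since the handlers now run in process context. A reduced sketch of the new scheduling pattern; the names are stand-ins.

/* Illustrative sketch only -- not part of the diff. */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>

struct demo_hdev {			/* reduced stand-in for struct hci_dev */
	struct workqueue_struct *workqueue;
	struct work_struct rx_work;	/* was struct tasklet_struct rx_task */
	struct sk_buff_head rx_q;
};

static void demo_rx_work(struct work_struct *work)
{
	struct demo_hdev *hdev = container_of(work, struct demo_hdev, rx_work);
	struct sk_buff *skb;

	/* Process context: mutexes such as hci_dev_lock() may be taken here. */
	while ((skb = skb_dequeue(&hdev->rx_q)))
		kfree_skb(skb);		/* the real handler dispatches the frame */
}

static int demo_hdev_setup(struct demo_hdev *hdev)
{
	skb_queue_head_init(&hdev->rx_q);
	INIT_WORK(&hdev->rx_work, demo_rx_work);

	/* Mirrors the alloc_workqueue() call added to hci_register_dev(). */
	hdev->workqueue = alloc_workqueue("demo_hci", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	return hdev->workqueue ? 0 : -ENOMEM;
}

static void demo_hdev_queue_rx(struct demo_hdev *hdev, struct sk_buff *skb)
{
	skb_queue_tail(&hdev->rx_q, skb);
	/* was: tasklet_schedule(&hdev->rx_task) */
	queue_work(hdev->workqueue, &hdev->rx_work);
}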
@ -378,11 +378,8 @@ static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
|
||||
BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
|
||||
|
||||
if (hdev->notify) {
|
||||
tasklet_disable(&hdev->tx_task);
|
||||
if (hdev->notify)
|
||||
hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
|
||||
tasklet_enable(&hdev->tx_task);
|
||||
}
|
||||
}
|
||||
|
||||
static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
@ -409,11 +406,8 @@ static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb
|
||||
|
||||
BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
|
||||
|
||||
if (hdev->notify) {
|
||||
tasklet_disable(&hdev->tx_task);
|
||||
if (hdev->notify)
|
||||
hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
|
||||
tasklet_enable(&hdev->tx_task);
|
||||
}
|
||||
}
|
||||
|
||||
static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
@ -773,6 +767,28 @@ static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
|
||||
}
|
||||
|
||||
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
struct hci_rp_read_data_block_size *rp = (void *) skb->data;
|
||||
|
||||
BT_DBG("%s status 0x%x", hdev->name, rp->status);
|
||||
|
||||
if (rp->status)
|
||||
return;
|
||||
|
||||
hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
|
||||
hdev->block_len = __le16_to_cpu(rp->block_len);
|
||||
hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
|
||||
|
||||
hdev->block_cnt = hdev->num_blocks;
|
||||
|
||||
BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
|
||||
hdev->block_cnt, hdev->block_len);
|
||||
|
||||
hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
|
||||
}
|
||||
|
||||
static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
@ -1017,7 +1033,7 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
if (cp->enable == 0x01) {
set_bit(HCI_LE_SCAN, &hdev->dev_flags);

del_timer(&hdev->adv_timer);
cancel_delayed_work_sync(&hdev->adv_work);

hci_dev_lock(hdev);
hci_adv_entries_clear(hdev);
@ -1025,7 +1041,9 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
} else if (cp->enable == 0x00) {
clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT);
cancel_delayed_work_sync(&hdev->adv_work);
queue_delayed_work(hdev->workqueue, &hdev->adv_work,
jiffies + ADV_CLEAR_TIMEOUT);
}
}

@ -2022,6 +2040,10 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
hci_cc_read_bd_addr(hdev, skb);
break;

case HCI_OP_READ_DATA_BLOCK_SIZE:
hci_cc_read_data_block_size(hdev, skb);
break;

case HCI_OP_WRITE_CA_TIMEOUT:
hci_cc_write_ca_timeout(hdev, skb);
break;
@ -2116,7 +2138,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
if (ev->ncmd) {
atomic_set(&hdev->cmd_cnt, 1);
if (!skb_queue_empty(&hdev->cmd_q))
tasklet_schedule(&hdev->cmd_task);
queue_work(hdev->workqueue, &hdev->cmd_work);
}
}

@ -2198,7 +2220,7 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
atomic_set(&hdev->cmd_cnt, 1);
if (!skb_queue_empty(&hdev->cmd_q))
tasklet_schedule(&hdev->cmd_task);
queue_work(hdev->workqueue, &hdev->cmd_work);
}
}

@ -2243,8 +2265,6 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
return;
}

tasklet_disable(&hdev->tx_task);

for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) {
struct hci_conn *conn;
__u16 handle, count;
@ -2253,34 +2273,43 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
count = get_unaligned_le16(ptr++);

conn = hci_conn_hash_lookup_handle(hdev, handle);
if (conn) {
conn->sent -= count;
if (!conn)
continue;

if (conn->type == ACL_LINK) {
conn->sent -= count;

switch (conn->type) {
case ACL_LINK:
hdev->acl_cnt += count;
if (hdev->acl_cnt > hdev->acl_pkts)
hdev->acl_cnt = hdev->acl_pkts;
break;

case LE_LINK:
if (hdev->le_pkts) {
hdev->le_cnt += count;
if (hdev->le_cnt > hdev->le_pkts)
hdev->le_cnt = hdev->le_pkts;
} else {
hdev->acl_cnt += count;
if (hdev->acl_cnt > hdev->acl_pkts)
hdev->acl_cnt = hdev->acl_pkts;
} else if (conn->type == LE_LINK) {
if (hdev->le_pkts) {
hdev->le_cnt += count;
if (hdev->le_cnt > hdev->le_pkts)
hdev->le_cnt = hdev->le_pkts;
} else {
hdev->acl_cnt += count;
if (hdev->acl_cnt > hdev->acl_pkts)
hdev->acl_cnt = hdev->acl_pkts;
}
} else {
hdev->sco_cnt += count;
if (hdev->sco_cnt > hdev->sco_pkts)
hdev->sco_cnt = hdev->sco_pkts;
}
break;

case SCO_LINK:
hdev->sco_cnt += count;
if (hdev->sco_cnt > hdev->sco_pkts)
hdev->sco_cnt = hdev->sco_pkts;
break;

default:
BT_ERR("Unknown type %d conn %p", conn->type, conn);
break;
}
}

tasklet_schedule(&hdev->tx_task);

tasklet_enable(&hdev->tx_task);
queue_work(hdev->workqueue, &hdev->tx_work);
}

static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
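
The HCI event-handling hunks above swap the advertising-cache timer for a delayed work item (del_timer() becomes cancel_delayed_work_sync(), mod_timer() becomes queue_delayed_work() on the device workqueue) and kick the command/TX processing through queue_work() instead of a tasklet. A minimal sketch of the timer-to-delayed-work pattern follows; it is not taken from the patch, and the names my_dev, adv_clear and MY_CLEAR_DELAY are illustrative only.

#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define MY_CLEAR_DELAY  msecs_to_jiffies(3000) /* illustrative delay */

struct my_dev {
        struct workqueue_struct *workqueue;
        struct delayed_work adv_work;
};

/* Work handler: runs in process context, so it may sleep. */
static void adv_clear(struct work_struct *work)
{
        struct my_dev *dev = container_of(work, struct my_dev, adv_work.work);

        (void)dev;      /* clear the cached entries here */
}

static void my_dev_setup(struct my_dev *dev)
{
        INIT_DELAYED_WORK(&dev->adv_work, adv_clear);
}

static void my_dev_rearm(struct my_dev *dev)
{
        cancel_delayed_work_sync(&dev->adv_work);               /* was del_timer() */
        queue_delayed_work(dev->workqueue, &dev->adv_work,      /* was mod_timer() */
                           MY_CLEAR_DELAY);
}

Note that queue_delayed_work() takes a relative delay in jiffies, whereas mod_timer() took an absolute expiry time.
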
@ -188,11 +188,11 @@ static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
return -EFAULT;

hci_dev_lock_bh(hdev);
hci_dev_lock(hdev);

err = hci_blacklist_add(hdev, &bdaddr);

hci_dev_unlock_bh(hdev);
hci_dev_unlock(hdev);

return err;
}
@ -205,11 +205,11 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
return -EFAULT;

hci_dev_lock_bh(hdev);
hci_dev_lock(hdev);

err = hci_blacklist_del(hdev, &bdaddr);

hci_dev_unlock_bh(hdev);
hci_dev_unlock(hdev);

return err;
}
@ -343,8 +343,11 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
if (haddr.hci_channel > HCI_CHANNEL_CONTROL)
return -EINVAL;

if (haddr.hci_channel == HCI_CHANNEL_CONTROL && !enable_mgmt)
return -EINVAL;
if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
if (!enable_mgmt)
return -EINVAL;
set_bit(HCI_PI_MGMT_INIT, &hci_pi(sk)->flags);
}

lock_sock(sk);

@ -535,10 +538,10 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,

if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
skb_queue_tail(&hdev->raw_q, skb);
tasklet_schedule(&hdev->tx_task);
queue_work(hdev->workqueue, &hdev->tx_work);
} else {
skb_queue_tail(&hdev->cmd_q, skb);
tasklet_schedule(&hdev->cmd_task);
queue_work(hdev->workqueue, &hdev->cmd_work);
}
} else {
if (!capable(CAP_NET_RAW)) {
@ -547,7 +550,7 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
}

skb_queue_tail(&hdev->raw_q, skb);
tasklet_schedule(&hdev->tx_task);
queue_work(hdev->workqueue, &hdev->tx_work);
}

err = len;
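
The socket hunks above replace tasklet_schedule() on hdev->tx_task / hdev->cmd_task with queue_work() on hdev->workqueue. A small, self-contained sketch of that tasklet-to-workqueue pattern, with illustrative names (my_dev, tx_work_fn) rather than the real ones:

#include <linux/workqueue.h>
#include <linux/skbuff.h>

struct my_dev {
        struct workqueue_struct *workqueue;
        struct work_struct tx_work;
        struct sk_buff_head raw_q;
};

/* Runs in process context; unlike a tasklet it may take mutexes or sleep. */
static void tx_work_fn(struct work_struct *work)
{
        struct my_dev *dev = container_of(work, struct my_dev, tx_work);
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&dev->raw_q)) != NULL)
                kfree_skb(skb); /* stand-in for the real transmit path */
}

static void my_dev_setup(struct my_dev *dev)
{
        skb_queue_head_init(&dev->raw_q);
        INIT_WORK(&dev->tx_work, tx_work_fn);
}

static void my_dev_send(struct my_dev *dev, struct sk_buff *skb)
{
        skb_queue_tail(&dev->raw_q, skb);
        queue_work(dev->workqueue, &dev->tx_work);      /* was tasklet_schedule() */
}
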
@ -89,11 +89,35 @@ static struct device_type bt_link = {
.release = bt_link_release,
};

static void add_conn(struct work_struct *work)
/*
* The rfcomm tty device will possibly retain even when conn
* is down, and sysfs doesn't support move zombie device,
* so we should move the device before conn device is destroyed.
*/
static int __match_tty(struct device *dev, void *data)
{
return !strncmp(dev_name(dev), "rfcomm", 6);
}

void hci_conn_init_sysfs(struct hci_conn *conn)
{
struct hci_conn *conn = container_of(work, struct hci_conn, work_add);
struct hci_dev *hdev = conn->hdev;

BT_DBG("conn %p", conn);

conn->dev.type = &bt_link;
conn->dev.class = bt_class;
conn->dev.parent = &hdev->dev;

device_initialize(&conn->dev);
}

void hci_conn_add_sysfs(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;

BT_DBG("conn %p", conn);

dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);

dev_set_drvdata(&conn->dev, conn);
@ -106,19 +130,8 @@ static void add_conn(struct work_struct *work)
hci_dev_hold(hdev);
}

/*
* The rfcomm tty device will possibly retain even when conn
* is down, and sysfs doesn't support move zombie device,
* so we should move the device before conn device is destroyed.
*/
static int __match_tty(struct device *dev, void *data)
void hci_conn_del_sysfs(struct hci_conn *conn)
{
return !strncmp(dev_name(dev), "rfcomm", 6);
}

static void del_conn(struct work_struct *work)
{
struct hci_conn *conn = container_of(work, struct hci_conn, work_del);
struct hci_dev *hdev = conn->hdev;

if (!device_is_registered(&conn->dev))
@ -140,36 +153,6 @@ static void del_conn(struct work_struct *work)
hci_dev_put(hdev);
}

void hci_conn_init_sysfs(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;

BT_DBG("conn %p", conn);

conn->dev.type = &bt_link;
conn->dev.class = bt_class;
conn->dev.parent = &hdev->dev;

device_initialize(&conn->dev);

INIT_WORK(&conn->work_add, add_conn);
INIT_WORK(&conn->work_del, del_conn);
}

void hci_conn_add_sysfs(struct hci_conn *conn)
{
BT_DBG("conn %p", conn);

queue_work(conn->hdev->workqueue, &conn->work_add);
}

void hci_conn_del_sysfs(struct hci_conn *conn)
{
BT_DBG("conn %p", conn);

queue_work(conn->hdev->workqueue, &conn->work_del);
}

static inline char *host_bustostr(int bus)
{
switch (bus) {
@ -403,7 +386,7 @@ static int inquiry_cache_show(struct seq_file *f, void *p)
struct inquiry_cache *cache = &hdev->inq_cache;
struct inquiry_entry *e;

hci_dev_lock_bh(hdev);
hci_dev_lock(hdev);

for (e = cache->list; e; e = e->next) {
struct inquiry_data *data = &e->data;
@ -416,7 +399,7 @@ static int inquiry_cache_show(struct seq_file *f, void *p)
data->rssi, data->ssp_mode, e->timestamp);
}

hci_dev_unlock_bh(hdev);
hci_dev_unlock(hdev);

return 0;
}
@ -438,12 +421,12 @@ static int blacklist_show(struct seq_file *f, void *p)
struct hci_dev *hdev = f->private;
struct bdaddr_list *b;

hci_dev_lock_bh(hdev);
hci_dev_lock(hdev);

list_for_each_entry(b, &hdev->blacklist, list)
seq_printf(f, "%s\n", batostr(&b->bdaddr));

hci_dev_unlock_bh(hdev);
hci_dev_unlock(hdev);

return 0;
}
@ -482,12 +465,12 @@ static int uuids_show(struct seq_file *f, void *p)
struct hci_dev *hdev = f->private;
struct bt_uuid *uuid;

hci_dev_lock_bh(hdev);
hci_dev_lock(hdev);

list_for_each_entry(uuid, &hdev->uuids, list)
print_bt_uuid(f, uuid->uuid);

hci_dev_unlock_bh(hdev);
hci_dev_unlock(hdev);

return 0;
}
@ -508,11 +491,11 @@ static int auto_accept_delay_set(void *data, u64 val)
{
struct hci_dev *hdev = data;

hci_dev_lock_bh(hdev);
hci_dev_lock(hdev);

hdev->auto_accept_delay = val;

hci_dev_unlock_bh(hdev);
hci_dev_unlock(hdev);

return 0;
}
@ -521,11 +504,11 @@ static int auto_accept_delay_get(void *data, u64 *val)
{
struct hci_dev *hdev = data;

hci_dev_lock_bh(hdev);
hci_dev_lock(hdev);

*val = hdev->auto_accept_delay;

hci_dev_unlock_bh(hdev);
hci_dev_unlock(hdev);

return 0;
}
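
Throughout the sysfs/debugfs hunks above, hci_dev_lock_bh()/hci_dev_unlock_bh() become hci_dev_lock()/hci_dev_unlock(). That only makes sense if the underlying hdev lock is now a sleeping lock taken from process context; the sketch below shows that assumed shape (the mutex field and the wrapper names are assumptions for illustration, not quoted from the patch).

#include <linux/mutex.h>

/* Assumed device shape: the per-device lock is a mutex rather than a
 * bottom-half-disabling spinlock, so only process context may take it. */
struct my_hci_dev {
        struct mutex lock;
};

static inline void my_hci_dev_lock(struct my_hci_dev *hdev)
{
        mutex_lock(&hdev->lock);        /* plays the role of spin_lock_bh() before */
}

static inline void my_hci_dev_unlock(struct my_hci_dev *hdev)
{
        mutex_unlock(&hdev->lock);
}

The callers touched above (seq_file show routines, debugfs get/set helpers, hidp_get_connection below) already run in process context, which is why they only need the _bh suffix dropped.
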
@ -795,11 +795,11 @@ static struct hci_conn *hidp_get_connection(struct hidp_session *session)
if (!hdev)
return NULL;

hci_dev_lock_bh(hdev);
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
if (conn)
hci_conn_hold_device(conn);
hci_dev_unlock_bh(hdev);
hci_dev_unlock(hdev);

hci_dev_put(hdev);

@ -3,6 +3,7 @@
Copyright (C) 2000-2001 Qualcomm Incorporated
Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
Copyright (C) 2010 Google Inc.
Copyright (C) 2011 ProFUSION Embedded Systems

Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

@ -89,24 +90,36 @@ static inline void chan_put(struct l2cap_chan *c)

static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
{
struct l2cap_chan *c;
struct l2cap_chan *c, *r = NULL;

list_for_each_entry(c, &conn->chan_l, list) {
if (c->dcid == cid)
return c;
rcu_read_lock();

list_for_each_entry_rcu(c, &conn->chan_l, list) {
if (c->dcid == cid) {
r = c;
break;
}
}
return NULL;

rcu_read_unlock();
return r;
}

static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
struct l2cap_chan *c;
struct l2cap_chan *c, *r = NULL;

list_for_each_entry(c, &conn->chan_l, list) {
if (c->scid == cid)
return c;
rcu_read_lock();

list_for_each_entry_rcu(c, &conn->chan_l, list) {
if (c->scid == cid) {
r = c;
break;
}
}
return NULL;

rcu_read_unlock();
return r;
}

/* Find channel with given SCID.
@ -115,34 +128,36 @@ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 ci
{
struct l2cap_chan *c;

read_lock(&conn->chan_lock);
c = __l2cap_get_chan_by_scid(conn, cid);
if (c)
bh_lock_sock(c->sk);
read_unlock(&conn->chan_lock);
lock_sock(c->sk);
return c;
}

static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
{
struct l2cap_chan *c;
struct l2cap_chan *c, *r = NULL;

list_for_each_entry(c, &conn->chan_l, list) {
if (c->ident == ident)
return c;
rcu_read_lock();

list_for_each_entry_rcu(c, &conn->chan_l, list) {
if (c->ident == ident) {
r = c;
break;
}
}
return NULL;

rcu_read_unlock();
return r;
}

static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
{
struct l2cap_chan *c;

read_lock(&conn->chan_lock);
c = __l2cap_get_chan_by_ident(conn, ident);
if (c)
bh_lock_sock(c->sk);
read_unlock(&conn->chan_lock);
lock_sock(c->sk);
return c;
}
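
The lookup helpers above stop returning from inside a plain list walk guarded by conn->chan_lock and instead collect the match under rcu_read_lock()/rcu_read_unlock() with list_for_each_entry_rcu(). A stand-alone sketch of the reader side of that pattern (struct chan and chan_lookup are illustrative names):

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

struct chan {
        struct list_head list;
        u16 scid;
};

static struct chan *chan_lookup(struct list_head *head, u16 scid)
{
        struct chan *c, *found = NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(c, head, list) {
                if (c->scid == scid) {
                        found = c;
                        break;
                }
        }
        rcu_read_unlock();

        /* The caller still needs its own reference or lock before using
         * 'found'; RCU only guarantees the list walk itself is safe. */
        return found;
}
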
@ -213,20 +228,18 @@ static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
return 0;
}

static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
static void l2cap_set_timer(struct l2cap_chan *chan, struct delayed_work *work, long timeout)
{
BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);

if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
chan_hold(chan);
cancel_delayed_work_sync(work);

schedule_delayed_work(work, timeout);
}

static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
static void l2cap_clear_timer(struct delayed_work *work)
{
BT_DBG("chan %p state %d", chan, chan->state);

if (timer_pending(timer) && del_timer(timer))
chan_put(chan);
cancel_delayed_work_sync(work);
}

static char *state_to_string(int state)
@ -264,23 +277,16 @@ static void l2cap_state_change(struct l2cap_chan *chan, int state)
chan->ops->state_change(chan->data, state);
}

static void l2cap_chan_timeout(unsigned long arg)
static void l2cap_chan_timeout(struct work_struct *work)
{
struct l2cap_chan *chan = (struct l2cap_chan *) arg;
struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
chan_timer.work);
struct sock *sk = chan->sk;
int reason;

BT_DBG("chan %p state %d", chan, chan->state);

bh_lock_sock(sk);

if (sock_owned_by_user(sk)) {
/* sk is owned by user. Try again later */
__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
bh_unlock_sock(sk);
chan_put(chan);
return;
}
lock_sock(sk);

if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
reason = ECONNREFUSED;
@ -292,7 +298,7 @@ static void l2cap_chan_timeout(unsigned long arg)

l2cap_chan_close(chan, reason);

bh_unlock_sock(sk);
release_sock(sk);

chan->ops->close(chan->data);
chan_put(chan);
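
l2cap_chan_timeout() above changes from a timer callback taking an unsigned long into a work handler that recovers its channel with container_of() and, because it now runs in process context, takes lock_sock()/release_sock() instead of bh_lock_sock() and no longer needs the sock_owned_by_user() retry. A hedged sketch of that handler shape with made-up names (my_chan, chan_timeout):

#include <linux/workqueue.h>
#include <net/sock.h>

struct my_chan {
        struct sock *sk;
        struct delayed_work chan_timer;
};

static void chan_timeout(struct work_struct *work)
{
        struct my_chan *chan = container_of(work, struct my_chan,
                                            chan_timer.work);

        lock_sock(chan->sk);    /* sleeps until the socket owner is done */
        /* ... timeout handling goes here ... */
        release_sock(chan->sk);
}

static void chan_setup(struct my_chan *chan)
{
        INIT_DELAYED_WORK(&chan->chan_timer, chan_timeout);     /* was setup_timer() */
}
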
@ -312,7 +318,7 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
list_add(&chan->global_l, &chan_list);
write_unlock_bh(&chan_list_lock);

setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

chan->state = BT_OPEN;

@ -332,7 +338,7 @@ void l2cap_chan_destroy(struct l2cap_chan *chan)
chan_put(chan);
}

static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
chan->psm, chan->dcid);
@ -373,7 +379,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)

chan_hold(chan);

list_add(&chan->list, &conn->chan_l);
list_add_rcu(&chan->list, &conn->chan_l);
}

/* Delete channel.
@ -390,9 +396,9 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)

if (conn) {
/* Delete from channel list */
write_lock_bh(&conn->chan_lock);
list_del(&chan->list);
write_unlock_bh(&conn->chan_lock);
list_del_rcu(&chan->list);
synchronize_rcu();

chan_put(chan);

chan->conn = NULL;
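
l2cap_chan_add() and l2cap_chan_del() above form the update side of the same RCU conversion: additions use list_add_rcu(), and removals use list_del_rcu() followed by synchronize_rcu() before the channel reference is dropped. A sketch of that writer pattern, again with illustrative names:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct chan {
        struct list_head list;
};

/* Concurrent updaters must still be serialized by some outer lock;
 * RCU only protects the readers. */
static void chan_add(struct list_head *head, struct chan *c)
{
        list_add_rcu(&c->list, head);   /* readers may see it immediately */
}

static void chan_del(struct chan *c)
{
        list_del_rcu(&c->list);
        synchronize_rcu();      /* wait for in-flight rcu_read_lock() walkers */
        kfree(c);               /* now no reader can still hold a pointer */
}
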
@ -707,7 +713,7 @@ static void l2cap_do_start(struct l2cap_chan *chan)
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
conn->info_ident = l2cap_get_ident(conn);

mod_timer(&conn->info_timer, jiffies +
schedule_delayed_work(&conn->info_work,
msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

l2cap_send_cmd(conn, conn->info_ident,
@ -759,13 +765,13 @@ static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *c
/* ---- L2CAP connections ---- */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
struct l2cap_chan *chan, *tmp;
struct l2cap_chan *chan;

BT_DBG("conn %p", conn);

read_lock(&conn->chan_lock);
rcu_read_lock();

list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
list_for_each_entry_rcu(chan, &conn->chan_l, list) {
struct sock *sk = chan->sk;

bh_lock_sock(sk);
@ -789,9 +795,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
&chan->conf_state)) {
/* l2cap_chan_close() calls list_del(chan)
* so release the lock */
read_unlock(&conn->chan_lock);
l2cap_chan_close(chan, ECONNRESET);
read_lock(&conn->chan_lock);
bh_unlock_sock(sk);
continue;
}
@ -847,7 +851,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
bh_unlock_sock(sk);
}

read_unlock(&conn->chan_lock);
rcu_read_unlock();
}

/* Find socket with cid and source bdaddr.
@ -898,7 +902,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)

parent = pchan->sk;

bh_lock_sock(parent);
lock_sock(parent);

/* Check for backlog size */
if (sk_acceptq_is_full(parent)) {
@ -912,8 +916,6 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)

sk = chan->sk;

write_lock_bh(&conn->chan_lock);

hci_conn_hold(conn->hcon);

bacpy(&bt_sk(sk)->src, conn->src);
@ -921,17 +923,15 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)

bt_accept_enqueue(parent, sk);

__l2cap_chan_add(conn, chan);
l2cap_chan_add(conn, chan);

__set_chan_timer(chan, sk->sk_sndtimeo);

l2cap_state_change(chan, BT_CONNECTED);
parent->sk_data_ready(parent, 0);

write_unlock_bh(&conn->chan_lock);

clean:
bh_unlock_sock(parent);
release_sock(parent);
}

static void l2cap_chan_ready(struct sock *sk)
@ -963,9 +963,9 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
if (conn->hcon->out && conn->hcon->type == LE_LINK)
smp_conn_security(conn, conn->hcon->pending_sec_level);

read_lock(&conn->chan_lock);
rcu_read_lock();

list_for_each_entry(chan, &conn->chan_l, list) {
list_for_each_entry_rcu(chan, &conn->chan_l, list) {
struct sock *sk = chan->sk;

bh_lock_sock(sk);
@ -985,7 +985,7 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
bh_unlock_sock(sk);
}

read_unlock(&conn->chan_lock);
rcu_read_unlock();
}

/* Notify sockets that we cannot guaranty reliability anymore */
@ -995,21 +995,22 @@ static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)

BT_DBG("conn %p", conn);

read_lock(&conn->chan_lock);
rcu_read_lock();

list_for_each_entry(chan, &conn->chan_l, list) {
list_for_each_entry_rcu(chan, &conn->chan_l, list) {
struct sock *sk = chan->sk;

if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
sk->sk_err = err;
}

read_unlock(&conn->chan_lock);
rcu_read_unlock();
}

static void l2cap_info_timeout(unsigned long arg)
static void l2cap_info_timeout(struct work_struct *work)
{
struct l2cap_conn *conn = (void *) arg;
struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
info_work.work);

conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
conn->info_ident = 0;
@ -1033,16 +1034,16 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
/* Kill channels */
list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
sk = chan->sk;
bh_lock_sock(sk);
lock_sock(sk);
l2cap_chan_del(chan, err);
bh_unlock_sock(sk);
release_sock(sk);
chan->ops->close(chan->data);
}

hci_chan_del(conn->hchan);

if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
del_timer_sync(&conn->info_timer);
cancel_delayed_work_sync(&conn->info_work);

if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
del_timer(&conn->security_timer);
@ -1095,7 +1096,6 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
conn->feat_mask = 0;

spin_lock_init(&conn->lock);
rwlock_init(&conn->chan_lock);

INIT_LIST_HEAD(&conn->chan_l);

@ -1103,21 +1103,13 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
setup_timer(&conn->security_timer, security_timeout,
(unsigned long) conn);
else
setup_timer(&conn->info_timer, l2cap_info_timeout,
(unsigned long) conn);
INIT_DELAYED_WORK(&conn->info_work, l2cap_info_timeout);

conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

return conn;
}

static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
write_lock_bh(&conn->chan_lock);
__l2cap_chan_add(conn, chan);
write_unlock_bh(&conn->chan_lock);
}

/* ---- Socket interface ---- */

/* Find socket with psm and source bdaddr.
@ -1153,11 +1145,10 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr
return c1;
}

int l2cap_chan_connect(struct l2cap_chan *chan)
inline int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
{
struct sock *sk = chan->sk;
bdaddr_t *src = &bt_sk(sk)->src;
bdaddr_t *dst = &bt_sk(sk)->dst;
struct l2cap_conn *conn;
struct hci_conn *hcon;
struct hci_dev *hdev;
@ -1171,7 +1162,62 @@ int l2cap_chan_connect(struct l2cap_chan *chan)
if (!hdev)
return -EHOSTUNREACH;

hci_dev_lock_bh(hdev);
hci_dev_lock(hdev);

lock_sock(sk);

/* PSM must be odd and lsb of upper byte must be 0 */
if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
chan->chan_type != L2CAP_CHAN_RAW) {
err = -EINVAL;
goto done;
}

if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
err = -EINVAL;
goto done;
}

switch (chan->mode) {
case L2CAP_MODE_BASIC:
break;
case L2CAP_MODE_ERTM:
case L2CAP_MODE_STREAMING:
if (!disable_ertm)
break;
/* fall through */
default:
err = -ENOTSUPP;
goto done;
}

switch (sk->sk_state) {
case BT_CONNECT:
case BT_CONNECT2:
case BT_CONFIG:
/* Already connecting */
err = 0;
goto done;

case BT_CONNECTED:
/* Already connected */
err = -EISCONN;
goto done;

case BT_OPEN:
case BT_BOUND:
/* Can connect */
break;

default:
err = -EBADFD;
goto done;
}

/* Set destination address and psm */
bacpy(&bt_sk(sk)->dst, src);
chan->psm = psm;
chan->dcid = cid;

auth_type = l2cap_get_auth_type(chan);

@ -1214,7 +1260,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan)
err = 0;

done:
hci_dev_unlock_bh(hdev);
hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
@ -1251,17 +1297,18 @@ int __l2cap_wait_ack(struct sock *sk)
return err;
}

static void l2cap_monitor_timeout(unsigned long arg)
static void l2cap_monitor_timeout(struct work_struct *work)
{
struct l2cap_chan *chan = (void *) arg;
struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
monitor_timer.work);
struct sock *sk = chan->sk;

BT_DBG("chan %p", chan);

bh_lock_sock(sk);
lock_sock(sk);
if (chan->retry_count >= chan->remote_max_tx) {
l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
bh_unlock_sock(sk);
release_sock(sk);
return;
}

@ -1269,24 +1316,25 @@ static void l2cap_monitor_timeout(unsigned long arg)
__set_monitor_timer(chan);

l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
bh_unlock_sock(sk);
release_sock(sk);
}

static void l2cap_retrans_timeout(unsigned long arg)
static void l2cap_retrans_timeout(struct work_struct *work)
{
struct l2cap_chan *chan = (void *) arg;
struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
retrans_timer.work);
struct sock *sk = chan->sk;

BT_DBG("chan %p", chan);

bh_lock_sock(sk);
lock_sock(sk);
chan->retry_count = 1;
__set_monitor_timer(chan);

set_bit(CONN_WAIT_F, &chan->conn_state);

l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
bh_unlock_sock(sk);
release_sock(sk);
}

static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
@ -1778,8 +1826,9 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)

BT_DBG("conn %p", conn);

read_lock(&conn->chan_lock);
list_for_each_entry(chan, &conn->chan_l, list) {
rcu_read_lock();

list_for_each_entry_rcu(chan, &conn->chan_l, list) {
struct sock *sk = chan->sk;
if (chan->chan_type != L2CAP_CHAN_RAW)
continue;
@ -1794,7 +1843,8 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
if (chan->ops->recv(chan->data, nskb))
kfree_skb(nskb);
}
read_unlock(&conn->chan_lock);

rcu_read_unlock();
}

/* ---- L2CAP signalling commands ---- */
@ -1955,37 +2005,31 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
(unsigned long) &efs);
}

static void l2cap_ack_timeout(unsigned long arg)
static void l2cap_ack_timeout(struct work_struct *work)
{
struct l2cap_chan *chan = (void *) arg;
struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
ack_timer.work);

bh_lock_sock(chan->sk);
lock_sock(chan->sk);
l2cap_send_ack(chan);
bh_unlock_sock(chan->sk);
release_sock(chan->sk);
}

static inline void l2cap_ertm_init(struct l2cap_chan *chan)
{
struct sock *sk = chan->sk;

chan->expected_ack_seq = 0;
chan->unacked_frames = 0;
chan->buffer_seq = 0;
chan->num_acked = 0;
chan->frames_sent = 0;

setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
(unsigned long) chan);
setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
(unsigned long) chan);
setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

skb_queue_head_init(&chan->srej_q);

INIT_LIST_HEAD(&chan->srej_l);

sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}

static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
@ -2372,7 +2416,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
void *ptr = req->data;
int type, olen;
unsigned long val;
struct l2cap_conf_rfc rfc;
struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
struct l2cap_conf_efs efs;

BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
@ -2522,6 +2566,16 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
}
}

/* Use sane default values in case a misbehaving remote device
* did not send an RFC option.
*/
rfc.mode = chan->mode;
rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
rfc.max_pdu_size = cpu_to_le16(chan->imtu);

BT_ERR("Expected RFC option was not found, using defaults");

done:
switch (rfc.mode) {
case L2CAP_MODE_ERTM:
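
Two of the hunks above harden option parsing: the rfc struct now starts life as { .mode = L2CAP_MODE_BASIC }, and l2cap_conf_rfc_get() falls back to sane defaults when the remote never sent an RFC option. The idiom is C99 designated initializers plus an explicit fallback; a tiny generic sketch with shortened field names and made-up values:

#include <linux/types.h>

struct conf_rfc {
        u8  mode;
        u16 retrans_timeout;
        u16 monitor_timeout;
        u16 max_pdu_size;
};

static void get_rfc(const void *rsp, int len, struct conf_rfc *out)
{
        /* Unnamed fields are zero-initialized, so nothing is left stale
         * even if the parsing loop below never finds an RFC option. */
        struct conf_rfc rfc = { .mode = 0x00 /* basic mode */ };
        bool found = false;

        /* ... walk the response options and fill 'rfc' when present ... */
        (void)rsp;
        (void)len;

        if (!found) {
                rfc.retrans_timeout = 2000;     /* made-up fallback value */
                rfc.monitor_timeout = 12000;    /* made-up fallback value */
        }

        *out = rfc;
}
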
@ -2543,7 +2597,7 @@ static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hd

if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
cmd->ident == conn->info_ident) {
del_timer(&conn->info_timer);
cancel_delayed_work_sync(&conn->info_work);

conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
conn->info_ident = 0;
@ -2576,7 +2630,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd

parent = pchan->sk;

bh_lock_sock(parent);
lock_sock(parent);

/* Check if the ACL is secure enough (if not SDP) */
if (psm != cpu_to_le16(0x0001) &&
@ -2600,11 +2654,8 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd

sk = chan->sk;

write_lock_bh(&conn->chan_lock);

/* Check if we already have channel with that dcid */
if (__l2cap_get_chan_by_dcid(conn, scid)) {
write_unlock_bh(&conn->chan_lock);
sock_set_flag(sk, SOCK_ZAPPED);
chan->ops->close(chan->data);
goto response;
@ -2619,7 +2670,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd

bt_accept_enqueue(parent, sk);

__l2cap_chan_add(conn, chan);
l2cap_chan_add(conn, chan);

dcid = chan->scid;

@ -2650,10 +2701,8 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
status = L2CAP_CS_NO_INFO;
}

write_unlock_bh(&conn->chan_lock);

response:
bh_unlock_sock(parent);
release_sock(parent);

sendresp:
rsp.scid = cpu_to_le16(scid);
@ -2669,7 +2718,7 @@ sendresp:
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
conn->info_ident = l2cap_get_ident(conn);

mod_timer(&conn->info_timer, jiffies +
schedule_delayed_work(&conn->info_work,
msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

l2cap_send_cmd(conn, conn->info_ident,
@ -2735,19 +2784,11 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
break;

default:
/* don't delete l2cap channel if sk is owned by user */
if (sock_owned_by_user(sk)) {
l2cap_state_change(chan, BT_DISCONN);
__clear_chan_timer(chan);
__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
break;
}

l2cap_chan_del(chan, ECONNREFUSED);
break;
}

bh_unlock_sock(sk);
release_sock(sk);
return 0;
}

@ -2869,7 +2910,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
}

unlock:
bh_unlock_sock(sk);
release_sock(sk);
return 0;
}

@ -2976,7 +3017,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
}

done:
bh_unlock_sock(sk);
release_sock(sk);
return 0;
}

@ -3005,17 +3046,8 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd

sk->sk_shutdown = SHUTDOWN_MASK;

/* don't delete l2cap channel if sk is owned by user */
if (sock_owned_by_user(sk)) {
l2cap_state_change(chan, BT_DISCONN);
__clear_chan_timer(chan);
__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
bh_unlock_sock(sk);
return 0;
}

l2cap_chan_del(chan, ECONNRESET);
bh_unlock_sock(sk);
release_sock(sk);

chan->ops->close(chan->data);
return 0;
@ -3039,17 +3071,8 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd

sk = chan->sk;

/* don't delete l2cap channel if sk is owned by user */
if (sock_owned_by_user(sk)) {
l2cap_state_change(chan, BT_DISCONN);
__clear_chan_timer(chan);
__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
bh_unlock_sock(sk);
return 0;
}

l2cap_chan_del(chan, 0);
bh_unlock_sock(sk);
release_sock(sk);

chan->ops->close(chan->data);
return 0;
@ -3120,7 +3143,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
return 0;

del_timer(&conn->info_timer);
cancel_delayed_work_sync(&conn->info_work);

if (result != L2CAP_IR_SUCCESS) {
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
@ -4237,12 +4260,7 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
break;

case L2CAP_MODE_ERTM:
if (!sock_owned_by_user(sk)) {
l2cap_ertm_data_rcv(sk, skb);
} else {
if (sk_add_backlog(sk, skb))
goto drop;
}
l2cap_ertm_data_rcv(sk, skb);

goto done;

@ -4292,7 +4310,7 @@ drop:

done:
if (sk)
bh_unlock_sock(sk);
release_sock(sk);

return 0;
}
@ -4308,7 +4326,7 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str

sk = chan->sk;

bh_lock_sock(sk);
lock_sock(sk);

BT_DBG("sk %p, len %d", sk, skb->len);

@ -4326,7 +4344,7 @@ drop:

done:
if (sk)
bh_unlock_sock(sk);
release_sock(sk);
return 0;
}

@ -4341,7 +4359,7 @@ static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct

sk = chan->sk;

bh_lock_sock(sk);
lock_sock(sk);

BT_DBG("sk %p, len %d", sk, skb->len);

@ -4359,7 +4377,7 @@ drop:

done:
if (sk)
bh_unlock_sock(sk);
release_sock(sk);
return 0;
}

@ -4518,9 +4536,9 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
del_timer(&conn->security_timer);
}

read_lock(&conn->chan_lock);
rcu_read_lock();

list_for_each_entry(chan, &conn->chan_l, list) {
list_for_each_entry_rcu(chan, &conn->chan_l, list) {
struct sock *sk = chan->sk;

bh_lock_sock(sk);
@ -4598,7 +4616,7 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
bh_unlock_sock(sk);
}

read_unlock(&conn->chan_lock);
rcu_read_unlock();

return 0;
}
@ -4664,11 +4682,11 @@ static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 fl
BT_ERR("Frame exceeding recv MTU (len %d, "
"MTU %d)", len,
chan->imtu);
bh_unlock_sock(sk);
release_sock(sk);
l2cap_conn_unreliable(conn, ECOMM);
goto drop;
}
bh_unlock_sock(sk);
release_sock(sk);
}

/* Allocate skb for the complete frame (with header) */

@ -3,6 +3,7 @@
Copyright (C) 2000-2001 Qualcomm Incorporated
Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
Copyright (C) 2010 Google Inc.
Copyright (C) 2011 ProFUSION Embedded Systems

Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

@ -122,70 +123,15 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
if (la.l2_cid && la.l2_psm)
return -EINVAL;

lock_sock(sk);

if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED
&& !(la.l2_psm || la.l2_cid)) {
err = -EINVAL;
goto done;
}

switch (chan->mode) {
case L2CAP_MODE_BASIC:
break;
case L2CAP_MODE_ERTM:
case L2CAP_MODE_STREAMING:
if (!disable_ertm)
break;
/* fall through */
default:
err = -ENOTSUPP;
goto done;
}

switch (sk->sk_state) {
case BT_CONNECT:
case BT_CONNECT2:
case BT_CONFIG:
/* Already connecting */
goto wait;

case BT_CONNECTED:
/* Already connected */
err = -EISCONN;
goto done;

case BT_OPEN:
case BT_BOUND:
/* Can connect */
break;

default:
err = -EBADFD;
goto done;
}

/* PSM must be odd and lsb of upper byte must be 0 */
if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 && !la.l2_cid &&
chan->chan_type != L2CAP_CHAN_RAW) {
err = -EINVAL;
goto done;
}

/* Set destination address and psm */
bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
chan->psm = la.l2_psm;
chan->dcid = la.l2_cid;

err = l2cap_chan_connect(l2cap_pi(sk)->chan);
err = l2cap_chan_connect(chan, la.l2_psm, la.l2_cid, &la.l2_bdaddr);
if (err)
goto done;

wait:
err = bt_sock_wait_state(sk, BT_CONNECTED,
sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
release_sock(sk);
if (sock_owned_by_user(sk))
release_sock(sk);
return err;
}
net/bluetooth/mgmt.c: 1018 lines changed (file diff suppressed because it is too large)
@ -1162,6 +1162,7 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
if (list_empty(&s->dlcs)) {
s->state = BT_DISCONN;
rfcomm_send_disc(s, 0);
rfcomm_session_clear_timer(s);
}

break;
@ -189,7 +189,7 @@ static int sco_connect(struct sock *sk)
if (!hdev)
return -EHOSTUNREACH;

hci_dev_lock_bh(hdev);
hci_dev_lock(hdev);

if (lmp_esco_capable(hdev) && !disable_esco)
type = ESCO_LINK;
@ -225,7 +225,7 @@ static int sco_connect(struct sock *sk)
}

done:
hci_dev_unlock_bh(hdev);
hci_dev_unlock(hdev);

hci_dev_put(hdev);
return err;
}