Commit e563589f authored by John W. Linville


Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next
parents 197bbf0a 76a388be
......@@ -90,6 +90,7 @@ static struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x13d3, 0x3393) },
{ USB_DEVICE(0x0489, 0xe04e) },
{ USB_DEVICE(0x0489, 0xe056) },
{ USB_DEVICE(0x0489, 0xe04d) },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE02C) },
......@@ -126,6 +127,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU22 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
......
......@@ -148,6 +148,7 @@ static struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
......@@ -926,6 +927,22 @@ static void btusb_waker(struct work_struct *work)
usb_autopm_put_interface(data->intf);
}
static int btusb_setup_bcm92035(struct hci_dev *hdev)
{
struct sk_buff *skb;
u8 val = 0x00;
BT_DBG("%s", hdev->name);
skb = __hci_cmd_sync(hdev, 0xfc3b, 1, &val, HCI_INIT_TIMEOUT);
if (IS_ERR(skb))
BT_ERR("BCM92035 command failed (%ld)", -PTR_ERR(skb));
else
kfree_skb(skb);
return 0;
}
static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
......@@ -1028,6 +1045,9 @@ static int btusb_probe(struct usb_interface *intf,
hdev->send = btusb_send_frame;
hdev->notify = btusb_notify;
if (id->driver_info & BTUSB_BCM92035)
hdev->setup = btusb_setup_bcm92035;
/* Interface numbers are hardcoded in the specification */
data->isoc = usb_ifnum_to_if(data->udev, 1);
......@@ -1065,17 +1085,6 @@ static int btusb_probe(struct usb_interface *intf,
data->isoc = NULL;
}
if (id->driver_info & BTUSB_BCM92035) {
unsigned char cmd[] = { 0x3b, 0xfc, 0x01, 0x00 };
struct sk_buff *skb;
skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
if (skb) {
memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd));
skb_queue_tail(&hdev->driver_init, skb);
}
}
if (data->isoc) {
err = usb_driver_claim_interface(&btusb_driver,
data->isoc, data);
......
......@@ -153,6 +153,9 @@ static int h4_recv(struct hci_uart *hu, void *data, int count)
{
int ret;
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
ret = hci_recv_stream_fragment(hu->hdev, data, count);
if (ret < 0) {
BT_ERR("Frame Reassembly Failed");
......
......@@ -388,7 +388,10 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *f
spin_lock(&hu->rx_lock);
hu->proto->recv(hu, (void *) data, count);
if (hu->hdev)
hu->hdev->stat.byte_rx += count;
spin_unlock(&hu->rx_lock);
tty_unthrottle(tty);
......
......@@ -193,11 +193,11 @@ static inline bool bdaddr_type_is_le(__u8 type)
#define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff} })
/* Copy, swap, convert BD Address */
static inline int bacmp(bdaddr_t *ba1, bdaddr_t *ba2)
static inline int bacmp(const bdaddr_t *ba1, const bdaddr_t *ba2)
{
return memcmp(ba1, ba2, sizeof(bdaddr_t));
}
static inline void bacpy(bdaddr_t *dst, bdaddr_t *src)
static inline void bacpy(bdaddr_t *dst, const bdaddr_t *src)
{
memcpy(dst, src, sizeof(bdaddr_t));
}
......@@ -266,6 +266,7 @@ typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status);
struct hci_req_ctrl {
bool start;
u8 event;
hci_req_complete_t complete;
};
......
......@@ -984,6 +984,9 @@ struct hci_cp_le_set_adv_data {
#define HCI_OP_LE_SET_ADV_ENABLE 0x200a
#define LE_SCAN_PASSIVE 0x00
#define LE_SCAN_ACTIVE 0x01
#define HCI_OP_LE_SET_SCAN_PARAM 0x200b
struct hci_cp_le_set_scan_param {
__u8 type;
......@@ -993,8 +996,10 @@ struct hci_cp_le_set_scan_param {
__u8 filter_policy;
} __packed;
#define LE_SCANNING_DISABLED 0x00
#define LE_SCANNING_ENABLED 0x01
#define LE_SCAN_DISABLE 0x00
#define LE_SCAN_ENABLE 0x01
#define LE_SCAN_FILTER_DUP_DISABLE 0x00
#define LE_SCAN_FILTER_DUP_ENABLE 0x01
#define HCI_OP_LE_SET_SCAN_ENABLE 0x200c
struct hci_cp_le_set_scan_enable {
......
......@@ -134,6 +134,8 @@ struct amp_assoc {
__u8 data[HCI_MAX_AMP_ASSOC_SIZE];
};
#define HCI_MAX_PAGES 3
#define NUM_REASSEMBLY 4
struct hci_dev {
struct list_head list;
......@@ -151,8 +153,8 @@ struct hci_dev {
__u8 dev_class[3];
__u8 major_class;
__u8 minor_class;
__u8 features[8];
__u8 host_features[8];
__u8 max_page;
__u8 features[HCI_MAX_PAGES][8];
__u8 le_features[8];
__u8 le_white_list_size;
__u8 le_states[8];
......@@ -244,6 +246,7 @@ struct hci_dev {
struct sk_buff_head raw_q;
struct sk_buff_head cmd_q;
struct sk_buff *recv_evt;
struct sk_buff *sent_cmd;
struct sk_buff *reassembly[NUM_REASSEMBLY];
......@@ -268,8 +271,6 @@ struct hci_dev {
struct hci_dev_stats stat;
struct sk_buff_head driver_init;
atomic_t promisc;
struct dentry *debugfs;
......@@ -292,6 +293,7 @@ struct hci_dev {
int (*open)(struct hci_dev *hdev);
int (*close)(struct hci_dev *hdev);
int (*flush)(struct hci_dev *hdev);
int (*setup)(struct hci_dev *hdev);
int (*send)(struct sk_buff *skb);
void (*notify)(struct hci_dev *hdev, unsigned int evt);
int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
......@@ -313,7 +315,7 @@ struct hci_conn {
bool out;
__u8 attempt;
__u8 dev_class[3];
__u8 features[8];
__u8 features[HCI_MAX_PAGES][8];
__u16 interval;
__u16 pkt_type;
__u16 link_policy;
......@@ -345,7 +347,6 @@ struct hci_conn {
struct timer_list auto_accept_timer;
struct device dev;
atomic_t devref;
struct hci_dev *hdev;
void *l2cap_data;
......@@ -584,7 +585,6 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);
void hci_conn_accept(struct hci_conn *conn, int mask);
struct hci_chan *hci_chan_create(struct hci_conn *conn);
void hci_chan_del(struct hci_chan *chan);
......@@ -601,8 +601,36 @@ int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
void hci_conn_hold_device(struct hci_conn *conn);
void hci_conn_put_device(struct hci_conn *conn);
/*
* hci_conn_get() and hci_conn_put() are used to control the life-time of an
* "hci_conn" object. They do not guarantee that the hci_conn object is running,
* working or anything else. They just guarantee that the object is available
* and can be dereferenced. So you can use its locks, local variables and any
* other constant data.
* Before accessing runtime data, you _must_ lock the object and then check that
* it is still running. As soon as you release the locks, the connection might
* get dropped, though.
*
* On the other hand, hci_conn_hold() and hci_conn_drop() are used to control
* how long the underlying connection is held. So every channel that runs on the
* hci_conn object calls this to prevent the connection from disappearing. As
* long as you hold a device, you must also guarantee that you have a valid
* reference to the device via hci_conn_get() (or the initial reference from
* hci_conn_add()).
* The hold()/drop() ref-count is known to drop below 0 sometimes, which doesn't
* break because nobody cares for that. But this means, we cannot use
* _get()/_drop() in it, but require the caller to have a valid ref (FIXME).
*/
static inline void hci_conn_get(struct hci_conn *conn)
{
get_device(&conn->dev);
}
static inline void hci_conn_put(struct hci_conn *conn)
{
put_device(&conn->dev);
}
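/* Illustrative sketch, not part of this patch: one way a caller might combine
 * the two reference types described in the comment above. The function name
 * and the elided locking are assumptions for illustration only;
 * hci_conn_get/put and hci_conn_hold/drop are the helpers added here. */
static inline void example_touch_conn(struct hci_conn *conn)
{
	hci_conn_get(conn);	/* object may be dereferenced from now on */
	hci_conn_hold(conn);	/* keep the underlying link from being dropped */

	/* ... lock the connection and re-check that it is still running
	 * before touching any runtime data, as required above ... */

	hci_conn_drop(conn);	/* done with the link */
	hci_conn_put(conn);	/* release the object reference */
}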
static inline void hci_conn_hold(struct hci_conn *conn)
{
......@@ -612,7 +640,7 @@ static inline void hci_conn_hold(struct hci_conn *conn)
cancel_delayed_work(&conn->disc_work);
}
static inline void hci_conn_put(struct hci_conn *conn)
static inline void hci_conn_drop(struct hci_conn *conn)
{
BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));
......@@ -760,29 +788,29 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->dev.parent = (pdev))
/* ----- LMP capabilities ----- */
#define lmp_encrypt_capable(dev) ((dev)->features[0] & LMP_ENCRYPT)
#define lmp_rswitch_capable(dev) ((dev)->features[0] & LMP_RSWITCH)
#define lmp_hold_capable(dev) ((dev)->features[0] & LMP_HOLD)
#define lmp_sniff_capable(dev) ((dev)->features[0] & LMP_SNIFF)
#define lmp_park_capable(dev) ((dev)->features[1] & LMP_PARK)
#define lmp_inq_rssi_capable(dev) ((dev)->features[3] & LMP_RSSI_INQ)
#define lmp_esco_capable(dev) ((dev)->features[3] & LMP_ESCO)
#define lmp_bredr_capable(dev) (!((dev)->features[4] & LMP_NO_BREDR))
#define lmp_le_capable(dev) ((dev)->features[4] & LMP_LE)
#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
#define lmp_pause_enc_capable(dev) ((dev)->features[5] & LMP_PAUSE_ENC)
#define lmp_ext_inq_capable(dev) ((dev)->features[6] & LMP_EXT_INQ)
#define lmp_le_br_capable(dev) !!((dev)->features[6] & LMP_SIMUL_LE_BR)
#define lmp_ssp_capable(dev) ((dev)->features[6] & LMP_SIMPLE_PAIR)
#define lmp_no_flush_capable(dev) ((dev)->features[6] & LMP_NO_FLUSH)
#define lmp_lsto_capable(dev) ((dev)->features[7] & LMP_LSTO)
#define lmp_inq_tx_pwr_capable(dev) ((dev)->features[7] & LMP_INQ_TX_PWR)
#define lmp_ext_feat_capable(dev) ((dev)->features[7] & LMP_EXTFEATURES)
#define lmp_encrypt_capable(dev) ((dev)->features[0][0] & LMP_ENCRYPT)
#define lmp_rswitch_capable(dev) ((dev)->features[0][0] & LMP_RSWITCH)
#define lmp_hold_capable(dev) ((dev)->features[0][0] & LMP_HOLD)
#define lmp_sniff_capable(dev) ((dev)->features[0][0] & LMP_SNIFF)
#define lmp_park_capable(dev) ((dev)->features[0][1] & LMP_PARK)
#define lmp_inq_rssi_capable(dev) ((dev)->features[0][3] & LMP_RSSI_INQ)
#define lmp_esco_capable(dev) ((dev)->features[0][3] & LMP_ESCO)
#define lmp_bredr_capable(dev) (!((dev)->features[0][4] & LMP_NO_BREDR))
#define lmp_le_capable(dev) ((dev)->features[0][4] & LMP_LE)
#define lmp_sniffsubr_capable(dev) ((dev)->features[0][5] & LMP_SNIFF_SUBR)
#define lmp_pause_enc_capable(dev) ((dev)->features[0][5] & LMP_PAUSE_ENC)
#define lmp_ext_inq_capable(dev) ((dev)->features[0][6] & LMP_EXT_INQ)
#define lmp_le_br_capable(dev) (!!((dev)->features[0][6] & LMP_SIMUL_LE_BR))
#define lmp_ssp_capable(dev) ((dev)->features[0][6] & LMP_SIMPLE_PAIR)
#define lmp_no_flush_capable(dev) ((dev)->features[0][6] & LMP_NO_FLUSH)
#define lmp_lsto_capable(dev) ((dev)->features[0][7] & LMP_LSTO)
#define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR)
#define lmp_ext_feat_capable(dev) ((dev)->features[0][7] & LMP_EXTFEATURES)
/* ----- Extended LMP capabilities ----- */
#define lmp_host_ssp_capable(dev) ((dev)->host_features[0] & LMP_HOST_SSP)
#define lmp_host_le_capable(dev) !!((dev)->host_features[0] & LMP_HOST_LE)
#define lmp_host_le_br_capable(dev) !!((dev)->host_features[0] & LMP_HOST_LE_BREDR)
#define lmp_host_ssp_capable(dev) ((dev)->features[1][0] & LMP_HOST_SSP)
#define lmp_host_le_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE))
#define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR))
/* returns true if at least one AMP active */
static inline bool hci_amp_capable(void)
......@@ -1054,8 +1082,14 @@ struct hci_request {
void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param);
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, void *param,
u8 event);
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status);
void hci_req_cmd_status(struct hci_dev *hdev, u16 opcode, u8 status);
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
void *param, u32 timeout);
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
void *param, u8 event, u32 timeout);
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags);
......
......@@ -583,6 +583,14 @@ struct l2cap_conn {
struct list_head chan_l;
struct mutex chan_lock;
struct kref ref;
struct list_head users;
};
struct l2cap_user {
struct list_head list;
int (*probe) (struct l2cap_conn *conn, struct l2cap_user *user);
void (*remove) (struct l2cap_conn *conn, struct l2cap_user *user);
};
#define L2CAP_INFO_CL_MTU_REQ_SENT 0x01
......@@ -786,6 +794,7 @@ extern bool disable_ertm;
int l2cap_init_sockets(void);
void l2cap_cleanup_sockets(void);
bool l2cap_is_socket(struct socket *sock);
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan);
int __l2cap_wait_ack(struct sock *sk);
......@@ -812,4 +821,10 @@ void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
u8 status);
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result);
void l2cap_conn_get(struct l2cap_conn *conn);
void l2cap_conn_put(struct l2cap_conn *conn);
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user);
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user);
#endif /* __L2CAP_H */
......@@ -117,6 +117,16 @@ static void hci_acl_create_connection_cancel(struct hci_conn *conn)
hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}
static void hci_reject_sco(struct hci_conn *conn)
{
struct hci_cp_reject_sync_conn_req cp;
cp.reason = HCI_ERROR_REMOTE_USER_TERM;
bacpy(&cp.bdaddr, &conn->dst);
hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
}
void hci_disconnect(struct hci_conn *conn, __u8 reason)
{
struct hci_cp_disconnect cp;
......@@ -276,6 +286,8 @@ static void hci_conn_timeout(struct work_struct *work)
hci_acl_create_connection_cancel(conn);
else if (conn->type == LE_LINK)
hci_le_create_connection_cancel(conn);
} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
hci_reject_sco(conn);
}
break;
case BT_CONFIG:
......@@ -398,8 +410,6 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
atomic_set(&conn->devref, 0);
hci_conn_init_sysfs(conn);
return conn;
......@@ -433,7 +443,7 @@ int hci_conn_del(struct hci_conn *conn)
struct hci_conn *acl = conn->link;
if (acl) {
acl->link = NULL;
hci_conn_put(acl);
hci_conn_drop(acl);
}
}
......@@ -448,12 +458,11 @@ int hci_conn_del(struct hci_conn *conn)
skb_queue_purge(&conn->data_q);
hci_conn_put_device(conn);
hci_conn_del_sysfs(conn);
hci_dev_put(hdev);
if (conn->handle == 0)
kfree(conn);
hci_conn_put(conn);
return 0;
}
......@@ -565,7 +574,7 @@ static struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type,
if (!sco) {
sco = hci_conn_add(hdev, type, dst);
if (!sco) {
hci_conn_put(acl);
hci_conn_drop(acl);
return ERR_PTR(-ENOMEM);
}
}
......@@ -835,19 +844,6 @@ void hci_conn_check_pending(struct hci_dev *hdev)
hci_dev_unlock(hdev);
}
void hci_conn_hold_device(struct hci_conn *conn)
{
atomic_inc(&conn->devref);
}
EXPORT_SYMBOL(hci_conn_hold_device);
void hci_conn_put_device(struct hci_conn *conn)
{
if (atomic_dec_and_test(&conn->devref))
hci_conn_del_sysfs(conn);
}
EXPORT_SYMBOL(hci_conn_put_device);
int hci_get_conn_list(void __user *arg)
{
struct hci_conn *c;
......@@ -980,7 +976,7 @@ void hci_chan_del(struct hci_chan *chan)
synchronize_rcu();
hci_conn_put(conn);
hci_conn_drop(conn);
skb_queue_purge(&chan->data_q);
kfree(chan);
......
......@@ -79,6 +79,121 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
}
}
struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 event)
{
struct hci_ev_cmd_complete *ev;
struct hci_event_hdr *hdr;
struct sk_buff *skb;
hci_dev_lock(hdev);
skb = hdev->recv_evt;
hdev->recv_evt = NULL;
hci_dev_unlock(hdev);
if (!skb)
return ERR_PTR(-ENODATA);
if (skb->len < sizeof(*hdr)) {
BT_ERR("Too short HCI event");
goto failed;
}
hdr = (void *) skb->data;
skb_pull(skb, HCI_EVENT_HDR_SIZE);
if (event) {
if (hdr->evt != event)
goto failed;
return skb;
}
if (hdr->evt != HCI_EV_CMD_COMPLETE) {
BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
goto failed;
}
if (skb->len < sizeof(*ev)) {
BT_ERR("Too short cmd_complete event");
goto failed;
}
ev = (void *) skb->data;
skb_pull(skb, sizeof(*ev));
if (opcode == __le16_to_cpu(ev->opcode))
return skb;
BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
__le16_to_cpu(ev->opcode));
failed:
kfree_skb(skb);
return ERR_PTR(-ENODATA);
}
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
void *param, u8 event, u32 timeout)
{
DECLARE_WAITQUEUE(wait, current);
struct hci_request req;
int err = 0;
BT_DBG("%s", hdev->name);
hci_req_init(&req, hdev);
hci_req_add_ev(&req, opcode, plen, param, event);
hdev->req_status = HCI_REQ_PEND;
err = hci_req_run(&req, hci_req_sync_complete);
if (err < 0)
return ERR_PTR(err);
add_wait_queue(&hdev->req_wait_q, &wait);
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(timeout);
remove_wait_queue(&hdev->req_wait_q, &wait);
if (signal_pending(current))
return ERR_PTR(-EINTR);
switch (hdev->req_status) {
case HCI_REQ_DONE:
err = -bt_to_errno(hdev->req_result);
break;
case HCI_REQ_CANCELED:
err = -hdev->req_result;
break;
default:
err = -ETIMEDOUT;
break;
}
hdev->req_status = hdev->req_result = 0;
BT_DBG("%s end: err %d", hdev->name, err);
if (err < 0)
return ERR_PTR(err);
return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
void *param, u32 timeout)
{
return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
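/* Illustrative sketch, not part of this patch: how a driver's new ->setup
 * callback could use __hci_cmd_sync() during hci_dev_open(). The opcode
 * 0xfc00 and the function name are invented for illustration;
 * btusb_setup_bcm92035() above is the real in-tree user of this API. */
static int example_vendor_setup(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	/* send one vendor command and wait for its Command Complete event */
	skb = __hci_cmd_sync(hdev, 0xfc00, 0, NULL, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);		/* response parameters not needed here */
	return 0;
}
/* A driver would then assign "hdev->setup = example_vendor_setup;" in its
 * probe() routine, mirroring the btusb change in this merge. */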
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
void (*func)(struct hci_request *req,
......@@ -201,29 +316,9 @@ static void amp_init(struct hci_request *req)
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
struct hci_dev *hdev = req->hdev;
struct hci_request init_req;
struct sk_buff *skb;
BT_DBG("%s %ld", hdev->name, opt);
/* Driver initialization */
hci_req_init(&init_req, hdev);
/* Special commands */
while ((skb = skb_dequeue(&hdev->driver_init))) {
bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
skb->dev = (void *) hdev;
if (skb_queue_empty(&init_req.cmd_q))
bt_cb(skb)->req.start = true;
skb_queue_tail(&init_req.cmd_q, skb);
}
skb_queue_purge(&hdev->driver_init);
hci_req_run(&init_req, NULL);
/* Reset */
if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
hci_reset_req(req, 0);
......@@ -494,6 +589,7 @@ static void hci_set_le_support(struct hci_request *req)
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
struct hci_dev *hdev = req->hdev;
u8 p;
if (hdev->commands[5] & 0x10)
hci_setup_link_policy(req);
......@@ -502,6 +598,15 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
hci_set_le_support(req);
hci_update_ad(req);
}
/* Read features beyond page 1 if available */
for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
struct hci_cp_read_local_ext_features cp;
cp.page = p;
hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
sizeof(cp), &cp);
}
}
static int __hci_init(struct hci_dev *hdev)
......@@ -818,6 +923,12 @@ static void hci_inq_req(struct hci_request *req, unsigned long opt)
hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
static int wait_inquiry(void *word)
{
schedule();
return signal_pending(current);
}
int hci_inquiry(void __user *arg)
{
__u8 __user *ptr = arg;
......@@ -849,6 +960,13 @@ int hci_inquiry(void __user *arg)
timeo);
if (err < 0)
goto done;
/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
* cleared). If it is interrupted by a signal, return -EINTR.
*/
if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
TASK_INTERRUPTIBLE))
return -EINTR;
}
/* for unlimited number of responses we will use buffer with
......@@ -999,26 +1117,33 @@ int hci_dev_open(__u16 dev)
goto done;
}
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
set_bit(HCI_RAW, &hdev->flags);
/* Treat all non BR/EDR controllers as raw devices if
enable_hs is not set */
if (hdev->dev_type != HCI_BREDR && !enable_hs)
set_bit(HCI_RAW, &hdev->flags);
if (hdev->open(hdev)) {
ret = -EIO;
goto done;
}
if (!test_bit(HCI_RAW, &hdev->flags)) {
atomic_set(&hdev->cmd_cnt, 1);
set_bit(HCI_INIT, &hdev->flags);
if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
ret = hdev->setup(hdev);
if (!ret) {
/* Treat all non BR/EDR controllers as raw devices if
* enable_hs is not set.
*/
if (hdev->dev_type != HCI_BREDR && !enable_hs)
set_bit(HCI_RAW, &hdev->flags);
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
set_bit(HCI_RAW, &hdev->flags);
if (!test_bit(HCI_RAW, &hdev->flags))
ret = __hci_init(hdev);
clear_bit(HCI_INIT, &hdev->flags);
}
clear_bit(HCI_INIT, &hdev->flags);
if (!ret) {
hci_dev_hold(hdev);
set_bit(HCI_UP, &hdev->flags);
......@@ -1123,6 +1248,9 @@ static int hci_dev_do_close(struct hci_dev *hdev)
hdev->sent_cmd = NULL;
}
kfree_skb(hdev->recv_evt);
hdev->recv_evt = NULL;
/* After this point our queues are empty
* and no tasks are scheduled. */
hdev->close(hdev);
......@@ -1861,8 +1989,8 @@ static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
struct hci_cp_le_set_scan_enable cp;
memset(&cp, 0, sizeof(cp));
cp.enable = 1;
cp.filter_dup = 1;
cp.enable = LE_SCAN_ENABLE;
cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
......@@ -1896,7 +2024,7 @@ static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
return err;
queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
msecs_to_jiffies(timeout));
timeout);
return 0;
}
......@@ -2006,7 +2134,6 @@ struct hci_dev *hci_alloc_dev(void)
INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
skb_queue_head_init(&hdev->driver_init);
skb_queue_head_init(&hdev->rx_q);
skb_queue_head_init(&hdev->cmd_q);
skb_queue_head_init(&hdev->raw_q);
......@@ -2025,8 +2152,6 @@ EXPORT_SYMBOL(hci_alloc_dev);
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
skb_queue_purge(&hdev->driver_init);
/* will free via device release */
put_device(&hdev->dev);
}
......@@ -2527,7 +2652,8 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
}
/* Queue a command to an asynchronous HCI request */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, void *param,
u8 event)
{
struct hci_dev *hdev = req->hdev;
struct sk_buff *skb;
......@@ -2551,9 +2677,16 @@ void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
if (skb_queue_empty(&req->cmd_q))
bt_cb(skb)->req.start = true;
bt_cb(skb)->req.event = event;
skb_queue_tail(&req->cmd_q, skb);
}
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
{
hci_req_add_ev(req, opcode, plen, param, 0);
}
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
......@@ -3309,32 +3442,6 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
req_complete(hdev, status);
}
void hci_req_cmd_status(struct hci_dev *hdev, u16 opcode, u8 status)
{
hci_req_complete_t req_complete = NULL;
BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
if (status) {
hci_req_cmd_complete(hdev, opcode, status);
return;
}
/* No need to handle success status if there are more commands */
if (!hci_req_is_complete(hdev))
return;
if (hdev->sent_cmd)
req_complete = bt_cb(hdev->sent_cmd)->req.complete;
/* If the request doesn't have a complete callback or there
* are other commands/requests in the hdev queue we consider
* this request as completed.
*/
if (!req_complete || !skb_queue_empty(&hdev->cmd_q))
hci_req_cmd_complete(hdev, opcode, status);
}
static void hci_rx_work(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
......
This diff is collapsed.
......@@ -48,10 +48,10 @@ static ssize_t show_link_features(struct device *dev,
struct hci_conn *conn = to_hci_conn(dev);
return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
conn->features[0], conn->features[1],
conn->features[2], conn->features[3],
conn->features[4], conn->features[5],
conn->features[6], conn->features[7]);
conn->features[0][0], conn->features[0][1],
conn->features[0][2], conn->features[0][3],
conn->features[0][4], conn->features[0][5],
conn->features[0][6], conn->features[0][7]);
}
#define LINK_ATTR(_name, _mode, _show, _store) \
......@@ -146,7 +146,6 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
}
device_del(&conn->dev);
put_device(&conn->dev);
hci_dev_put(hdev);
}
......@@ -234,10 +233,10 @@ static ssize_t show_features(struct device *dev,
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
hdev->features[0], hdev->features[1],
hdev->features[2], hdev->features[3],
hdev->features[4], hdev->features[5],
hdev->features[6], hdev->features[7]);
hdev->features[0][0], hdev->features[0][1],
hdev->features[0][2], hdev->features[0][3],
hdev->features[0][4], hdev->features[0][5],
hdev->features[0][6], hdev->features[0][7]);
}
static ssize_t show_manufacturer(struct device *dev,
......
This diff is collapsed.
......@@ -24,7 +24,9 @@
#define __HIDP_H
#include <linux/types.h>
#include <linux/kref.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/l2cap.h>
/* HIDP header masks */
#define HIDP_HEADER_TRANS_MASK 0xf0
......@@ -119,43 +121,52 @@ struct hidp_connlist_req {
struct hidp_conninfo __user *ci;
};
int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock);
int hidp_del_connection(struct hidp_conndel_req *req);
int hidp_connection_add(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock);
int hidp_connection_del(struct hidp_conndel_req *req);
int hidp_get_connlist(struct hidp_connlist_req *req);
int hidp_get_conninfo(struct hidp_conninfo *ci);
enum hidp_session_state {
HIDP_SESSION_IDLING,
HIDP_SESSION_RUNNING,
};
/* HIDP session defines */
struct hidp_session {
struct list_head list;
struct kref ref;
struct hci_conn *conn;
/* runtime management */
atomic_t state;
wait_queue_head_t state_queue;
atomic_t terminate;
struct task_struct *task;
unsigned long flags;
/* connection management */
bdaddr_t bdaddr;
struct l2cap_conn *conn;
struct l2cap_user user;
struct socket *ctrl_sock;
struct socket *intr_sock;
bdaddr_t bdaddr;
unsigned long state;
unsigned long flags;
unsigned long idle_to;
struct sk_buff_head ctrl_transmit;
struct sk_buff_head intr_transmit;
uint ctrl_mtu;
uint intr_mtu;
unsigned long idle_to;
atomic_t terminate;
struct task_struct *task;
unsigned char keys[8];
unsigned char leds;
/* device management */
struct input_dev *input;
struct hid_device *hid;
struct timer_list timer;
struct sk_buff_head ctrl_transmit;
struct sk_buff_head intr_transmit;
/* Report descriptor */
__u8 *rd_data;
uint rd_size;
/* session data */
unsigned char keys[8];
unsigned char leds;
/* Used in hidp_get_raw_report() */
int waiting_report_type; /* HIDP_DATA_RTYPE_* */
......@@ -166,24 +177,8 @@ struct hidp_session {
/* Used in hidp_output_raw_report() */
int output_report_success; /* boolean */
/* Report descriptor */
__u8 *rd_data;
uint rd_size;
wait_queue_head_t startup_queue;
int waiting_for_startup;
};
static inline void hidp_schedule(struct hidp_session *session)
{
struct sock *ctrl_sk = session->ctrl_sock->sk;
struct sock *intr_sk = session->intr_sock->sk;
wake_up_interruptible(sk_sleep(ctrl_sk));
wake_up_interruptible(sk_sleep(intr_sk));
}
/* HIDP init defines */
extern int __init hidp_init_sockets(void);
extern void __exit hidp_cleanup_sockets(void);
......
......@@ -77,21 +77,12 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
return err;
}
if (csock->sk->sk_state != BT_CONNECTED ||
isock->sk->sk_state != BT_CONNECTED) {
sockfd_put(csock);
sockfd_put(isock);
return -EBADFD;
}
err = hidp_add_connection(&ca, csock, isock);
if (!err) {
if (copy_to_user(argp, &ca, sizeof(ca)))
err = hidp_connection_add(&ca, csock, isock);
if (!err && copy_to_user(argp, &ca, sizeof(ca)))
err = -EFAULT;
} else {
sockfd_put(csock);
sockfd_put(isock);
}
return err;
......@@ -102,7 +93,7 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
if (copy_from_user(&cd, argp, sizeof(cd)))
return -EFAULT;
return hidp_del_connection(&cd);
return hidp_connection_del(&cd);
case HIDPGETCONNLIST:
if (copy_from_user(&cl, argp, sizeof(cl)))
......@@ -296,7 +287,6 @@ int __init hidp_init_sockets(void)
return 0;
error:
BT_ERR("Can't register HIDP socket");
proto_unregister(&hidp_proto);
return err;
}
......
......@@ -571,7 +571,7 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
chan->conn = NULL;
if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
hci_conn_put(conn->hcon);
hci_conn_drop(conn->hcon);
if (mgr && mgr->bredr_chan == chan)
mgr->bredr_chan = NULL;
......@@ -1446,6 +1446,89 @@ static void l2cap_info_timeout(struct work_struct *work)
l2cap_conn_start(conn);
}
/*
* l2cap_user
* External modules can register l2cap_user objects on l2cap_conn. The ->probe
* callback is called during registration. The ->remove callback is called
* during unregistration.
* An l2cap_user object can be unregistered either explicitly or implicitly when
* the underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
* l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
* External modules must own a reference to the l2cap_conn object if they intend
* to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
* any time if they don't.
*/
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
struct hci_dev *hdev = conn->hcon->hdev;
int ret;
/* We need to check whether l2cap_conn is registered. If it is not, we
* must not register the l2cap_user. l2cap_conn_del() is used to unregister
* l2cap_conn objects, but doesn't provide its own locking. Instead, it
* relies on the parent hci_conn object to be locked. This itself relies
* on the hci_dev object to be locked. So we must lock the hci device
* here, too. */
hci_dev_lock(hdev);
if (user->list.next || user->list.prev) {
ret = -EINVAL;
goto out_unlock;
}
/* conn->hchan is NULL after l2cap_conn_del() was called */
if (!conn->hchan) {
ret = -ENODEV;
goto out_unlock;
}
ret = user->probe(conn, user);
if (ret)
goto out_unlock;
list_add(&user->list, &conn->users);
ret = 0;
out_unlock:
hci_dev_unlock(hdev);
return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
struct hci_dev *hdev = conn->hcon->hdev;
hci_dev_lock(hdev);
if (!user->list.next || !user->list.prev)
goto out_unlock;
list_del(&user->list);
user->list.next = NULL;
user->list.prev = NULL;
user->remove(conn, user);
out_unlock:
hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
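/* Illustrative sketch, not part of this patch: a minimal external user of the
 * l2cap_user API documented above. l2cap_user, l2cap_register_user(),
 * l2cap_unregister_user() and l2cap_conn_get()/put() come from this merge;
 * the example callbacks and wrapper struct are invented for illustration.
 * The wrapper must be zero-initialized (e.g. kzalloc) so that user->list is
 * empty when l2cap_register_user() checks it. */
struct example_user {
	struct l2cap_user user;
};

static int example_probe(struct l2cap_conn *conn, struct l2cap_user *user)
{
	/* conn->hcon and conn->hchan stay valid until ->remove() runs */
	l2cap_conn_get(conn);	/* hold conn so a later unregister is safe */
	return 0;
}

static void example_remove(struct l2cap_conn *conn, struct l2cap_user *user)
{
	/* called on explicit unregister or when the l2cap_conn is deleted */
	l2cap_conn_put(conn);
}

static int example_attach(struct l2cap_conn *conn, struct example_user *eu)
{
	eu->user.probe = example_probe;
	eu->user.remove = example_remove;
	return l2cap_register_user(conn, &eu->user);
}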
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
struct l2cap_user *user;
while (!list_empty(&conn->users)) {
user = list_first_entry(&conn->users, struct l2cap_user, list);
list_del(&user->list);
user->list.next = NULL;
user->list.prev = NULL;
user->remove(conn, user);
}
}
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
struct l2cap_conn *conn = hcon->l2cap_data;
......@@ -1458,6 +1541,8 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
kfree_skb(conn->rx_skb);
l2cap_unregister_all_users(conn);
mutex_lock(&conn->chan_lock);
/* Kill channels */
......@@ -1486,7 +1571,8 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
}
hcon->l2cap_data = NULL;
kfree(conn);
conn->hchan = NULL;
l2cap_conn_put(conn);
}
static void security_timeout(struct work_struct *work)
......@@ -1502,12 +1588,12 @@ static void security_timeout(struct work_struct *work)
}
}
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
struct l2cap_conn *conn = hcon->l2cap_data;
struct hci_chan *hchan;
if (conn || status)
if (conn)
return conn;
hchan = hci_chan_create(hcon);
......@@ -1520,8 +1606,10 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
return NULL;
}
kref_init(&conn->ref);
hcon->l2cap_data = conn;
conn->hcon = hcon;
hci_conn_get(conn->hcon);
conn->hchan = hchan;
BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
......@@ -1547,6 +1635,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
mutex_init(&conn->chan_lock);
INIT_LIST_HEAD(&conn->chan_l);
INIT_LIST_HEAD(&conn->users);
if (hcon->type == LE_LINK)
INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
......@@ -1558,6 +1647,26 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
return conn;
}
static void l2cap_conn_free(struct kref *ref)
{
struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
hci_conn_put(conn->hcon);
kfree(conn);
}
void l2cap_conn_get(struct l2cap_conn *conn)
{
kref_get(&conn->ref);
}
EXPORT_SYMBOL(l2cap_conn_get);
void l2cap_conn_put(struct l2cap_conn *conn)
{
kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
/* ---- Socket interface ---- */
/* Find socket with psm and source / destination bdaddr.
......@@ -1695,9 +1804,9 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
goto done;
}
conn = l2cap_conn_add(hcon, 0);
conn = l2cap_conn_add(hcon);
if (!conn) {
hci_conn_put(hcon);
hci_conn_drop(hcon);
err = -ENOMEM;
goto done;
}
......@@ -1707,7 +1816,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
if (!list_empty(&conn->chan_l)) {
err = -EBUSY;
hci_conn_put(hcon);
hci_conn_drop(hcon);
}
if (err)
......@@ -6313,7 +6422,7 @@ void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
if (!status) {
conn = l2cap_conn_add(hcon, status);
conn = l2cap_conn_add(hcon);
if (conn)
l2cap_conn_ready(conn);
} else {
......@@ -6482,7 +6591,7 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
goto drop;
if (!conn)
conn = l2cap_conn_add(hcon, 0);
conn = l2cap_conn_add(hcon);
if (!conn)
goto drop;
......
......@@ -43,6 +43,12 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent);
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
int proto, gfp_t prio);
bool l2cap_is_socket(struct socket *sock)
{
return sock && sock->ops == &l2cap_sock_ops;
}
EXPORT_SYMBOL(l2cap_is_socket);
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
struct sock *sk = sock->sk;
......
......@@ -106,11 +106,10 @@ static const u16 mgmt_events[] = {
* These LE scan and inquiry parameters were chosen according to LE General
* Discovery Procedure specification.
*/
#define LE_SCAN_TYPE 0x01
#define LE_SCAN_WIN 0x12
#define LE_SCAN_INT 0x12
#define LE_SCAN_TIMEOUT_LE_ONLY 10240 /* TGAP(gen_disc_scan_min) */
#define LE_SCAN_TIMEOUT_BREDR_LE 5120 /* TGAP(100)/2 */
#define LE_SCAN_TIMEOUT_LE_ONLY msecs_to_jiffies(10240)
#define LE_SCAN_TIMEOUT_BREDR_LE msecs_to_jiffies(5120)
#define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
#define INQUIRY_LEN_BREDR_LE 0x04 /* TGAP(100)/2 */
......@@ -2131,7 +2130,7 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status)
conn->security_cfm_cb = NULL;
conn->disconn_cfm_cb = NULL;
hci_conn_put(conn);
hci_conn_drop(conn);
mgmt_pending_remove(cmd);
}
......@@ -2222,7 +2221,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
}
if (conn->connect_cfm_cb) {
hci_conn_put(conn);
hci_conn_drop(conn);
err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
MGMT_STATUS_BUSY, &rp, sizeof(rp));
goto unlock;
......@@ -2231,7 +2230,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
hci_conn_put(conn);
hci_conn_drop(conn);
goto unlock;
}
......@@ -2703,7 +2702,7 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
goto failed;
}
err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
err = hci_le_scan(hdev, LE_SCAN_ACTIVE, LE_SCAN_INT,
LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
break;
......@@ -2715,8 +2714,8 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
goto failed;
}
err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT, LE_SCAN_WIN,
LE_SCAN_TIMEOUT_BREDR_LE);
err = hci_le_scan(hdev, LE_SCAN_ACTIVE, LE_SCAN_INT,
LE_SCAN_WIN, LE_SCAN_TIMEOUT_BREDR_LE);
break;
default:
......
......@@ -83,7 +83,7 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
if (conn)
return conn;
conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC);
conn = kzalloc(sizeof(struct sco_conn), GFP_KERNEL);
if (!conn)
return NULL;
......@@ -185,7 +185,7 @@ static int sco_connect(struct sock *sk)
conn = sco_conn_add(hcon);
if (!conn) {
hci_conn_put(hcon);
hci_conn_drop(hcon);
err = -ENOMEM;
goto done;
}
......@@ -353,7 +353,7 @@ static void __sco_sock_close(struct sock *sk)
if (sco_pi(sk)->conn->hcon) {
sk->sk_state = BT_DISCONN;
sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
hci_conn_put(sco_pi(sk)->conn->hcon);
hci_conn_drop(sco_pi(sk)->conn->hcon);
sco_pi(sk)->conn->hcon = NULL;
} else
sco_chan_del(sk, ECONNRESET);
......@@ -481,8 +481,7 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
int err = 0;
int err;
BT_DBG("sk %p", sk);
......@@ -653,6 +652,42 @@ static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
return err;
}
static void sco_conn_defer_accept(struct hci_conn *conn, int mask)
{
struct hci_dev *hdev = conn->hdev;
BT_DBG("conn %p", conn);
conn->state = BT_CONFIG;
if (!lmp_esco_capable(hdev)) {
struct hci_cp_accept_conn_req cp;
bacpy(&cp.bdaddr, &conn->dst);
if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
cp.role = 0x00; /* Become master */
else
cp.role = 0x01; /* Remain slave */
hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
} else {
struct hci_cp_accept_sync_conn_req cp;
bacpy(&cp.bdaddr, &conn->dst);
cp.pkt_type = cpu_to_le16(conn->pkt_type);
cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
cp.max_latency = __constant_cpu_to_le16(0xffff);
cp.content_format = cpu_to_le16(hdev->voice_setting);
cp.retrans_effort = 0xff;
hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
sizeof(cp), &cp);
}
}
static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
......@@ -663,7 +698,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
if (sk->sk_state == BT_CONNECT2 &&
test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
hci_conn_accept(pi->conn->hcon, 0);
sco_conn_defer_accept(pi->conn->hcon, 0);
sk->sk_state = BT_CONFIG;
release_sock(sk);
......@@ -882,7 +917,7 @@ static void sco_chan_del(struct sock *sk, int err)
sco_conn_unlock(conn);
if (conn->hcon)
hci_conn_put(conn->hcon);
hci_conn_drop(conn->hcon);
}
sk->sk_state = BT_CLOSED;
......
......@@ -522,7 +522,7 @@ void smp_chan_destroy(struct l2cap_conn *conn)
kfree(smp);
conn->smp_chan = NULL;
conn->hcon->smp_conn = NULL;
hci_conn_put(conn->hcon);
hci_conn_drop(conn->hcon);
}
int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)
......