Commit e563589f authored by John W. Linville


Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next
parents 197bbf0a 76a388be
......@@ -90,6 +90,7 @@ static struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x13d3, 0x3393) },
{ USB_DEVICE(0x0489, 0xe04e) },
{ USB_DEVICE(0x0489, 0xe056) },
{ USB_DEVICE(0x0489, 0xe04d) },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE02C) },
......@@ -126,6 +127,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU22 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
......
......@@ -148,6 +148,7 @@ static struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
......@@ -926,6 +927,22 @@ static void btusb_waker(struct work_struct *work)
usb_autopm_put_interface(data->intf);
}
static int btusb_setup_bcm92035(struct hci_dev *hdev)
{
struct sk_buff *skb;
u8 val = 0x00;
BT_DBG("%s", hdev->name);
skb = __hci_cmd_sync(hdev, 0xfc3b, 1, &val, HCI_INIT_TIMEOUT);
if (IS_ERR(skb))
BT_ERR("BCM92035 command failed (%ld)", -PTR_ERR(skb));
else
kfree_skb(skb);
return 0;
}
static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
......@@ -1022,11 +1039,14 @@ static int btusb_probe(struct usb_interface *intf,
SET_HCIDEV_DEV(hdev, &intf->dev);
hdev->open = btusb_open;
hdev->close = btusb_close;
hdev->flush = btusb_flush;
hdev->send = btusb_send_frame;
hdev->notify = btusb_notify;
hdev->open = btusb_open;
hdev->close = btusb_close;
hdev->flush = btusb_flush;
hdev->send = btusb_send_frame;
hdev->notify = btusb_notify;
if (id->driver_info & BTUSB_BCM92035)
hdev->setup = btusb_setup_bcm92035;
/* Interface numbers are hardcoded in the specification */
data->isoc = usb_ifnum_to_if(data->udev, 1);
......@@ -1065,17 +1085,6 @@ static int btusb_probe(struct usb_interface *intf,
data->isoc = NULL;
}
if (id->driver_info & BTUSB_BCM92035) {
unsigned char cmd[] = { 0x3b, 0xfc, 0x01, 0x00 };
struct sk_buff *skb;
skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
if (skb) {
memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd));
skb_queue_tail(&hdev->driver_init, skb);
}
}
if (data->isoc) {
err = usb_driver_claim_interface(&btusb_driver,
data->isoc, data);
......
......@@ -153,6 +153,9 @@ static int h4_recv(struct hci_uart *hu, void *data, int count)
{
int ret;
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
ret = hci_recv_stream_fragment(hu->hdev, data, count);
if (ret < 0) {
BT_ERR("Frame Reassembly Failed");
......
......@@ -260,12 +260,12 @@ static int hci_uart_send_frame(struct sk_buff *skb)
/* ------ LDISC part ------ */
/* hci_uart_tty_open
*
*
* Called when line discipline changed to HCI_UART.
*
* Arguments:
* tty pointer to tty info structure
* Return Value:
* Return Value:
* 0 if success, otherwise error code
*/
static int hci_uart_tty_open(struct tty_struct *tty)
......@@ -365,15 +365,15 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
}
/* hci_uart_tty_receive()
*
*
* Called by tty low level driver when receive data is
* available.
*
*
* Arguments: tty pointer to tty instance data
* data pointer to received data
* flags pointer to flags for data
* count count of received data in bytes
*
*
* Return Value: None
*/
static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *flags, int count)
......@@ -388,7 +388,10 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *f
spin_lock(&hu->rx_lock);
hu->proto->recv(hu, (void *) data, count);
hu->hdev->stat.byte_rx += count;
if (hu->hdev)
hu->hdev->stat.byte_rx += count;
spin_unlock(&hu->rx_lock);
tty_unthrottle(tty);
......
......@@ -193,11 +193,11 @@ static inline bool bdaddr_type_is_le(__u8 type)
#define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff} })
/* Copy, swap, convert BD Address */
static inline int bacmp(bdaddr_t *ba1, bdaddr_t *ba2)
static inline int bacmp(const bdaddr_t *ba1, const bdaddr_t *ba2)
{
return memcmp(ba1, ba2, sizeof(bdaddr_t));
}
static inline void bacpy(bdaddr_t *dst, bdaddr_t *src)
static inline void bacpy(bdaddr_t *dst, const bdaddr_t *src)
{
memcpy(dst, src, sizeof(bdaddr_t));
}
......@@ -266,6 +266,7 @@ typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status);
struct hci_req_ctrl {
bool start;
u8 event;
hci_req_complete_t complete;
};
......
......@@ -984,6 +984,9 @@ struct hci_cp_le_set_adv_data {
#define HCI_OP_LE_SET_ADV_ENABLE 0x200a
#define LE_SCAN_PASSIVE 0x00
#define LE_SCAN_ACTIVE 0x01
#define HCI_OP_LE_SET_SCAN_PARAM 0x200b
struct hci_cp_le_set_scan_param {
__u8 type;
......@@ -993,8 +996,10 @@ struct hci_cp_le_set_scan_param {
__u8 filter_policy;
} __packed;
#define LE_SCANNING_DISABLED 0x00
#define LE_SCANNING_ENABLED 0x01
#define LE_SCAN_DISABLE 0x00
#define LE_SCAN_ENABLE 0x01
#define LE_SCAN_FILTER_DUP_DISABLE 0x00
#define LE_SCAN_FILTER_DUP_ENABLE 0x01
#define HCI_OP_LE_SET_SCAN_ENABLE 0x200c
struct hci_cp_le_set_scan_enable {
......
......@@ -134,6 +134,8 @@ struct amp_assoc {
__u8 data[HCI_MAX_AMP_ASSOC_SIZE];
};
#define HCI_MAX_PAGES 3
#define NUM_REASSEMBLY 4
struct hci_dev {
struct list_head list;
......@@ -151,8 +153,8 @@ struct hci_dev {
__u8 dev_class[3];
__u8 major_class;
__u8 minor_class;
__u8 features[8];
__u8 host_features[8];
__u8 max_page;
__u8 features[HCI_MAX_PAGES][8];
__u8 le_features[8];
__u8 le_white_list_size;
__u8 le_states[8];
......@@ -244,6 +246,7 @@ struct hci_dev {
struct sk_buff_head raw_q;
struct sk_buff_head cmd_q;
struct sk_buff *recv_evt;
struct sk_buff *sent_cmd;
struct sk_buff *reassembly[NUM_REASSEMBLY];
......@@ -268,8 +271,6 @@ struct hci_dev {
struct hci_dev_stats stat;
struct sk_buff_head driver_init;
atomic_t promisc;
struct dentry *debugfs;
......@@ -292,6 +293,7 @@ struct hci_dev {
int (*open)(struct hci_dev *hdev);
int (*close)(struct hci_dev *hdev);
int (*flush)(struct hci_dev *hdev);
int (*setup)(struct hci_dev *hdev);
int (*send)(struct sk_buff *skb);
void (*notify)(struct hci_dev *hdev, unsigned int evt);
int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
......@@ -313,7 +315,7 @@ struct hci_conn {
bool out;
__u8 attempt;
__u8 dev_class[3];
__u8 features[8];
__u8 features[HCI_MAX_PAGES][8];
__u16 interval;
__u16 pkt_type;
__u16 link_policy;
......@@ -345,7 +347,6 @@ struct hci_conn {
struct timer_list auto_accept_timer;
struct device dev;
atomic_t devref;
struct hci_dev *hdev;
void *l2cap_data;
......@@ -584,7 +585,6 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);
void hci_conn_accept(struct hci_conn *conn, int mask);
struct hci_chan *hci_chan_create(struct hci_conn *conn);
void hci_chan_del(struct hci_chan *chan);
......@@ -601,8 +601,36 @@ int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
void hci_conn_hold_device(struct hci_conn *conn);
void hci_conn_put_device(struct hci_conn *conn);
/*
* hci_conn_get() and hci_conn_put() are used to control the life-time of an
* "hci_conn" object. They do not guarantee that the hci_conn object is running,
* working or anything else. They just guarantee that the object is available
* and can be dereferenced. So you can use its locks, local variables and any
* other constant data.
* Before accessing runtime data, you _must_ lock the object and then check that
* it is still running. As soon as you release the locks, the connection might
* get dropped, though.
*
* On the other hand, hci_conn_hold() and hci_conn_drop() are used to control
* how long the underlying connection is held. So every channel that runs on the
* hci_conn object calls this to prevent the connection from disappearing. As
* long as you hold a device, you must also guarantee that you have a valid
* reference to the device via hci_conn_get() (or the initial reference from
* hci_conn_add()).
* The hold()/drop() ref-count is known to drop below 0 sometimes, which doesn't
* break because nobody cares for that. But this means, we cannot use
* _get()/_drop() in it, but require the caller to have a valid ref (FIXME).
*/
static inline void hci_conn_get(struct hci_conn *conn)
{
get_device(&conn->dev);
}
static inline void hci_conn_put(struct hci_conn *conn)
{
put_device(&conn->dev);
}
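As a usage sketch only (not part of this commit): the comment above distinguishes object life-time (hci_conn_get()/hci_conn_put()) from connection life-time (hci_conn_hold()/hci_conn_drop()). A caller might combine them as below; the function name and the use of hci_dev_lock() for the runtime-data check are illustrative assumptions, not kernel API defined by this diff.
/* Hypothetical caller, for illustration only -- not part of this diff. */
static void example_use_conn(struct hci_conn *conn)
{
	hci_conn_get(conn);	/* object stays dereferenceable */
	hci_conn_hold(conn);	/* underlying link is kept up */

	hci_dev_lock(conn->hdev);
	if (conn->state == BT_CONNECTED) {
		/* safe to touch runtime data while locked and still connected */
	}
	hci_dev_unlock(conn->hdev);

	hci_conn_drop(conn);	/* may arm the disconnect timer */
	hci_conn_put(conn);	/* object may now be freed */
}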
static inline void hci_conn_hold(struct hci_conn *conn)
{
......@@ -612,7 +640,7 @@ static inline void hci_conn_hold(struct hci_conn *conn)
cancel_delayed_work(&conn->disc_work);
}
static inline void hci_conn_put(struct hci_conn *conn)
static inline void hci_conn_drop(struct hci_conn *conn)
{
BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));
......@@ -760,29 +788,29 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->dev.parent = (pdev))
/* ----- LMP capabilities ----- */
#define lmp_encrypt_capable(dev) ((dev)->features[0] & LMP_ENCRYPT)
#define lmp_rswitch_capable(dev) ((dev)->features[0] & LMP_RSWITCH)
#define lmp_hold_capable(dev) ((dev)->features[0] & LMP_HOLD)
#define lmp_sniff_capable(dev) ((dev)->features[0] & LMP_SNIFF)
#define lmp_park_capable(dev) ((dev)->features[1] & LMP_PARK)
#define lmp_inq_rssi_capable(dev) ((dev)->features[3] & LMP_RSSI_INQ)
#define lmp_esco_capable(dev) ((dev)->features[3] & LMP_ESCO)
#define lmp_bredr_capable(dev) (!((dev)->features[4] & LMP_NO_BREDR))
#define lmp_le_capable(dev) ((dev)->features[4] & LMP_LE)
#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
#define lmp_pause_enc_capable(dev) ((dev)->features[5] & LMP_PAUSE_ENC)
#define lmp_ext_inq_capable(dev) ((dev)->features[6] & LMP_EXT_INQ)
#define lmp_le_br_capable(dev) !!((dev)->features[6] & LMP_SIMUL_LE_BR)
#define lmp_ssp_capable(dev) ((dev)->features[6] & LMP_SIMPLE_PAIR)
#define lmp_no_flush_capable(dev) ((dev)->features[6] & LMP_NO_FLUSH)
#define lmp_lsto_capable(dev) ((dev)->features[7] & LMP_LSTO)
#define lmp_inq_tx_pwr_capable(dev) ((dev)->features[7] & LMP_INQ_TX_PWR)
#define lmp_ext_feat_capable(dev) ((dev)->features[7] & LMP_EXTFEATURES)
#define lmp_encrypt_capable(dev) ((dev)->features[0][0] & LMP_ENCRYPT)
#define lmp_rswitch_capable(dev) ((dev)->features[0][0] & LMP_RSWITCH)
#define lmp_hold_capable(dev) ((dev)->features[0][0] & LMP_HOLD)
#define lmp_sniff_capable(dev) ((dev)->features[0][0] & LMP_SNIFF)
#define lmp_park_capable(dev) ((dev)->features[0][1] & LMP_PARK)
#define lmp_inq_rssi_capable(dev) ((dev)->features[0][3] & LMP_RSSI_INQ)
#define lmp_esco_capable(dev) ((dev)->features[0][3] & LMP_ESCO)
#define lmp_bredr_capable(dev) (!((dev)->features[0][4] & LMP_NO_BREDR))
#define lmp_le_capable(dev) ((dev)->features[0][4] & LMP_LE)
#define lmp_sniffsubr_capable(dev) ((dev)->features[0][5] & LMP_SNIFF_SUBR)
#define lmp_pause_enc_capable(dev) ((dev)->features[0][5] & LMP_PAUSE_ENC)
#define lmp_ext_inq_capable(dev) ((dev)->features[0][6] & LMP_EXT_INQ)
#define lmp_le_br_capable(dev) (!!((dev)->features[0][6] & LMP_SIMUL_LE_BR))
#define lmp_ssp_capable(dev) ((dev)->features[0][6] & LMP_SIMPLE_PAIR)
#define lmp_no_flush_capable(dev) ((dev)->features[0][6] & LMP_NO_FLUSH)
#define lmp_lsto_capable(dev) ((dev)->features[0][7] & LMP_LSTO)
#define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR)
#define lmp_ext_feat_capable(dev) ((dev)->features[0][7] & LMP_EXTFEATURES)
/* ----- Extended LMP capabilities ----- */
#define lmp_host_ssp_capable(dev) ((dev)->host_features[0] & LMP_HOST_SSP)
#define lmp_host_le_capable(dev) !!((dev)->host_features[0] & LMP_HOST_LE)
#define lmp_host_le_br_capable(dev) !!((dev)->host_features[0] & LMP_HOST_LE_BREDR)
#define lmp_host_ssp_capable(dev) ((dev)->features[1][0] & LMP_HOST_SSP)
#define lmp_host_le_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE))
#define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR))
/* returns true if at least one AMP active */
static inline bool hci_amp_capable(void)
......@@ -1054,8 +1082,14 @@ struct hci_request {
void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param);
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, void *param,
u8 event);
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status);
void hci_req_cmd_status(struct hci_dev *hdev, u16 opcode, u8 status);
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
void *param, u32 timeout);
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
void *param, u8 event, u32 timeout);
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags);
......
......@@ -583,6 +583,14 @@ struct l2cap_conn {
struct list_head chan_l;
struct mutex chan_lock;
struct kref ref;
struct list_head users;
};
struct l2cap_user {
struct list_head list;
int (*probe) (struct l2cap_conn *conn, struct l2cap_user *user);
void (*remove) (struct l2cap_conn *conn, struct l2cap_user *user);
};
#define L2CAP_INFO_CL_MTU_REQ_SENT 0x01
......@@ -786,6 +794,7 @@ extern bool disable_ertm;
int l2cap_init_sockets(void);
void l2cap_cleanup_sockets(void);
bool l2cap_is_socket(struct socket *sock);
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan);
int __l2cap_wait_ack(struct sock *sk);
......@@ -812,4 +821,10 @@ void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
u8 status);
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result);
void l2cap_conn_get(struct l2cap_conn *conn);
void l2cap_conn_put(struct l2cap_conn *conn);
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user);
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user);
#endif /* __L2CAP_H */
......@@ -117,6 +117,16 @@ static void hci_acl_create_connection_cancel(struct hci_conn *conn)
hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}
static void hci_reject_sco(struct hci_conn *conn)
{
struct hci_cp_reject_sync_conn_req cp;
cp.reason = HCI_ERROR_REMOTE_USER_TERM;
bacpy(&cp.bdaddr, &conn->dst);
hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
}
void hci_disconnect(struct hci_conn *conn, __u8 reason)
{
struct hci_cp_disconnect cp;
......@@ -276,6 +286,8 @@ static void hci_conn_timeout(struct work_struct *work)
hci_acl_create_connection_cancel(conn);
else if (conn->type == LE_LINK)
hci_le_create_connection_cancel(conn);
} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
hci_reject_sco(conn);
}
break;
case BT_CONFIG:
......@@ -398,8 +410,6 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
atomic_set(&conn->devref, 0);
hci_conn_init_sysfs(conn);
return conn;
......@@ -433,7 +443,7 @@ int hci_conn_del(struct hci_conn *conn)
struct hci_conn *acl = conn->link;
if (acl) {
acl->link = NULL;
hci_conn_put(acl);
hci_conn_drop(acl);
}
}
......@@ -448,12 +458,11 @@ int hci_conn_del(struct hci_conn *conn)
skb_queue_purge(&conn->data_q);
hci_conn_put_device(conn);
hci_conn_del_sysfs(conn);
hci_dev_put(hdev);
if (conn->handle == 0)
kfree(conn);
hci_conn_put(conn);
return 0;
}
......@@ -565,7 +574,7 @@ static struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type,
if (!sco) {
sco = hci_conn_add(hdev, type, dst);
if (!sco) {
hci_conn_put(acl);
hci_conn_drop(acl);
return ERR_PTR(-ENOMEM);
}
}
......@@ -835,19 +844,6 @@ void hci_conn_check_pending(struct hci_dev *hdev)
hci_dev_unlock(hdev);
}
void hci_conn_hold_device(struct hci_conn *conn)
{
atomic_inc(&conn->devref);
}
EXPORT_SYMBOL(hci_conn_hold_device);
void hci_conn_put_device(struct hci_conn *conn)
{
if (atomic_dec_and_test(&conn->devref))
hci_conn_del_sysfs(conn);
}
EXPORT_SYMBOL(hci_conn_put_device);
int hci_get_conn_list(void __user *arg)
{
struct hci_conn *c;
......@@ -980,7 +976,7 @@ void hci_chan_del(struct hci_chan *chan)
synchronize_rcu();
hci_conn_put(conn);
hci_conn_drop(conn);
skb_queue_purge(&chan->data_q);
kfree(chan);
......
......@@ -79,6 +79,121 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
}
}
struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 event)
{
struct hci_ev_cmd_complete *ev;
struct hci_event_hdr *hdr;
struct sk_buff *skb;
hci_dev_lock(hdev);
skb = hdev->recv_evt;
hdev->recv_evt = NULL;
hci_dev_unlock(hdev);
if (!skb)
return ERR_PTR(-ENODATA);
if (skb->len < sizeof(*hdr)) {
BT_ERR("Too short HCI event");
goto failed;
}
hdr = (void *) skb->data;
skb_pull(skb, HCI_EVENT_HDR_SIZE);
if (event) {
if (hdr->evt != event)
goto failed;
return skb;
}
if (hdr->evt != HCI_EV_CMD_COMPLETE) {
BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
goto failed;
}
if (skb->len < sizeof(*ev)) {
BT_ERR("Too short cmd_complete event");
goto failed;
}
ev = (void *) skb->data;
skb_pull(skb, sizeof(*ev));
if (opcode == __le16_to_cpu(ev->opcode))
return skb;
BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
__le16_to_cpu(ev->opcode));
failed:
kfree_skb(skb);
return ERR_PTR(-ENODATA);
}
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
void *param, u8 event, u32 timeout)
{
DECLARE_WAITQUEUE(wait, current);
struct hci_request req;
int err = 0;
BT_DBG("%s", hdev->name);
hci_req_init(&req, hdev);
hci_req_add_ev(&req, opcode, plen, param, event);
hdev->req_status = HCI_REQ_PEND;
err = hci_req_run(&req, hci_req_sync_complete);
if (err < 0)
return ERR_PTR(err);
add_wait_queue(&hdev->req_wait_q, &wait);
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(timeout);
remove_wait_queue(&hdev->req_wait_q, &wait);
if (signal_pending(current))
return ERR_PTR(-EINTR);
switch (hdev->req_status) {
case HCI_REQ_DONE:
err = -bt_to_errno(hdev->req_result);
break;
case HCI_REQ_CANCELED:
err = -hdev->req_result;
break;
default:
err = -ETIMEDOUT;
break;
}
hdev->req_status = hdev->req_result = 0;
BT_DBG("%s end: err %d", hdev->name, err);
if (err < 0)
return ERR_PTR(err);
return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
void *param, u32 timeout)
{
return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
void (*func)(struct hci_request *req,
......@@ -201,29 +316,9 @@ static void amp_init(struct hci_request *req)
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
struct hci_dev *hdev = req->hdev;
struct hci_request init_req;
struct sk_buff *skb;
BT_DBG("%s %ld", hdev->name, opt);
/* Driver initialization */
hci_req_init(&init_req, hdev);
/* Special commands */
while ((skb = skb_dequeue(&hdev->driver_init))) {
bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
skb->dev = (void *) hdev;
if (skb_queue_empty(&init_req.cmd_q))
bt_cb(skb)->req.start = true;
skb_queue_tail(&init_req.cmd_q, skb);
}
skb_queue_purge(&hdev->driver_init);
hci_req_run(&init_req, NULL);
/* Reset */
if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
hci_reset_req(req, 0);
......@@ -494,6 +589,7 @@ static void hci_set_le_support(struct hci_request *req)
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
struct hci_dev *hdev = req->hdev;
u8 p;
if (hdev->commands[5] & 0x10)
hci_setup_link_policy(req);
......@@ -502,6 +598,15 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
hci_set_le_support(req);
hci_update_ad(req);
}
/* Read features beyond page 1 if available */
for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
struct hci_cp_read_local_ext_features cp;
cp.page = p;
hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
sizeof(cp), &cp);
}
}
static int __hci_init(struct hci_dev *hdev)
......@@ -818,6 +923,12 @@ static void hci_inq_req(struct hci_request *req, unsigned long opt)
hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
static int wait_inquiry(void *word)
{
schedule();
return signal_pending(current);
}
int hci_inquiry(void __user *arg)
{
__u8 __user *ptr = arg;
......@@ -849,6 +960,13 @@ int hci_inquiry(void __user *arg)
timeo);
if (err < 0)
goto done;
/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
* cleared). If it is interrupted by a signal, return -EINTR.
*/
if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
TASK_INTERRUPTIBLE))
return -EINTR;
}
/* for unlimited number of responses we will use buffer with
......@@ -999,26 +1117,33 @@ int hci_dev_open(__u16 dev)
goto done;
}
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
set_bit(HCI_RAW, &hdev->flags);
/* Treat all non BR/EDR controllers as raw devices if
enable_hs is not set */
if (hdev->dev_type != HCI_BREDR && !enable_hs)
set_bit(HCI_RAW, &hdev->flags);
if (hdev->open(hdev)) {
ret = -EIO;
goto done;
}
if (!test_bit(HCI_RAW, &hdev->flags)) {
atomic_set(&hdev->cmd_cnt, 1);
set_bit(HCI_INIT, &hdev->flags);
ret = __hci_init(hdev);
clear_bit(HCI_INIT, &hdev->flags);
atomic_set(&hdev->cmd_cnt, 1);
set_bit(HCI_INIT, &hdev->flags);
if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
ret = hdev->setup(hdev);
if (!ret) {
/* Treat all non BR/EDR controllers as raw devices if
* enable_hs is not set.
*/
if (hdev->dev_type != HCI_BREDR && !enable_hs)
set_bit(HCI_RAW, &hdev->flags);
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
set_bit(HCI_RAW, &hdev->flags);
if (!test_bit(HCI_RAW, &hdev->flags))
ret = __hci_init(hdev);
}
clear_bit(HCI_INIT, &hdev->flags);
if (!ret) {
hci_dev_hold(hdev);
set_bit(HCI_UP, &hdev->flags);
......@@ -1123,6 +1248,9 @@ static int hci_dev_do_close(struct hci_dev *hdev)
hdev->sent_cmd = NULL;
}
kfree_skb(hdev->recv_evt);
hdev->recv_evt = NULL;
/* After this point our queues are empty
* and no tasks are scheduled. */
hdev->close(hdev);
......@@ -1861,8 +1989,8 @@ static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
struct hci_cp_le_set_scan_enable cp;
memset(&cp, 0, sizeof(cp));
cp.enable = 1;
cp.filter_dup = 1;
cp.enable = LE_SCAN_ENABLE;
cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
......@@ -1896,7 +2024,7 @@ static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
return err;
queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
msecs_to_jiffies(timeout));
timeout);
return 0;
}
......@@ -2006,7 +2134,6 @@ struct hci_dev *hci_alloc_dev(void)
INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
skb_queue_head_init(&hdev->driver_init);
skb_queue_head_init(&hdev->rx_q);
skb_queue_head_init(&hdev->cmd_q);
skb_queue_head_init(&hdev->raw_q);
......@@ -2025,8 +2152,6 @@ EXPORT_SYMBOL(hci_alloc_dev);
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
skb_queue_purge(&hdev->driver_init);
/* will free via device release */
put_device(&hdev->dev);
}
......@@ -2527,7 +2652,8 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
}
/* Queue a command to an asynchronous HCI request */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, void *param,
u8 event)
{
struct hci_dev *hdev = req->hdev;
struct sk_buff *skb;
......@@ -2551,9 +2677,16 @@ void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
if (skb_queue_empty(&req->cmd_q))
bt_cb(skb)->req.start = true;
bt_cb(skb)->req.event = event;
skb_queue_tail(&req->cmd_q, skb);
}
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
{
hci_req_add_ev(req, opcode, plen, param, 0);
}
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
......@@ -3309,32 +3442,6 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
req_complete(hdev, status);
}
void hci_req_cmd_status(struct hci_dev *hdev, u16 opcode, u8 status)
{
hci_req_complete_t req_complete = NULL;
BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
if (status) {
hci_req_cmd_complete(hdev, opcode, status);
return;
}
/* No need to handle success status if there are more commands */
if (!hci_req_is_complete(hdev))
return;
if (hdev->sent_cmd)
req_complete = bt_cb(hdev->sent_cmd)->req.complete;
/* If the request doesn't have a complete callback or there
* are other commands/requests in the hdev queue we consider
* this request as completed.
*/
if (!req_complete || !skb_queue_empty(&hdev->cmd_q))
hci_req_cmd_complete(hdev, opcode, status);
}
static void hci_rx_work(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
......
......@@ -48,13 +48,13 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
}
clear_bit(HCI_INQUIRY, &hdev->flags);
smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
wake_up_bit(&hdev->flags, HCI_INQUIRY);
hci_dev_lock(hdev);
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
hci_dev_unlock(hdev);
hci_req_cmd_complete(hdev, HCI_OP_INQUIRY, status);
hci_conn_check_pending(hdev);
}
......@@ -433,9 +433,9 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
if (!status) {
if (sent->mode)
hdev->host_features[0] |= LMP_HOST_SSP;
hdev->features[1][0] |= LMP_HOST_SSP;
else
hdev->host_features[0] &= ~LMP_HOST_SSP;
hdev->features[1][0] &= ~LMP_HOST_SSP;
}
if (test_bit(HCI_MGMT, &hdev->dev_flags))
......@@ -493,18 +493,18 @@ static void hci_cc_read_local_features(struct hci_dev *hdev,
/* Adjust default settings according to features
* supported by device. */
if (hdev->features[0] & LMP_3SLOT)
if (hdev->features[0][0] & LMP_3SLOT)
hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
if (hdev->features[0] & LMP_5SLOT)
if (hdev->features[0][0] & LMP_5SLOT)
hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
if (hdev->features[1] & LMP_HV2) {
if (hdev->features[0][1] & LMP_HV2) {
hdev->pkt_type |= (HCI_HV2);
hdev->esco_type |= (ESCO_HV2);
}
if (hdev->features[1] & LMP_HV3) {
if (hdev->features[0][1] & LMP_HV3) {
hdev->pkt_type |= (HCI_HV3);
hdev->esco_type |= (ESCO_HV3);
}
......@@ -512,26 +512,26 @@ static void hci_cc_read_local_features(struct hci_dev *hdev,
if (lmp_esco_capable(hdev))
hdev->esco_type |= (ESCO_EV3);
if (hdev->features[4] & LMP_EV4)
if (hdev->features[0][4] & LMP_EV4)
hdev->esco_type |= (ESCO_EV4);
if (hdev->features[4] & LMP_EV5)
if (hdev->features[0][4] & LMP_EV5)
hdev->esco_type |= (ESCO_EV5);
if (hdev->features[5] & LMP_EDR_ESCO_2M)
if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
hdev->esco_type |= (ESCO_2EV3);
if (hdev->features[5] & LMP_EDR_ESCO_3M)
if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
hdev->esco_type |= (ESCO_3EV3);
if (hdev->features[5] & LMP_EDR_3S_ESCO)
if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
hdev->features[0], hdev->features[1],
hdev->features[2], hdev->features[3],
hdev->features[4], hdev->features[5],
hdev->features[6], hdev->features[7]);
hdev->features[0][0], hdev->features[0][1],
hdev->features[0][2], hdev->features[0][3],
hdev->features[0][4], hdev->features[0][5],
hdev->features[0][6], hdev->features[0][7]);
}
static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
......@@ -544,14 +544,10 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
if (rp->status)
return;
switch (rp->page) {
case 0:
memcpy(hdev->features, rp->features, 8);
break;
case 1:
memcpy(hdev->host_features, rp->features, 8);
break;
}
hdev->max_page = rp->max_page;
if (rp->page < HCI_MAX_PAGES)
memcpy(hdev->features[rp->page], rp->features, 8);
}
static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
......@@ -968,7 +964,7 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
return;
switch (cp->enable) {
case LE_SCANNING_ENABLED:
case LE_SCAN_ENABLE:
if (status) {
hci_dev_lock(hdev);
mgmt_start_discovery_failed(hdev, status);
......@@ -983,7 +979,7 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
hci_dev_unlock(hdev);
break;
case LE_SCANNING_DISABLED:
case LE_SCAN_DISABLE:
if (status) {
hci_dev_lock(hdev);
mgmt_stop_discovery_failed(hdev, status);
......@@ -1046,14 +1042,14 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
if (!status) {
if (sent->le)
hdev->host_features[0] |= LMP_HOST_LE;
hdev->features[1][0] |= LMP_HOST_LE;
else
hdev->host_features[0] &= ~LMP_HOST_LE;
hdev->features[1][0] &= ~LMP_HOST_LE;
if (sent->simul)
hdev->host_features[0] |= LMP_HOST_LE_BREDR;
hdev->features[1][0] |= LMP_HOST_LE_BREDR;
else
hdev->host_features[0] &= ~LMP_HOST_LE_BREDR;
hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
}
if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
......@@ -1190,7 +1186,7 @@ static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
if (conn) {
if (conn->state == BT_CONFIG) {
hci_proto_connect_cfm(conn, status);
hci_conn_put(conn);
hci_conn_drop(conn);
}
}
......@@ -1217,7 +1213,7 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
if (conn) {
if (conn->state == BT_CONFIG) {
hci_proto_connect_cfm(conn, status);
hci_conn_put(conn);
hci_conn_drop(conn);
}
}
......@@ -1379,7 +1375,7 @@ static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
if (conn) {
if (conn->state == BT_CONFIG) {
hci_proto_connect_cfm(conn, status);
hci_conn_put(conn);
hci_conn_drop(conn);
}
}
......@@ -1406,7 +1402,7 @@ static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
if (conn) {
if (conn->state == BT_CONFIG) {
hci_proto_connect_cfm(conn, status);
hci_conn_put(conn);
hci_conn_drop(conn);
}
}
......@@ -1600,13 +1596,14 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s status 0x%2.2x", hdev->name, status);
hci_req_cmd_complete(hdev, HCI_OP_INQUIRY, status);
hci_conn_check_pending(hdev);
if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
return;
smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
wake_up_bit(&hdev->flags, HCI_INQUIRY);
if (!test_bit(HCI_MGMT, &hdev->dev_flags))
return;
......@@ -1705,7 +1702,6 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
} else
conn->state = BT_CONNECTED;
hci_conn_hold_device(conn);
hci_conn_add_sysfs(conn);
if (test_bit(HCI_AUTH, &hdev->flags))
......@@ -1752,42 +1748,6 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_conn_check_pending(hdev);
}
void hci_conn_accept(struct hci_conn *conn, int mask)
{
struct hci_dev *hdev = conn->hdev;
BT_DBG("conn %p", conn);
conn->state = BT_CONFIG;
if (!lmp_esco_capable(hdev)) {
struct hci_cp_accept_conn_req cp;
bacpy(&cp.bdaddr, &conn->dst);
if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
cp.role = 0x00; /* Become master */
else
cp.role = 0x01; /* Remain slave */
hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
} else /* lmp_esco_capable(hdev)) */ {
struct hci_cp_accept_sync_conn_req cp;
bacpy(&cp.bdaddr, &conn->dst);
cp.pkt_type = cpu_to_le16(conn->pkt_type);
cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
cp.max_latency = __constant_cpu_to_le16(0xffff);
cp.content_format = cpu_to_le16(hdev->voice_setting);
cp.retrans_effort = 0xff;
hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
sizeof(cp), &cp);
}
}
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_conn_request *ev = (void *) skb->data;
......@@ -1859,7 +1819,6 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
} else {
conn->state = BT_CONNECT2;
hci_proto_connect_cfm(conn, 0);
hci_conn_put(conn);
}
} else {
/* Connection rejected */
......@@ -1966,14 +1925,14 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
} else {
conn->state = BT_CONNECTED;
hci_proto_connect_cfm(conn, ev->status);
hci_conn_put(conn);
hci_conn_drop(conn);
}
} else {
hci_auth_cfm(conn, ev->status);
hci_conn_hold(conn);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
hci_conn_put(conn);
hci_conn_drop(conn);
}
if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
......@@ -2057,7 +2016,7 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
if (ev->status && conn->state == BT_CONNECTED) {
hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
hci_conn_put(conn);
hci_conn_drop(conn);
goto unlock;
}
......@@ -2066,7 +2025,7 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
conn->state = BT_CONNECTED;
hci_proto_connect_cfm(conn, ev->status);
hci_conn_put(conn);
hci_conn_drop(conn);
} else
hci_encrypt_cfm(conn, ev->status, ev->encrypt);
}
......@@ -2113,7 +2072,7 @@ static void hci_remote_features_evt(struct hci_dev *hdev,
goto unlock;
if (!ev->status)
memcpy(conn->features, ev->features, 8);
memcpy(conn->features[0], ev->features, 8);
if (conn->state != BT_CONFIG)
goto unlock;
......@@ -2141,7 +2100,7 @@ static void hci_remote_features_evt(struct hci_dev *hdev,
if (!hci_outgoing_auth_needed(hdev, conn)) {
conn->state = BT_CONNECTED;
hci_proto_connect_cfm(conn, ev->status);
hci_conn_put(conn);
hci_conn_drop(conn);
}
unlock:
......@@ -2462,7 +2421,9 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
if (opcode != HCI_OP_NOP)
del_timer(&hdev->cmd_timer);
hci_req_cmd_status(hdev, opcode, ev->status);
if (ev->status ||
(hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
hci_req_cmd_complete(hdev, opcode, ev->status);
if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
atomic_set(&hdev->cmd_cnt, 1);
......@@ -2679,7 +2640,7 @@ static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
if (conn->state == BT_CONNECTED) {
hci_conn_hold(conn);
conn->disc_timeout = HCI_PAIRING_TIMEOUT;
hci_conn_put(conn);
hci_conn_drop(conn);
}
if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
......@@ -2782,7 +2743,7 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
conn->key_type = ev->key_type;
hci_conn_put(conn);
hci_conn_drop(conn);
}
if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
......@@ -2923,6 +2884,9 @@ static void hci_remote_ext_features_evt(struct hci_dev *hdev,
if (!conn)
goto unlock;
if (ev->page < HCI_MAX_PAGES)
memcpy(conn->features[ev->page], ev->features, 8);
if (!ev->status && ev->page == 0x01) {
struct inquiry_entry *ie;
......@@ -2930,8 +2894,19 @@ static void hci_remote_ext_features_evt(struct hci_dev *hdev,
if (ie)
ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
if (ev->features[0] & LMP_HOST_SSP)
if (ev->features[0] & LMP_HOST_SSP) {
set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
} else {
/* It is mandatory by the Bluetooth specification that
* Extended Inquiry Results are only used when Secure
* Simple Pairing is enabled, but some devices violate
* this.
*
* To make these devices work, the internal SSP
* enabled flag needs to be cleared if the remote host
* features do not indicate SSP support */
clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
}
}
if (conn->state != BT_CONFIG)
......@@ -2951,7 +2926,7 @@ static void hci_remote_ext_features_evt(struct hci_dev *hdev,
if (!hci_outgoing_auth_needed(hdev, conn)) {
conn->state = BT_CONNECTED;
hci_proto_connect_cfm(conn, ev->status);
hci_conn_put(conn);
hci_conn_drop(conn);
}
unlock:
......@@ -2985,7 +2960,6 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
conn->handle = __le16_to_cpu(ev->handle);
conn->state = BT_CONNECTED;
hci_conn_hold_device(conn);
hci_conn_add_sysfs(conn);
break;
......@@ -3084,7 +3058,7 @@ static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
if (ev->status && conn->state == BT_CONNECTED) {
hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
hci_conn_put(conn);
hci_conn_drop(conn);
goto unlock;
}
......@@ -3093,13 +3067,13 @@ static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
conn->state = BT_CONNECTED;
hci_proto_connect_cfm(conn, ev->status);
hci_conn_put(conn);
hci_conn_drop(conn);
} else {
hci_auth_cfm(conn, ev->status);
hci_conn_hold(conn);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
hci_conn_put(conn);
hci_conn_drop(conn);
}
unlock:
......@@ -3360,7 +3334,7 @@ static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
ev->status);
hci_conn_put(conn);
hci_conn_drop(conn);
unlock:
hci_dev_unlock(hdev);
......@@ -3371,11 +3345,16 @@ static void hci_remote_host_features_evt(struct hci_dev *hdev,
{
struct hci_ev_remote_host_features *ev = (void *) skb->data;
struct inquiry_entry *ie;
struct hci_conn *conn;
BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
if (conn)
memcpy(conn->features[1], ev->features, 8);
ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
if (ie)
ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
......@@ -3448,9 +3427,8 @@ static void hci_phy_link_complete_evt(struct hci_dev *hdev,
hci_conn_hold(hcon);
hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
hci_conn_put(hcon);
hci_conn_drop(hcon);
hci_conn_hold_device(hcon);
hci_conn_add_sysfs(hcon);
amp_physical_cfm(bredr_hcon, hcon);
......@@ -3584,7 +3562,6 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
conn->handle = __le16_to_cpu(ev->handle);
conn->state = BT_CONNECTED;
hci_conn_hold_device(conn);
hci_conn_add_sysfs(conn);
hci_proto_connect_cfm(conn, ev->status);
......@@ -3698,8 +3675,27 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
struct hci_event_hdr *hdr = (void *) skb->data;
__u8 event = hdr->evt;
hci_dev_lock(hdev);
/* Received events are (currently) only needed when a request is
* ongoing so avoid unnecessary memory allocation.
*/
if (hdev->req_status == HCI_REQ_PEND) {
kfree_skb(hdev->recv_evt);
hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
}
hci_dev_unlock(hdev);
skb_pull(skb, HCI_EVENT_HDR_SIZE);
if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
struct hci_command_hdr *hdr = (void *) hdev->sent_cmd->data;
u16 opcode = __le16_to_cpu(hdr->opcode);
hci_req_cmd_complete(hdev, opcode, 0);
}
switch (event) {
case HCI_EV_INQUIRY_COMPLETE:
hci_inquiry_complete_evt(hdev, skb);
......
......@@ -48,10 +48,10 @@ static ssize_t show_link_features(struct device *dev,
struct hci_conn *conn = to_hci_conn(dev);
return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
conn->features[0], conn->features[1],
conn->features[2], conn->features[3],
conn->features[4], conn->features[5],
conn->features[6], conn->features[7]);
conn->features[0][0], conn->features[0][1],
conn->features[0][2], conn->features[0][3],
conn->features[0][4], conn->features[0][5],
conn->features[0][6], conn->features[0][7]);
}
#define LINK_ATTR(_name, _mode, _show, _store) \
......@@ -146,7 +146,6 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
}
device_del(&conn->dev);
put_device(&conn->dev);
hci_dev_put(hdev);
}
......@@ -234,10 +233,10 @@ static ssize_t show_features(struct device *dev,
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
hdev->features[0], hdev->features[1],
hdev->features[2], hdev->features[3],
hdev->features[4], hdev->features[5],
hdev->features[6], hdev->features[7]);
hdev->features[0][0], hdev->features[0][1],
hdev->features[0][2], hdev->features[0][3],
hdev->features[0][4], hdev->features[0][5],
hdev->features[0][6], hdev->features[0][7]);
}
static ssize_t show_manufacturer(struct device *dev,
......
/*
HIDP implementation for Linux Bluetooth stack (BlueZ).
Copyright (C) 2003-2004 Marcel Holtmann <marcel@holtmann.org>
Copyright (C) 2013 David Herrmann <dh.herrmann@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
......@@ -20,6 +21,7 @@
SOFTWARE IS DISCLAIMED.
*/
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/file.h>
#include <linux/kthread.h>
......@@ -59,39 +61,20 @@ static unsigned char hidp_keycode[256] = {
static unsigned char hidp_mkeyspat[] = { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 };
static struct hidp_session *__hidp_get_session(bdaddr_t *bdaddr)
{
struct hidp_session *session;
BT_DBG("");
static int hidp_session_probe(struct l2cap_conn *conn,
struct l2cap_user *user);
static void hidp_session_remove(struct l2cap_conn *conn,
struct l2cap_user *user);
static int hidp_session_thread(void *arg);
static void hidp_session_terminate(struct hidp_session *s);
list_for_each_entry(session, &hidp_session_list, list) {
if (!bacmp(bdaddr, &session->bdaddr))
return session;
}
return NULL;
}
static void __hidp_link_session(struct hidp_session *session)
{
list_add(&session->list, &hidp_session_list);
}
static void __hidp_unlink_session(struct hidp_session *session)
{
hci_conn_put_device(session->conn);
list_del(&session->list);
}
static void __hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci)
static void hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci)
{
memset(ci, 0, sizeof(*ci));
bacpy(&ci->bdaddr, &session->bdaddr);
ci->flags = session->flags;
ci->state = session->state;
ci->state = BT_CONNECTED;
ci->vendor = 0x0000;
ci->product = 0x0000;
......@@ -115,58 +98,80 @@ static void __hidp_copy_session(struct hidp_session *session, struct hidp_connin
}
}
static int hidp_queue_event(struct hidp_session *session, struct input_dev *dev,
unsigned int type, unsigned int code, int value)
/* assemble skb, queue message on @transmit and wake up the session thread */
static int hidp_send_message(struct hidp_session *session, struct socket *sock,
struct sk_buff_head *transmit, unsigned char hdr,
const unsigned char *data, int size)
{
unsigned char newleds;
struct sk_buff *skb;
struct sock *sk = sock->sk;
BT_DBG("session %p type %d code %d value %d", session, type, code, value);
if (type != EV_LED)
return -1;
newleds = (!!test_bit(LED_KANA, dev->led) << 3) |
(!!test_bit(LED_COMPOSE, dev->led) << 3) |
(!!test_bit(LED_SCROLLL, dev->led) << 2) |
(!!test_bit(LED_CAPSL, dev->led) << 1) |
(!!test_bit(LED_NUML, dev->led));
if (session->leds == newleds)
return 0;
BT_DBG("session %p data %p size %d", session, data, size);
session->leds = newleds;
if (atomic_read(&session->terminate))
return -EIO;
skb = alloc_skb(3, GFP_ATOMIC);
skb = alloc_skb(size + 1, GFP_ATOMIC);
if (!skb) {
BT_ERR("Can't allocate memory for new frame");
return -ENOMEM;
}
*skb_put(skb, 1) = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;
*skb_put(skb, 1) = 0x01;
*skb_put(skb, 1) = newleds;
skb_queue_tail(&session->intr_transmit, skb);
*skb_put(skb, 1) = hdr;
if (data && size > 0)
memcpy(skb_put(skb, size), data, size);
hidp_schedule(session);
skb_queue_tail(transmit, skb);
wake_up_interruptible(sk_sleep(sk));
return 0;
}
static int hidp_hidinput_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
static int hidp_send_ctrl_message(struct hidp_session *session,
unsigned char hdr, const unsigned char *data,
int size)
{
struct hid_device *hid = input_get_drvdata(dev);
struct hidp_session *session = hid->driver_data;
return hidp_send_message(session, session->ctrl_sock,
&session->ctrl_transmit, hdr, data, size);
}
return hidp_queue_event(session, dev, type, code, value);
static int hidp_send_intr_message(struct hidp_session *session,
unsigned char hdr, const unsigned char *data,
int size)
{
return hidp_send_message(session, session->intr_sock,
&session->intr_transmit, hdr, data, size);
}
static int hidp_input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
static int hidp_input_event(struct input_dev *dev, unsigned int type,
unsigned int code, int value)
{
struct hidp_session *session = input_get_drvdata(dev);
unsigned char newleds;
unsigned char hdr, data[2];
BT_DBG("session %p type %d code %d value %d",
session, type, code, value);
if (type != EV_LED)
return -1;
newleds = (!!test_bit(LED_KANA, dev->led) << 3) |
(!!test_bit(LED_COMPOSE, dev->led) << 3) |
(!!test_bit(LED_SCROLLL, dev->led) << 2) |
(!!test_bit(LED_CAPSL, dev->led) << 1) |
(!!test_bit(LED_NUML, dev->led));
return hidp_queue_event(session, dev, type, code, value);
if (session->leds == newleds)
return 0;
session->leds = newleds;
hdr = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;
data[0] = 0x01;
data[1] = newleds;
return hidp_send_intr_message(session, hdr, data, 2);
}
static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb)
......@@ -224,71 +229,9 @@ static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb)
input_sync(dev);
}
static int __hidp_send_ctrl_message(struct hidp_session *session,
unsigned char hdr, unsigned char *data,
int size)
{
struct sk_buff *skb;
BT_DBG("session %p data %p size %d", session, data, size);
if (atomic_read(&session->terminate))
return -EIO;
skb = alloc_skb(size + 1, GFP_ATOMIC);
if (!skb) {
BT_ERR("Can't allocate memory for new frame");
return -ENOMEM;
}
*skb_put(skb, 1) = hdr;
if (data && size > 0)
memcpy(skb_put(skb, size), data, size);
skb_queue_tail(&session->ctrl_transmit, skb);
return 0;
}
static int hidp_send_ctrl_message(struct hidp_session *session,
unsigned char hdr, unsigned char *data, int size)
{
int err;
err = __hidp_send_ctrl_message(session, hdr, data, size);
hidp_schedule(session);
return err;
}
static int hidp_queue_report(struct hidp_session *session,
unsigned char *data, int size)
{
struct sk_buff *skb;
BT_DBG("session %p hid %p data %p size %d", session, session->hid, data, size);
skb = alloc_skb(size + 1, GFP_ATOMIC);
if (!skb) {
BT_ERR("Can't allocate memory for new frame");
return -ENOMEM;
}
*skb_put(skb, 1) = 0xa2;
if (size > 0)
memcpy(skb_put(skb, size), data, size);
skb_queue_tail(&session->intr_transmit, skb);
hidp_schedule(session);
return 0;
}
static int hidp_send_report(struct hidp_session *session, struct hid_report *report)
{
unsigned char buf[32];
unsigned char buf[32], hdr;
int rsize;
rsize = ((report->size - 1) >> 3) + 1 + (report->id > 0);
......@@ -296,8 +239,9 @@ static int hidp_send_report(struct hidp_session *session, struct hid_report *rep
return -EIO;
hid_output_report(report, buf);
hdr = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;
return hidp_queue_report(session, buf, rsize);
return hidp_send_intr_message(session, hdr, buf, rsize);
}
static int hidp_get_raw_report(struct hid_device *hid,
......@@ -336,17 +280,19 @@ static int hidp_get_raw_report(struct hid_device *hid,
session->waiting_report_number = numbered_reports ? report_number : -1;
set_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
data[0] = report_number;
ret = hidp_send_ctrl_message(hid->driver_data, report_type, data, 1);
ret = hidp_send_ctrl_message(session, report_type, data, 1);
if (ret)
goto err;
/* Wait for the return of the report. The returned report
gets put in session->report_return. */
while (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags)) {
while (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags) &&
!atomic_read(&session->terminate)) {
int res;
res = wait_event_interruptible_timeout(session->report_queue,
!test_bit(HIDP_WAITING_FOR_RETURN, &session->flags),
!test_bit(HIDP_WAITING_FOR_RETURN, &session->flags)
|| atomic_read(&session->terminate),
5*HZ);
if (res == 0) {
/* timeout */
......@@ -389,14 +335,11 @@ static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, s
struct hidp_session *session = hid->driver_data;
int ret;
switch (report_type) {
case HID_FEATURE_REPORT:
report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE;
break;
case HID_OUTPUT_REPORT:
report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_OUPUT;
break;
default:
if (report_type == HID_OUTPUT_REPORT) {
report_type = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;
return hidp_send_intr_message(session, report_type,
data, count);
} else if (report_type != HID_FEATURE_REPORT) {
return -EINVAL;
}
......@@ -405,17 +348,19 @@ static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, s
/* Set up our wait, and send the report request to the device. */
set_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
ret = hidp_send_ctrl_message(hid->driver_data, report_type, data,
count);
report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE;
ret = hidp_send_ctrl_message(session, report_type, data, count);
if (ret)
goto err;
/* Wait for the ACK from the device. */
while (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) {
while (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags) &&
!atomic_read(&session->terminate)) {
int res;
res = wait_event_interruptible_timeout(session->report_queue,
!test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags),
!test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)
|| atomic_read(&session->terminate),
10*HZ);
if (res == 0) {
/* timeout */
......@@ -446,8 +391,7 @@ static void hidp_idle_timeout(unsigned long arg)
{
struct hidp_session *session = (struct hidp_session *) arg;
atomic_inc(&session->terminate);
wake_up_process(session->task);
hidp_session_terminate(session);
}
static void hidp_set_timer(struct hidp_session *session)
......@@ -490,12 +434,12 @@ static void hidp_process_handshake(struct hidp_session *session,
case HIDP_HSHK_ERR_FATAL:
/* Device requests a reboot, as this is the only way this error
* can be recovered. */
__hidp_send_ctrl_message(session,
hidp_send_ctrl_message(session,
HIDP_TRANS_HID_CONTROL | HIDP_CTRL_SOFT_RESET, NULL, 0);
break;
default:
__hidp_send_ctrl_message(session,
hidp_send_ctrl_message(session,
HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_INVALID_PARAMETER, NULL, 0);
break;
}
......@@ -515,8 +459,7 @@ static void hidp_process_hid_control(struct hidp_session *session,
skb_queue_purge(&session->ctrl_transmit);
skb_queue_purge(&session->intr_transmit);
atomic_inc(&session->terminate);
wake_up_process(current);
hidp_session_terminate(session);
}
}
......@@ -544,7 +487,7 @@ static int hidp_process_data(struct hidp_session *session, struct sk_buff *skb,
break;
default:
__hidp_send_ctrl_message(session,
hidp_send_ctrl_message(session,
HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_INVALID_PARAMETER, NULL, 0);
}
......@@ -591,7 +534,7 @@ static void hidp_recv_ctrl_frame(struct hidp_session *session,
break;
default:
__hidp_send_ctrl_message(session,
hidp_send_ctrl_message(session,
HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_UNSUPPORTED_REQUEST, NULL, 0);
break;
}
......@@ -642,32 +585,24 @@ static int hidp_send_frame(struct socket *sock, unsigned char *data, int len)
return kernel_sendmsg(sock, &msg, &iv, 1, len);
}
static void hidp_process_intr_transmit(struct hidp_session *session)
/* dequeue message from @transmit and send via @sock */
static void hidp_process_transmit(struct hidp_session *session,
struct sk_buff_head *transmit,
struct socket *sock)
{
struct sk_buff *skb;
int ret;
BT_DBG("session %p", session);
while ((skb = skb_dequeue(&session->intr_transmit))) {
if (hidp_send_frame(session->intr_sock, skb->data, skb->len) < 0) {
skb_queue_head(&session->intr_transmit, skb);
while ((skb = skb_dequeue(transmit))) {
ret = hidp_send_frame(sock, skb->data, skb->len);
if (ret == -EAGAIN) {
skb_queue_head(transmit, skb);
break;
}
hidp_set_timer(session);
kfree_skb(skb);
}
}
static void hidp_process_ctrl_transmit(struct hidp_session *session)
{
struct sk_buff *skb;
BT_DBG("session %p", session);
while ((skb = skb_dequeue(&session->ctrl_transmit))) {
if (hidp_send_frame(session->ctrl_sock, skb->data, skb->len) < 0) {
skb_queue_head(&session->ctrl_transmit, skb);
} else if (ret < 0) {
hidp_session_terminate(session);
kfree_skb(skb);
break;
}
......@@ -676,122 +611,6 @@ static void hidp_process_ctrl_transmit(struct hidp_session *session)
}
}
static int hidp_session(void *arg)
{
struct hidp_session *session = arg;
struct sock *ctrl_sk = session->ctrl_sock->sk;
struct sock *intr_sk = session->intr_sock->sk;
struct sk_buff *skb;
wait_queue_t ctrl_wait, intr_wait;
BT_DBG("session %p", session);
__module_get(THIS_MODULE);
set_user_nice(current, -15);
init_waitqueue_entry(&ctrl_wait, current);
init_waitqueue_entry(&intr_wait, current);
add_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
add_wait_queue(sk_sleep(intr_sk), &intr_wait);
session->waiting_for_startup = 0;
wake_up_interruptible(&session->startup_queue);
set_current_state(TASK_INTERRUPTIBLE);
while (!atomic_read(&session->terminate)) {
if (ctrl_sk->sk_state != BT_CONNECTED ||
intr_sk->sk_state != BT_CONNECTED)
break;
while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) {
skb_orphan(skb);
if (!skb_linearize(skb))
hidp_recv_intr_frame(session, skb);
else
kfree_skb(skb);
}
hidp_process_intr_transmit(session);
while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) {
skb_orphan(skb);
if (!skb_linearize(skb))
hidp_recv_ctrl_frame(session, skb);
else
kfree_skb(skb);
}
hidp_process_ctrl_transmit(session);
schedule();
set_current_state(TASK_INTERRUPTIBLE);
}
set_current_state(TASK_RUNNING);
atomic_inc(&session->terminate);
remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
remove_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
wake_up_interruptible(&session->report_queue);
down_write(&hidp_session_sem);
hidp_del_timer(session);
if (session->input) {
input_unregister_device(session->input);
session->input = NULL;
}
if (session->hid) {
hid_destroy_device(session->hid);
session->hid = NULL;
}
/* Wakeup user-space polling for socket errors */
session->intr_sock->sk->sk_err = EUNATCH;
session->ctrl_sock->sk->sk_err = EUNATCH;
hidp_schedule(session);
fput(session->intr_sock->file);
wait_event_timeout(*(sk_sleep(ctrl_sk)),
(ctrl_sk->sk_state == BT_CLOSED), msecs_to_jiffies(500));
fput(session->ctrl_sock->file);
__hidp_unlink_session(session);
up_write(&hidp_session_sem);
kfree(session->rd_data);
kfree(session);
module_put_and_exit(0);
return 0;
}
static struct hci_conn *hidp_get_connection(struct hidp_session *session)
{
bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src;
bdaddr_t *dst = &bt_sk(session->ctrl_sock->sk)->dst;
struct hci_conn *conn;
struct hci_dev *hdev;
hdev = hci_get_route(dst, src);
if (!hdev)
return NULL;
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
if (conn)
hci_conn_hold_device(conn);
hci_dev_unlock(hdev);
hci_dev_put(hdev);
return conn;
}
static int hidp_setup_input(struct hidp_session *session,
struct hidp_connadd_req *req)
{
......@@ -839,7 +658,7 @@ static int hidp_setup_input(struct hidp_session *session,
input->relbit[0] |= BIT_MASK(REL_WHEEL);
}
input->dev.parent = &session->conn->dev;
input->dev.parent = &session->conn->hcon->dev;
input->event = hidp_input_event;
......@@ -898,7 +717,6 @@ static struct hid_ll_driver hidp_hid_driver = {
.stop = hidp_stop,
.open = hidp_open,
.close = hidp_close,
.hidinput_input_event = hidp_hidinput_event,
};
/* This function sets up the hid device. It does not add it
......@@ -943,7 +761,7 @@ static int hidp_setup_hid(struct hidp_session *session,
snprintf(hid->uniq, sizeof(hid->uniq), "%pMR",
&bt_sk(session->ctrl_sock->sk)->dst);
hid->dev.parent = &session->conn->dev;
hid->dev.parent = &session->conn->hcon->dev;
hid->ll_driver = &hidp_hid_driver;
hid->hid_get_raw_report = hidp_get_raw_report;
......@@ -965,80 +783,217 @@ static int hidp_setup_hid(struct hidp_session *session,
return err;
}
int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock)
/* initialize session devices */
static int hidp_session_dev_init(struct hidp_session *session,
struct hidp_connadd_req *req)
{
struct hidp_session *session, *s;
int vendor, product;
int err;
int ret;
BT_DBG("");
if (req->rd_size > 0) {
ret = hidp_setup_hid(session, req);
if (ret && ret != -ENODEV)
return ret;
}
if (bacmp(&bt_sk(ctrl_sock->sk)->src, &bt_sk(intr_sock->sk)->src) ||
bacmp(&bt_sk(ctrl_sock->sk)->dst, &bt_sk(intr_sock->sk)->dst))
return -ENOTUNIQ;
if (!session->hid) {
ret = hidp_setup_input(session, req);
if (ret < 0)
return ret;
}
BT_DBG("rd_data %p rd_size %d", req->rd_data, req->rd_size);
return 0;
}
down_write(&hidp_session_sem);
/* destroy session devices */
static void hidp_session_dev_destroy(struct hidp_session *session)
{
if (session->hid)
put_device(&session->hid->dev);
else if (session->input)
input_put_device(session->input);
s = __hidp_get_session(&bt_sk(ctrl_sock->sk)->dst);
if (s && s->state == BT_CONNECTED) {
up_write(&hidp_session_sem);
return -EEXIST;
}
kfree(session->rd_data);
session->rd_data = NULL;
}
session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL);
if (!session) {
up_write(&hidp_session_sem);
return -ENOMEM;
}
/* add HID/input devices to their underlying bus systems */
static int hidp_session_dev_add(struct hidp_session *session)
{
int ret;
bacpy(&session->bdaddr, &bt_sk(ctrl_sock->sk)->dst);
/* Both HID and input systems drop a ref-count when unregistering the
* device but they don't take a ref-count when registering them. Work
* around this by explicitly taking a refcount during registration
* which is dropped automatically by unregistering the devices. */
session->ctrl_mtu = min_t(uint, l2cap_pi(ctrl_sock->sk)->chan->omtu,
l2cap_pi(ctrl_sock->sk)->chan->imtu);
session->intr_mtu = min_t(uint, l2cap_pi(intr_sock->sk)->chan->omtu,
l2cap_pi(intr_sock->sk)->chan->imtu);
if (session->hid) {
ret = hid_add_device(session->hid);
if (ret)
return ret;
get_device(&session->hid->dev);
} else if (session->input) {
ret = input_register_device(session->input);
if (ret)
return ret;
input_get_device(session->input);
}
BT_DBG("ctrl mtu %d intr mtu %d", session->ctrl_mtu, session->intr_mtu);
return 0;
}
session->ctrl_sock = ctrl_sock;
session->intr_sock = intr_sock;
session->state = BT_CONNECTED;
/* remove HID/input devices from their bus systems */
static void hidp_session_dev_del(struct hidp_session *session)
{
if (session->hid)
hid_destroy_device(session->hid);
else if (session->input)
input_unregister_device(session->input);
}
session->conn = hidp_get_connection(session);
if (!session->conn) {
err = -ENOTCONN;
goto failed;
}
/*
* Create new session object
* Allocate session object, initialize static fields, copy input data into the
* object and take a reference to all sub-objects.
* This returns 0 on success and puts a pointer to the new session object in
* \out. Otherwise, an error code is returned.
* The new session object has an initial ref-count of 1.
*/
static int hidp_session_new(struct hidp_session **out, const bdaddr_t *bdaddr,
struct socket *ctrl_sock,
struct socket *intr_sock,
struct hidp_connadd_req *req,
struct l2cap_conn *conn)
{
struct hidp_session *session;
int ret;
struct bt_sock *ctrl, *intr;
ctrl = bt_sk(ctrl_sock->sk);
intr = bt_sk(intr_sock->sk);
setup_timer(&session->timer, hidp_idle_timeout, (unsigned long)session);
session = kzalloc(sizeof(*session), GFP_KERNEL);
if (!session)
return -ENOMEM;
/* object and runtime management */
kref_init(&session->ref);
atomic_set(&session->state, HIDP_SESSION_IDLING);
init_waitqueue_head(&session->state_queue);
session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID);
/* connection management */
bacpy(&session->bdaddr, bdaddr);
session->conn = conn;
session->user.probe = hidp_session_probe;
session->user.remove = hidp_session_remove;
session->ctrl_sock = ctrl_sock;
session->intr_sock = intr_sock;
skb_queue_head_init(&session->ctrl_transmit);
skb_queue_head_init(&session->intr_transmit);
session->ctrl_mtu = min_t(uint, l2cap_pi(ctrl)->chan->omtu,
l2cap_pi(ctrl)->chan->imtu);
session->intr_mtu = min_t(uint, l2cap_pi(intr)->chan->omtu,
l2cap_pi(intr)->chan->imtu);
session->idle_to = req->idle_to;
/* device management */
setup_timer(&session->timer, hidp_idle_timeout,
(unsigned long)session);
/* session data */
mutex_init(&session->report_mutex);
init_waitqueue_head(&session->report_queue);
init_waitqueue_head(&session->startup_queue);
session->waiting_for_startup = 1;
session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID);
session->idle_to = req->idle_to;
__hidp_link_session(session);
ret = hidp_session_dev_init(session, req);
if (ret)
goto err_free;
if (req->rd_size > 0) {
err = hidp_setup_hid(session, req);
if (err && err != -ENODEV)
goto purge;
}
l2cap_conn_get(session->conn);
get_file(session->intr_sock->file);
get_file(session->ctrl_sock->file);
*out = session;
return 0;
if (!session->hid) {
err = hidp_setup_input(session, req);
if (err < 0)
goto purge;
err_free:
kfree(session);
return ret;
}
/* increase ref-count of the given session by one */
static void hidp_session_get(struct hidp_session *session)
{
kref_get(&session->ref);
}
/* release callback */
static void session_free(struct kref *ref)
{
struct hidp_session *session = container_of(ref, struct hidp_session,
ref);
hidp_session_dev_destroy(session);
skb_queue_purge(&session->ctrl_transmit);
skb_queue_purge(&session->intr_transmit);
fput(session->intr_sock->file);
fput(session->ctrl_sock->file);
l2cap_conn_put(session->conn);
kfree(session);
}
/* decrease ref-count of the given session by one */
static void hidp_session_put(struct hidp_session *session)
{
kref_put(&session->ref, session_free);
}
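/*
 * Illustrative note (not part of this patch): with the kref handling above,
 * hidp_session_new() hands out a session with an initial ref-count of 1 owned
 * by the caller. The session thread, the global session list and every
 * successful hidp_session_find() each take an additional reference via
 * hidp_session_get() and release it with hidp_session_put(). Once the last
 * reference is dropped, session_free() releases the sockets, the l2cap_conn
 * reference and the device data.
 */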
/*
* Search the list of active sessions for a session with target address
* \bdaddr. You must hold at least a read-lock on \hidp_session_sem. As long as
* you do not release this lock, the session objects cannot vanish and you can
* safely take a reference to the session yourself.
*/
static struct hidp_session *__hidp_session_find(const bdaddr_t *bdaddr)
{
struct hidp_session *session;
list_for_each_entry(session, &hidp_session_list, list) {
if (!bacmp(bdaddr, &session->bdaddr))
return session;
}
hidp_set_timer(session);
return NULL;
}
/*
* Same as __hidp_session_find() but no locks must be held. This also takes a
* reference to the returned session (if non-NULL) so you must drop this
* reference if you no longer use the object.
*/
static struct hidp_session *hidp_session_find(const bdaddr_t *bdaddr)
{
struct hidp_session *session;
down_read(&hidp_session_sem);
session = __hidp_session_find(bdaddr);
if (session)
hidp_session_get(session);
up_read(&hidp_session_sem);
return session;
}
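/*
 * Minimal usage sketch (not part of this patch): hidp_session_find() takes its
 * own reference, so callers must pair it with hidp_session_put(). The helper
 * name hidp_session_exists() below is hypothetical and only illustrates the
 * find/put pattern also used by hidp_verify_sockets() later in this file.
 */
static bool hidp_session_exists(const bdaddr_t *bdaddr)
{
	struct hidp_session *session;

	session = hidp_session_find(bdaddr);	/* takes a reference */
	if (!session)
		return false;

	hidp_session_put(session);		/* drop the reference again */
	return true;
}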
/*
* Start session synchronously
* This starts a session thread and waits until initialization
* is done or returns an error if it couldn't be started.
* If this returns 0 the session thread is up and running. You must call
* hidp_session_stop_sync() before deleting any runtime resources.
*/
static int hidp_session_start_sync(struct hidp_session *session)
{
unsigned int vendor, product;
if (session->hid) {
vendor = session->hid->vendor;
......@@ -1051,98 +1006,320 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
product = 0x0000;
}
session->task = kthread_run(hidp_session, session, "khidpd_%04x%04x",
vendor, product);
if (IS_ERR(session->task)) {
err = PTR_ERR(session->task);
goto unlink;
}
session->task = kthread_run(hidp_session_thread, session,
"khidpd_%04x%04x", vendor, product);
if (IS_ERR(session->task))
return PTR_ERR(session->task);
while (session->waiting_for_startup) {
wait_event_interruptible(session->startup_queue,
!session->waiting_for_startup);
}
while (atomic_read(&session->state) <= HIDP_SESSION_IDLING)
wait_event(session->state_queue,
atomic_read(&session->state) > HIDP_SESSION_IDLING);
if (session->hid)
err = hid_add_device(session->hid);
else
err = input_register_device(session->input);
return 0;
}
if (err < 0) {
atomic_inc(&session->terminate);
wake_up_process(session->task);
up_write(&hidp_session_sem);
return err;
}
/*
* Terminate session thread
* Wake up session thread and notify it to stop. This is asynchronous and
* returns immediately. Call this whenever a runtime error occurs and you want
* the session to stop.
* Note: wake_up_process() performs any necessary memory-barriers for us.
*/
static void hidp_session_terminate(struct hidp_session *session)
{
atomic_inc(&session->terminate);
wake_up_process(session->task);
}
if (session->input) {
hidp_send_ctrl_message(session,
HIDP_TRANS_SET_PROTOCOL | HIDP_PROTO_BOOT, NULL, 0);
session->flags |= (1 << HIDP_BOOT_PROTOCOL_MODE);
/*
* Probe HIDP session
* This is called from the l2cap_conn core when our l2cap_user object is bound
* to the hci-connection. We get the session via the \user object and can now
* start the session thread, register the HID/input devices and link it into
* the global session list.
* The global session-list owns its own reference to the session object so you
* can drop your own reference after registering the l2cap_user object.
*/
static int hidp_session_probe(struct l2cap_conn *conn,
struct l2cap_user *user)
{
struct hidp_session *session = container_of(user,
struct hidp_session,
user);
struct hidp_session *s;
int ret;
down_write(&hidp_session_sem);
session->leds = 0xff;
hidp_input_event(session->input, EV_LED, 0, 0);
/* check that no other session for this device exists */
s = __hidp_session_find(&session->bdaddr);
if (s) {
ret = -EEXIST;
goto out_unlock;
}
ret = hidp_session_start_sync(session);
if (ret)
goto out_unlock;
ret = hidp_session_dev_add(session);
if (ret)
goto out_stop;
hidp_session_get(session);
list_add(&session->list, &hidp_session_list);
ret = 0;
goto out_unlock;
out_stop:
hidp_session_terminate(session);
out_unlock:
up_write(&hidp_session_sem);
return 0;
return ret;
}
/*
* Remove HIDP session
* Called from the l2cap_conn core when either we explicitly unregistered
* the l2cap_user object or when the underlying connection is shut down.
* We signal the hidp-session thread to shut down, unregister the HID/input
* devices and unlink the session from the global list.
* This drops the reference to the session that is owned by the global
* session-list.
* Note: We _must_ not synchronously wait for the session-thread to shut down.
* This is because the session-thread might be waiting for an HCI lock that is
* held while we are called. Therefore, we only unregister the devices and
* notify the session-thread to terminate. The thread itself owns a reference
* to the session object so it can safely shut down.
*/
static void hidp_session_remove(struct l2cap_conn *conn,
struct l2cap_user *user)
{
struct hidp_session *session = container_of(user,
struct hidp_session,
user);
down_write(&hidp_session_sem);
hidp_session_terminate(session);
hidp_session_dev_del(session);
list_del(&session->list);
up_write(&hidp_session_sem);
hidp_session_put(session);
}
/*
* Session Worker
* This performs the actual main-loop of the HIDP worker. We first check
* whether the underlying connection is still alive, then parse all pending
* messages and finally send all outstanding messages.
*/
static void hidp_session_run(struct hidp_session *session)
{
struct sock *ctrl_sk = session->ctrl_sock->sk;
struct sock *intr_sk = session->intr_sock->sk;
struct sk_buff *skb;
for (;;) {
/*
* This thread can be woken up two ways:
* - You call hidp_session_terminate() which sets the
* session->terminate flag and wakes this thread up.
* - Via modifying the socket state of ctrl/intr_sock. This
* thread is woken up by ->sk_state_changed().
*
* Note: set_current_state() performs any necessary
* memory-barriers for us.
*/
set_current_state(TASK_INTERRUPTIBLE);
if (atomic_read(&session->terminate))
break;
if (ctrl_sk->sk_state != BT_CONNECTED ||
intr_sk->sk_state != BT_CONNECTED)
break;
/* parse incoming intr-skbs */
while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) {
skb_orphan(skb);
if (!skb_linearize(skb))
hidp_recv_intr_frame(session, skb);
else
kfree_skb(skb);
}
/* send pending intr-skbs */
hidp_process_transmit(session, &session->intr_transmit,
session->intr_sock);
unlink:
/* parse incoming ctrl-skbs */
while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) {
skb_orphan(skb);
if (!skb_linearize(skb))
hidp_recv_ctrl_frame(session, skb);
else
kfree_skb(skb);
}
/* send pending ctrl-skbs */
hidp_process_transmit(session, &session->ctrl_transmit,
session->ctrl_sock);
schedule();
}
atomic_inc(&session->terminate);
set_current_state(TASK_RUNNING);
}
/*
* HIDP session thread
* This thread runs the I/O for a single HIDP session. Startup is synchronous
* which allows us to take references to ourselves here instead of doing that in
* the caller.
* When we are ready to run we notify the caller and call hidp_session_run().
*/
static int hidp_session_thread(void *arg)
{
struct hidp_session *session = arg;
wait_queue_t ctrl_wait, intr_wait;
BT_DBG("session %p", session);
/* initialize runtime environment */
hidp_session_get(session);
__module_get(THIS_MODULE);
set_user_nice(current, -15);
hidp_set_timer(session);
init_waitqueue_entry(&ctrl_wait, current);
init_waitqueue_entry(&intr_wait, current);
add_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait);
add_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait);
/* This memory barrier is paired with wq_has_sleeper(). See
* sock_poll_wait() for more information why this is needed. */
smp_mb();
/* notify synchronous startup that we're ready */
atomic_inc(&session->state);
wake_up(&session->state_queue);
/* run session */
hidp_session_run(session);
/* cleanup runtime environment */
remove_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait);
remove_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait);
wake_up_interruptible(&session->report_queue);
hidp_del_timer(session);
if (session->input) {
input_unregister_device(session->input);
session->input = NULL;
/*
* If we stopped ourselves due to any internal signal, we should try to
* unregister our own session here to avoid having it linger until the
* parent l2cap_conn dies or user-space cleans it up.
* This does not deadlock as we don't do any synchronous shutdown.
* Instead, this call has the same semantics as if user-space tried to
* delete the session.
*/
l2cap_unregister_user(session->conn, &session->user);
hidp_session_put(session);
module_put_and_exit(0);
return 0;
}
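/*
 * Illustrative sketch (not part of this patch): the synchronous startup used
 * above boils down to the following pairing between hidp_session_start_sync()
 * and hidp_session_thread():
 *
 *   caller                                     session thread
 *   ------                                     --------------
 *   kthread_run(hidp_session_thread, ...);
 *   wait_event(session->state_queue,           hidp_session_get(session);
 *              atomic_read(&session->state) >  __module_get(THIS_MODULE);
 *              HIDP_SESSION_IDLING);           ...
 *                                              atomic_inc(&session->state);
 *                                              wake_up(&session->state_queue);
 *
 * The caller only continues once the thread has taken its own references, so
 * hidp_session_probe() can safely register the devices afterwards.
 */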
static int hidp_verify_sockets(struct socket *ctrl_sock,
struct socket *intr_sock)
{
struct bt_sock *ctrl, *intr;
struct hidp_session *session;
if (!l2cap_is_socket(ctrl_sock) || !l2cap_is_socket(intr_sock))
return -EINVAL;
ctrl = bt_sk(ctrl_sock->sk);
intr = bt_sk(intr_sock->sk);
if (bacmp(&ctrl->src, &intr->src) || bacmp(&ctrl->dst, &intr->dst))
return -ENOTUNIQ;
if (ctrl->sk.sk_state != BT_CONNECTED ||
intr->sk.sk_state != BT_CONNECTED)
return -EBADFD;
/* early session check, we check again during session registration */
session = hidp_session_find(&ctrl->dst);
if (session) {
hidp_session_put(session);
return -EEXIST;
}
if (session->hid) {
hid_destroy_device(session->hid);
session->hid = NULL;
return 0;
}
int hidp_connection_add(struct hidp_connadd_req *req,
struct socket *ctrl_sock,
struct socket *intr_sock)
{
struct hidp_session *session;
struct l2cap_conn *conn;
struct l2cap_chan *chan = l2cap_pi(ctrl_sock->sk)->chan;
int ret;
ret = hidp_verify_sockets(ctrl_sock, intr_sock);
if (ret)
return ret;
conn = NULL;
l2cap_chan_lock(chan);
if (chan->conn) {
l2cap_conn_get(chan->conn);
conn = chan->conn;
}
l2cap_chan_unlock(chan);
kfree(session->rd_data);
session->rd_data = NULL;
if (!conn)
return -EBADFD;
purge:
__hidp_unlink_session(session);
ret = hidp_session_new(&session, &bt_sk(ctrl_sock->sk)->dst, ctrl_sock,
intr_sock, req, conn);
if (ret)
goto out_conn;
skb_queue_purge(&session->ctrl_transmit);
skb_queue_purge(&session->intr_transmit);
ret = l2cap_register_user(conn, &session->user);
if (ret)
goto out_session;
failed:
up_write(&hidp_session_sem);
ret = 0;
kfree(session);
return err;
out_session:
hidp_session_put(session);
out_conn:
l2cap_conn_put(conn);
return ret;
}
int hidp_del_connection(struct hidp_conndel_req *req)
int hidp_connection_del(struct hidp_conndel_req *req)
{
struct hidp_session *session;
int err = 0;
BT_DBG("");
session = hidp_session_find(&req->bdaddr);
if (!session)
return -ENOENT;
down_read(&hidp_session_sem);
if (req->flags & (1 << HIDP_VIRTUAL_CABLE_UNPLUG))
hidp_send_ctrl_message(session,
HIDP_TRANS_HID_CONTROL |
HIDP_CTRL_VIRTUAL_CABLE_UNPLUG,
NULL, 0);
else
l2cap_unregister_user(session->conn, &session->user);
session = __hidp_get_session(&req->bdaddr);
if (session) {
if (req->flags & (1 << HIDP_VIRTUAL_CABLE_UNPLUG)) {
hidp_send_ctrl_message(session,
HIDP_TRANS_HID_CONTROL | HIDP_CTRL_VIRTUAL_CABLE_UNPLUG, NULL, 0);
} else {
/* Flush the transmit queues */
skb_queue_purge(&session->ctrl_transmit);
skb_queue_purge(&session->intr_transmit);
atomic_inc(&session->terminate);
wake_up_process(session->task);
}
} else
err = -ENOENT;
hidp_session_put(session);
up_read(&hidp_session_sem);
return err;
return 0;
}
int hidp_get_connlist(struct hidp_connlist_req *req)
......@@ -1157,7 +1334,7 @@ int hidp_get_connlist(struct hidp_connlist_req *req)
list_for_each_entry(session, &hidp_session_list, list) {
struct hidp_conninfo ci;
__hidp_copy_session(session, &ci);
hidp_copy_session(session, &ci);
if (copy_to_user(req->ci, &ci, sizeof(ci))) {
err = -EFAULT;
......@@ -1178,18 +1355,14 @@ int hidp_get_connlist(struct hidp_connlist_req *req)
int hidp_get_conninfo(struct hidp_conninfo *ci)
{
struct hidp_session *session;
int err = 0;
down_read(&hidp_session_sem);
session = __hidp_get_session(&ci->bdaddr);
if (session)
__hidp_copy_session(session, ci);
else
err = -ENOENT;
session = hidp_session_find(&ci->bdaddr);
if (session) {
hidp_copy_session(session, ci);
hidp_session_put(session);
}
up_read(&hidp_session_sem);
return err;
return session ? 0 : -ENOENT;
}
static int __init hidp_init(void)
......@@ -1208,6 +1381,7 @@ module_init(hidp_init);
module_exit(hidp_exit);
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
MODULE_DESCRIPTION("Bluetooth HIDP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
......
......@@ -24,7 +24,9 @@
#define __HIDP_H
#include <linux/types.h>
#include <linux/kref.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/l2cap.h>
/* HIDP header masks */
#define HIDP_HEADER_TRANS_MASK 0xf0
......@@ -119,43 +121,52 @@ struct hidp_connlist_req {
struct hidp_conninfo __user *ci;
};
int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock);
int hidp_del_connection(struct hidp_conndel_req *req);
int hidp_connection_add(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock);
int hidp_connection_del(struct hidp_conndel_req *req);
int hidp_get_connlist(struct hidp_connlist_req *req);
int hidp_get_conninfo(struct hidp_conninfo *ci);
enum hidp_session_state {
HIDP_SESSION_IDLING,
HIDP_SESSION_RUNNING,
};
/* HIDP session defines */
struct hidp_session {
struct list_head list;
struct kref ref;
struct hci_conn *conn;
/* runtime management */
atomic_t state;
wait_queue_head_t state_queue;
atomic_t terminate;
struct task_struct *task;
unsigned long flags;
/* connection management */
bdaddr_t bdaddr;
struct l2cap_conn *conn;
struct l2cap_user user;
struct socket *ctrl_sock;
struct socket *intr_sock;
bdaddr_t bdaddr;
unsigned long state;
unsigned long flags;
unsigned long idle_to;
struct sk_buff_head ctrl_transmit;
struct sk_buff_head intr_transmit;
uint ctrl_mtu;
uint intr_mtu;
unsigned long idle_to;
atomic_t terminate;
struct task_struct *task;
unsigned char keys[8];
unsigned char leds;
/* device management */
struct input_dev *input;
struct hid_device *hid;
struct timer_list timer;
struct sk_buff_head ctrl_transmit;
struct sk_buff_head intr_transmit;
/* Report descriptor */
__u8 *rd_data;
uint rd_size;
/* session data */
unsigned char keys[8];
unsigned char leds;
/* Used in hidp_get_raw_report() */
int waiting_report_type; /* HIDP_DATA_RTYPE_* */
......@@ -166,24 +177,8 @@ struct hidp_session {
/* Used in hidp_output_raw_report() */
int output_report_success; /* boolean */
/* Report descriptor */
__u8 *rd_data;
uint rd_size;
wait_queue_head_t startup_queue;
int waiting_for_startup;
};
static inline void hidp_schedule(struct hidp_session *session)
{
struct sock *ctrl_sk = session->ctrl_sock->sk;
struct sock *intr_sk = session->intr_sock->sk;
wake_up_interruptible(sk_sleep(ctrl_sk));
wake_up_interruptible(sk_sleep(intr_sk));
}
/* HIDP init defines */
extern int __init hidp_init_sockets(void);
extern void __exit hidp_cleanup_sockets(void);
......
......@@ -77,21 +77,12 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
return err;
}
if (csock->sk->sk_state != BT_CONNECTED ||
isock->sk->sk_state != BT_CONNECTED) {
sockfd_put(csock);
sockfd_put(isock);
return -EBADFD;
}
err = hidp_connection_add(&ca, csock, isock);
if (!err && copy_to_user(argp, &ca, sizeof(ca)))
err = -EFAULT;
err = hidp_add_connection(&ca, csock, isock);
if (!err) {
if (copy_to_user(argp, &ca, sizeof(ca)))
err = -EFAULT;
} else {
sockfd_put(csock);
sockfd_put(isock);
}
sockfd_put(csock);
sockfd_put(isock);
return err;
......@@ -102,7 +93,7 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
if (copy_from_user(&cd, argp, sizeof(cd)))
return -EFAULT;
return hidp_del_connection(&cd);
return hidp_connection_del(&cd);
case HIDPGETCONNLIST:
if (copy_from_user(&cl, argp, sizeof(cl)))
......@@ -296,7 +287,6 @@ int __init hidp_init_sockets(void)
return 0;
error:
BT_ERR("Can't register HIDP socket");
proto_unregister(&hidp_proto);
return err;
}
......
......@@ -571,7 +571,7 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
chan->conn = NULL;
if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
hci_conn_put(conn->hcon);
hci_conn_drop(conn->hcon);
if (mgr && mgr->bredr_chan == chan)
mgr->bredr_chan = NULL;
......@@ -1446,6 +1446,89 @@ static void l2cap_info_timeout(struct work_struct *work)
l2cap_conn_start(conn);
}
/*
* l2cap_user
* External modules can register l2cap_user objects on l2cap_conn. The ->probe
* callback is called during registration. The ->remove callback is called
* during unregistration.
* An l2cap_user object is unregistered either explicitly or when the
* underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
* l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
* External modules must own a reference to the l2cap_conn object if they intend
* to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
* any time if they don't.
*/
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
struct hci_dev *hdev = conn->hcon->hdev;
int ret;
/* We need to check whether l2cap_conn is registered. If it is not, we
* must not register the l2cap_user. l2cap_conn_del() unregisters
* l2cap_conn objects, but it doesn't provide its own locking. Instead, it
* relies on the parent hci_conn object to be locked. This itself relies
* on the hci_dev object to be locked. So we must lock the hci device
* here, too. */
hci_dev_lock(hdev);
if (user->list.next || user->list.prev) {
ret = -EINVAL;
goto out_unlock;
}
/* conn->hchan is NULL after l2cap_conn_del() was called */
if (!conn->hchan) {
ret = -ENODEV;
goto out_unlock;
}
ret = user->probe(conn, user);
if (ret)
goto out_unlock;
list_add(&user->list, &conn->users);
ret = 0;
out_unlock:
hci_dev_unlock(hdev);
return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
struct hci_dev *hdev = conn->hcon->hdev;
hci_dev_lock(hdev);
if (!user->list.next || !user->list.prev)
goto out_unlock;
list_del(&user->list);
user->list.next = NULL;
user->list.prev = NULL;
user->remove(conn, user);
out_unlock:
hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
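/*
 * Minimal consumer sketch (not part of this patch): how an external module
 * might use the l2cap_user API exported above. The names my_user_probe,
 * my_user_remove and my_attach are hypothetical; HIDP is the in-tree user
 * added by this series.
 */
static int my_user_probe(struct l2cap_conn *conn, struct l2cap_user *user)
{
	/* set up per-connection state; a non-zero return aborts registration */
	return 0;
}

static void my_user_remove(struct l2cap_conn *conn, struct l2cap_user *user)
{
	/* conn->hcon and conn->hchan are still valid here; tear down
	 * asynchronously and avoid blocking, since the caller may hold HCI
	 * locks (compare hidp_session_remove() in this series) */
}

static struct l2cap_user my_user = {
	.probe  = my_user_probe,
	.remove = my_user_remove,
};

static int my_attach(struct l2cap_conn *conn)
{
	int err;

	/* hold a conn reference while registered so that a later
	 * l2cap_unregister_user() call remains safe */
	l2cap_conn_get(conn);

	err = l2cap_register_user(conn, &my_user);
	if (err)
		l2cap_conn_put(conn);

	return err;
}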
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
struct l2cap_user *user;
while (!list_empty(&conn->users)) {
user = list_first_entry(&conn->users, struct l2cap_user, list);
list_del(&user->list);
user->list.next = NULL;
user->list.prev = NULL;
user->remove(conn, user);
}
}
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
struct l2cap_conn *conn = hcon->l2cap_data;
......@@ -1458,6 +1541,8 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
kfree_skb(conn->rx_skb);
l2cap_unregister_all_users(conn);
mutex_lock(&conn->chan_lock);
/* Kill channels */
......@@ -1486,7 +1571,8 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
}
hcon->l2cap_data = NULL;
kfree(conn);
conn->hchan = NULL;
l2cap_conn_put(conn);
}
static void security_timeout(struct work_struct *work)
......@@ -1502,12 +1588,12 @@ static void security_timeout(struct work_struct *work)
}
}
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
struct l2cap_conn *conn = hcon->l2cap_data;
struct hci_chan *hchan;
if (conn || status)
if (conn)
return conn;
hchan = hci_chan_create(hcon);
......@@ -1520,8 +1606,10 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
return NULL;
}
kref_init(&conn->ref);
hcon->l2cap_data = conn;
conn->hcon = hcon;
hci_conn_get(conn->hcon);
conn->hchan = hchan;
BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
......@@ -1547,6 +1635,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
mutex_init(&conn->chan_lock);
INIT_LIST_HEAD(&conn->chan_l);
INIT_LIST_HEAD(&conn->users);
if (hcon->type == LE_LINK)
INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
......@@ -1558,6 +1647,26 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
return conn;
}
static void l2cap_conn_free(struct kref *ref)
{
struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
hci_conn_put(conn->hcon);
kfree(conn);
}
void l2cap_conn_get(struct l2cap_conn *conn)
{
kref_get(&conn->ref);
}
EXPORT_SYMBOL(l2cap_conn_get);
void l2cap_conn_put(struct l2cap_conn *conn)
{
kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
/* ---- Socket interface ---- */
/* Find socket with psm and source / destination bdaddr.
......@@ -1695,9 +1804,9 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
goto done;
}
conn = l2cap_conn_add(hcon, 0);
conn = l2cap_conn_add(hcon);
if (!conn) {
hci_conn_put(hcon);
hci_conn_drop(hcon);
err = -ENOMEM;
goto done;
}
......@@ -1707,7 +1816,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
if (!list_empty(&conn->chan_l)) {
err = -EBUSY;
hci_conn_put(hcon);
hci_conn_drop(hcon);
}
if (err)
......@@ -6313,7 +6422,7 @@ void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
if (!status) {
conn = l2cap_conn_add(hcon, status);
conn = l2cap_conn_add(hcon);
if (conn)
l2cap_conn_ready(conn);
} else {
......@@ -6482,7 +6591,7 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
goto drop;
if (!conn)
conn = l2cap_conn_add(hcon, 0);
conn = l2cap_conn_add(hcon);
if (!conn)
goto drop;
......
......@@ -43,6 +43,12 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent);
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
int proto, gfp_t prio);
bool l2cap_is_socket(struct socket *sock)
{
return sock && sock->ops == &l2cap_sock_ops;
}
EXPORT_SYMBOL(l2cap_is_socket);
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
struct sock *sk = sock->sk;
......
......@@ -106,11 +106,10 @@ static const u16 mgmt_events[] = {
* These LE scan and inquiry parameters were chosen according to LE General
* Discovery Procedure specification.
*/
#define LE_SCAN_TYPE 0x01
#define LE_SCAN_WIN 0x12
#define LE_SCAN_INT 0x12
#define LE_SCAN_TIMEOUT_LE_ONLY 10240 /* TGAP(gen_disc_scan_min) */
#define LE_SCAN_TIMEOUT_BREDR_LE 5120 /* TGAP(100)/2 */
#define LE_SCAN_TIMEOUT_LE_ONLY msecs_to_jiffies(10240)
#define LE_SCAN_TIMEOUT_BREDR_LE msecs_to_jiffies(5120)
#define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
#define INQUIRY_LEN_BREDR_LE 0x04 /* TGAP(100)/2 */
......@@ -2131,7 +2130,7 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status)
conn->security_cfm_cb = NULL;
conn->disconn_cfm_cb = NULL;
hci_conn_put(conn);
hci_conn_drop(conn);
mgmt_pending_remove(cmd);
}
......@@ -2222,7 +2221,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
}
if (conn->connect_cfm_cb) {
hci_conn_put(conn);
hci_conn_drop(conn);
err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
MGMT_STATUS_BUSY, &rp, sizeof(rp));
goto unlock;
......@@ -2231,7 +2230,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
hci_conn_put(conn);
hci_conn_drop(conn);
goto unlock;
}
......@@ -2703,7 +2702,7 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
goto failed;
}
err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
err = hci_le_scan(hdev, LE_SCAN_ACTIVE, LE_SCAN_INT,
LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
break;
......@@ -2715,8 +2714,8 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
goto failed;
}
err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT, LE_SCAN_WIN,
LE_SCAN_TIMEOUT_BREDR_LE);
err = hci_le_scan(hdev, LE_SCAN_ACTIVE, LE_SCAN_INT,
LE_SCAN_WIN, LE_SCAN_TIMEOUT_BREDR_LE);
break;
default:
......
......@@ -83,7 +83,7 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
if (conn)
return conn;
conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC);
conn = kzalloc(sizeof(struct sco_conn), GFP_KERNEL);
if (!conn)
return NULL;
......@@ -185,7 +185,7 @@ static int sco_connect(struct sock *sk)
conn = sco_conn_add(hcon);
if (!conn) {
hci_conn_put(hcon);
hci_conn_drop(hcon);
err = -ENOMEM;
goto done;
}
......@@ -353,7 +353,7 @@ static void __sco_sock_close(struct sock *sk)
if (sco_pi(sk)->conn->hcon) {
sk->sk_state = BT_DISCONN;
sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
hci_conn_put(sco_pi(sk)->conn->hcon);
hci_conn_drop(sco_pi(sk)->conn->hcon);
sco_pi(sk)->conn->hcon = NULL;
} else
sco_chan_del(sk, ECONNRESET);
......@@ -481,8 +481,7 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
int err = 0;
int err;
BT_DBG("sk %p", sk);
......@@ -653,6 +652,42 @@ static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
return err;
}
static void sco_conn_defer_accept(struct hci_conn *conn, int mask)
{
struct hci_dev *hdev = conn->hdev;
BT_DBG("conn %p", conn);
conn->state = BT_CONFIG;
if (!lmp_esco_capable(hdev)) {
struct hci_cp_accept_conn_req cp;
bacpy(&cp.bdaddr, &conn->dst);
if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
cp.role = 0x00; /* Become master */
else
cp.role = 0x01; /* Remain slave */
hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
} else {
struct hci_cp_accept_sync_conn_req cp;
bacpy(&cp.bdaddr, &conn->dst);
cp.pkt_type = cpu_to_le16(conn->pkt_type);
cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
cp.max_latency = __constant_cpu_to_le16(0xffff);
cp.content_format = cpu_to_le16(hdev->voice_setting);
cp.retrans_effort = 0xff;
hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
sizeof(cp), &cp);
}
}
static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
......@@ -663,7 +698,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
if (sk->sk_state == BT_CONNECT2 &&
test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
hci_conn_accept(pi->conn->hcon, 0);
sco_conn_defer_accept(pi->conn->hcon, 0);
sk->sk_state = BT_CONFIG;
release_sock(sk);
......@@ -882,7 +917,7 @@ static void sco_chan_del(struct sock *sk, int err)
sco_conn_unlock(conn);
if (conn->hcon)
hci_conn_put(conn->hcon);
hci_conn_drop(conn->hcon);
}
sk->sk_state = BT_CLOSED;
......
......@@ -522,7 +522,7 @@ void smp_chan_destroy(struct l2cap_conn *conn)
kfree(smp);
conn->smp_chan = NULL;
conn->hcon->smp_conn = NULL;
hci_conn_put(conn->hcon);
hci_conn_drop(conn->hcon);
}
int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)
......