/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* $Id: hci_core.c,v 1.14 2002/08/26 16:57:57 maxk Exp $ */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#ifndef HCI_CORE_DEBUG
#undef  BT_DBG
#define BT_DBG(A...)
#endif
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

rwlock_t hci_task_lock = RW_LOCK_UNLOCKED;

/* HCI device list */
LIST_HEAD(hdev_list);
rwlock_t hdev_list_lock = RW_LOCK_UNLOCKED;

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static struct notifier_block *hci_notifier;
/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return notifier_chain_unregister(&hci_notifier, nb);
}

void hci_notify(struct hci_dev *hdev, int event)
{
	notifier_call_chain(&hci_notifier, event, hdev);
}
/* ---- HCI hotplug support ---- */

#ifdef CONFIG_HOTPLUG

static int hci_run_hotplug(char *dev, char *action)
{
	char *argv[3], *envp[5], dstr[20], astr[32];

	sprintf(dstr, "DEVICE=%s", dev);
	sprintf(astr, "ACTION=%s", action);

	argv[0] = hotplug_path;
	argv[1] = "bluetooth";
	argv[2] = NULL;

	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[2] = dstr;
	envp[3] = astr;
	envp[4] = NULL;

	return call_usermodehelper(argv[0], argv, envp);
}
#else
#define hci_run_hotplug(A...)
#endif
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bterr(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
			      unsigned long opt, __u32 timeout)
{
	int ret;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
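/*
 * Illustrative usage (editor's sketch, not part of the original file):
 * a synchronous request pairs a command callback with a wait for
 * hci_req_complete(). Enabling page and inquiry scan could look like
 * this, assuming SCAN_PAGE/SCAN_INQUIRY from hci.h:
 *
 *	err = hci_request(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			  HCI_INIT_TIMEOUT);
 *
 * The callback only queues the HCI command; the event handler calls
 * hci_req_complete() when the matching Command Complete arrives.
 */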
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
}
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	set_event_flt_cp ef;
	__u16 param;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Mandatory initialization */

	/* Reset */
	if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);

	/* Host buffer size */
	{
		host_buffer_size_cp bs;
		bs.acl_mtu = __cpu_to_le16(HCI_MAX_ACL_SIZE);
		bs.sco_mtu = HCI_MAX_SCO_SIZE;
		bs.acl_max_pkt = __cpu_to_le16(0xffff);
		bs.sco_max_pkt = __cpu_to_le16(0xffff);
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE,
			     HOST_BUFFER_SIZE_CP_SIZE, &bs);
	}

	/* Read BD Address */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	ef.flt_type = FLT_CLEAR_ALL;
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, 1, &ef);

	/* Page timeout ~20 secs */
	param = __cpu_to_le16(0x8000);
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = __cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, &param);
}
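/*
 * Editor's note on the two timeout values above: both registers count
 * baseband slots of 0.625 ms, so 0x8000 = 32768 slots ~ 20.48 s and
 * 0x7d00 = 32000 slots ~ 20.0 s, matching the "~20 secs" comments.
 */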
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
/* Get HCI device by index.
 * Device is locked on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hdev_list_lock);
	list_for_each(p, &hdev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hci_dev_hold(d);
			hdev = d;
			break;
		}
	}
	read_unlock(&hdev_list_lock);
	return hdev;
}
/* ---- Inquiry support ---- */
void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}
struct inquiry_entry *inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->info.bdaddr, bdaddr))
			break;
	return e;
}
void inquiry_cache_update(struct hci_dev *hdev, inquiry_info *info)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(&info->bdaddr));

	if (!(e = inquiry_cache_lookup(hdev, &info->bdaddr))) {
		/* Entry not in the cache. Add new one. */
		if (!(e = kmalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
			return;
		memset(e, 0, sizeof(struct inquiry_entry));
		e->next = cache->list;
		cache->list = e;
	}

	memcpy(&e->info, info, sizeof(inquiry_info));
	e->timestamp = jiffies;
	cache->timestamp = jiffies;
}
int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	inquiry_info *info = (inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++)
		memcpy(info++, &e->info, sizeof(inquiry_info));

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
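/*
 * Illustrative consumer (editor's sketch): because the dump routine
 * cannot sleep, callers snapshot the cache into a preallocated buffer
 * while holding the device lock, exactly as hci_inquiry() does below:
 *
 *	hci_dev_lock_bh(hdev);
 *	n = inquiry_cache_dump(hdev, max_rsp, buf);
 *	hci_dev_unlock_bh(hdev);
 */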
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	inquiry_cp ic;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&ic.lap, &ir->lap, 3);
	ic.length = ir->length;
	ic.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, INQUIRY_CP_SIZE, &ic);
}
int hci_inquiry(unsigned long arg)
{
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf, *ptr;

	ptr = (void *) arg;
	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) ||
	    ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * 2 * HZ;
	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
		goto done;

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	if (!(buf = kmalloc(sizeof(inquiry_info) * max_rsp, GFP_KERNEL))) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!verify_area(VERIFY_WRITE, ptr, sizeof(ir) +
			 (sizeof(inquiry_info) * ir.num_rsp))) {
		copy_to_user(ptr, &ir, sizeof(ir));
		ptr += sizeof(ir);
		copy_to_user(ptr, buf, sizeof(inquiry_info) * ir.num_rsp);
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
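/*
 * Editor's note: the user buffer handed to hci_inquiry() is laid out
 * as a struct hci_inquiry_req header followed directly by room for
 * num_rsp inquiry_info records; the two copy_to_user() calls above
 * fill it back to back. With num_rsp == 0 the cap is 255 entries.
 */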
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);
	__hci_request(hdev, hci_reset_req, 0, HZ/4);
	clear_bit(HCI_INIT, &hdev->flags);

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);
	return ret;
}
int hci_dev_cmd(unsigned int cmd, unsigned long arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, (void *) arg, sizeof(dr)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(dr.dev_id)))
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}
		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req,
					dr.dev_opt, HCI_INIT_TIMEOUT);
			if (err < 0)
				break;
		}
		err = hci_request(hdev, hci_encrypt_req,
					dr.dev_opt, HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt, HCI_INIT_TIMEOUT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETLINKPOL:
		hdev->link_policy = (__u16) dr.dev_opt;
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *)&dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *)&dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}
	hci_dev_put(hdev);
	return err;
}
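/*
 * Editor's note: HCISETACLMTU/HCISETSCOMTU pack two 16-bit values into
 * the 32-bit dev_opt, read back above via pointer arithmetic. On a
 * little-endian host that means dev_opt = (mtu << 16) | max_pkt, e.g.
 * (1024 << 16) | 8 for a 1024-byte MTU and 8 outstanding packets; the
 * layout is host-endian-dependent by construction.
 */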
int hci_get_dev_list(unsigned long arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kmalloc(size, GFP_KERNEL)))
		return -ENOMEM;
	dr = dl->dev_req;

	read_lock_bh(&hdev_list_lock);
	list_for_each(p, &hdev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hdev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user((void *) arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(unsigned long arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, (void *) arg, sizeof(di)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(di.dev_id)))
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = hdev->type;
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user((void *) arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);
	return err;
}
/* ---- Interface to HCI drivers ---- */

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hdev_list, *p;
	int id = 0;

	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hdev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hdev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);
	init_MUTEX(&hdev->req_lock);

	inquiry_cache_init(hdev);

	conn_hash_init(hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	MOD_INC_USE_COUNT;

	write_unlock_bh(&hdev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_run_hotplug(hdev->name, "register");

	return id;
}
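/*
 * Illustrative driver skeleton (editor's sketch; the hci_xx_* names
 * are hypothetical): a transport driver fills in the callbacks checked
 * above and registers itself, e.g.
 *
 *	hdev->type     = HCI_UART;
 *	hdev->open     = hci_xx_open;
 *	hdev->close    = hci_xx_close;
 *	hdev->flush    = hci_xx_flush;
 *	hdev->send     = hci_xx_send;
 *	hdev->destruct = hci_xx_destruct;
 *	if (hci_register_dev(hdev) < 0)
 *		BT_ERR("Can't register HCI device");
 *
 * open, close and destruct are mandatory; registration fails with
 * -EINVAL without them.
 */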
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

	write_lock_bh(&hdev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hdev_list_lock);

	hci_dev_do_close(hdev);

	hci_notify(hdev, HCI_DEV_UNREG);
	hci_run_hotplug(hdev->name, "unregister");

	hci_dev_put(hdev);

	MOD_DEC_USE_COUNT;
	return 0;
}
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	hci_run_hotplug(hdev->name, "suspend");
	return 0;
}

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	hci_run_hotplug(hdev->name, "resume");
	return 0;
}
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	BT_DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len);

	/* Incoming skb */
	bluez_cb(skb)->incomming = 1;

	/* Time stamp */
	do_gettimeofday(&skb->stamp);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	hci_sched_rx(hdev);
	return 0;
}
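/*
 * Illustrative RX path from a driver (editor's sketch): the driver
 * tags each skb with its packet type and owning device before handing
 * it to the core.
 *
 *	skb->dev = (void *) hdev;
 *	skb->pkt_type = HCI_EVENT_PKT;  (or HCI_ACLDATA_PKT/HCI_SCODATA_PKT)
 *	hci_recv_frame(skb);
 *
 * Frames arriving while the device is neither UP nor in INIT are
 * dropped above with -ENXIO.
 */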
/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
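/*
 * Editor's note: the two protocol slots are HCI_PROTO_L2CAP and
 * HCI_PROTO_SCO (see the RX handlers below). A sketch of a protocol
 * registering itself, using the hci_proto fields referenced in this
 * file (l2cap_recv_acldata is a hypothetical callback name):
 *
 *	static struct hci_proto l2cap_hci_proto = {
 *		.name         = "L2CAP",
 *		.id           = HCI_PROTO_L2CAP,
 *		.recv_acldata = l2cap_recv_acldata,
 *	};
 *	hci_register_proto(&l2cap_hci_proto);
 */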
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		do_gettimeofday(&skb->stamp);

		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	hci_command_hdr *hc;
	struct sk_buff *skb;

	BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);

	if (!(skb = bluez_skb_alloc(len, GFP_ATOMIC))) {
		BT_ERR("%s Can't allocate memory for HCI command", hdev->name);
		return -ENOMEM;
	}

	hc = (hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hc->opcode = __cpu_to_le16(cmd_opcode_pack(ogf, ocf));
	hc->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	skb->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;
	skb_queue_tail(&hdev->cmd_q, skb);
	hci_sched_cmd(hdev);

	return 0;
}
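/*
 * Editor's note: cmd_opcode_pack() builds the 16-bit opcode as
 * (OGF << 10) | OCF. For example the Reset command sent during init,
 * OGF_HOST_CTL (0x03) with OCF_RESET (0x0003), packs to opcode 0x0C03,
 * emitted little-endian by the __cpu_to_le16() above.
 */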
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
{
	hci_command_hdr *hc;

	if (!hdev->sent_cmd)
		return NULL;

	hc = (void *) hdev->sent_cmd->data;

	if (hc->opcode != __cpu_to_le16(cmd_opcode_pack(ogf, ocf)))
		return NULL;

	BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	int len = skb->len;
	hci_acl_hdr *ah;

	ah = (hci_acl_hdr *) skb_push(skb, HCI_ACL_HDR_SIZE);
	ah->handle = __cpu_to_le16(acl_handle_pack(handle, flags));
	ah->dlen   = __cpu_to_le16(len);

	skb->h.raw = (void *) ah;
}
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	skb->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			skb->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	hci_sched_tx(hdev);
	return 0;
}
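/*
 * Editor's note: acl_handle_pack(handle, flags) folds the 12-bit
 * connection handle and the packet boundary/broadcast flags into one
 * 16-bit field, so the first fragment goes out tagged ACL_START and
 * each continuation ACL_CONT; the receive side splits the field again
 * with acl_handle()/acl_flags() in hci_acldata_packet() below.
 */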
/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	hci_sco_hdr hs;

	BT_DBG("%s len %d", hdev->name, skb->len);

	if (skb->len > hdev->sco_mtu) {
		kfree_skb(skb);
		return -EINVAL;
	}

	hs.handle = __cpu_to_le16(conn->handle);
	hs.dlen   = skb->len;

	skb->h.raw = skb_push(skb, HCI_SCO_HDR_SIZE);
	memcpy(skb->h.raw, &hs, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	skb->pkt_type = HCI_SCODATA_PKT;
	skb_queue_tail(&conn->data_q, skb);
	hci_sched_tx(hdev);
	return 0;
}
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || c->state != BT_CONNECTED
				|| skb_queue_empty(&c->data_q))
			continue;
		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
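/*
 * Editor's note, worked example: with hdev->acl_cnt == 8 free
 * controller buffers and three connected ACL links with queued data,
 * the link with the smallest 'sent' count wins and gets a quota of
 * 8 / 3 = 2 packets (minimum 1), which keeps slow links from being
 * starved by busy ones.
 */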
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			/* 0x13: remote user terminated connection */
			hci_acl_disconn(c, 0x13);
		}
	}
}
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	/* ACL tx timeout must be longer than maximum
	 * link supervision timeout (40.9 seconds) */
	if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
		hci_acl_tx_to(hdev);

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}
/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	hci_acl_hdr *ah = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(ah->handle);
	flags  = acl_flags(handle);
	handle = acl_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	hci_sco_hdr *sh = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(sh->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (skb->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (skb->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		if (hdev->sent_cmd)
			kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			hci_sched_cmd(hdev);
		}
	}
}
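/*
 * Editor's note: cmd_cnt mirrors the controller's Num_HCI_Command_Packets
 * credit; the event handler replenishes it on Command Complete/Status,
 * and the one-second check above restarts a wedged controller. sent_cmd
 * keeps a clone of the last command so hci_sent_cmd_data() can hand its
 * parameters back to the event handler.
 */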
/* ---- Initialization ---- */

int hci_core_init(void)
{
	return 0;
}

int hci_core_cleanup(void)
{
	return 0;
}