/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
|
|
|
|
|
2008-02-18 15:25:57 +08:00
|
|
|
#include <linux/jiffies.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/kmod.h>
|
|
|
|
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/sched.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/poll.h>
|
|
|
|
#include <linux/fcntl.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/skbuff.h>
|
2010-03-20 22:20:04 +08:00
|
|
|
#include <linux/workqueue.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/notifier.h>
|
2009-06-08 20:41:38 +08:00
|
|
|
#include <linux/rfkill.h>
|
2011-02-16 22:32:41 +08:00
|
|
|
#include <linux/timer.h>
|
2011-06-10 05:50:43 +08:00
|
|
|
#include <linux/crypto.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <net/sock.h>
|
|
|
|
|
|
|
|
#include <asm/system.h>
|
2010-12-01 22:58:25 +08:00
|
|
|
#include <linux/uaccess.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <asm/unaligned.h>
|
|
|
|
|
|
|
|
#include <net/bluetooth/bluetooth.h>
|
|
|
|
#include <net/bluetooth/hci_core.h>
|
|
|
|
|
2010-12-15 19:53:18 +08:00
|
|
|
#define AUTO_OFF_TIMEOUT 2000
|
|
|
|
|
2012-01-11 04:33:50 +08:00
|
|
|
bool enable_hs;
|
2011-11-18 19:35:42 +08:00
|
|
|
|
2010-08-09 11:06:53 +08:00
|
|
|
static void hci_rx_work(struct work_struct *work);
|
2011-12-15 09:53:47 +08:00
|
|
|
static void hci_cmd_work(struct work_struct *work);
|
2011-12-15 10:50:02 +08:00
|
|
|
static void hci_tx_work(struct work_struct *work);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* HCI device list */
|
|
|
|
LIST_HEAD(hci_dev_list);
|
|
|
|
DEFINE_RWLOCK(hci_dev_list_lock);
|
|
|
|
|
|
|
|
/* HCI callback list */
|
|
|
|
LIST_HEAD(hci_cb_list);
|
|
|
|
DEFINE_RWLOCK(hci_cb_list_lock);
|
|
|
|
|
|
|
|
/* HCI notifiers list */
|
[PATCH] Notifier chain update: API changes
The kernel's implementation of notifier chains is unsafe. There is no
protection against entries being added to or removed from a chain while the
chain is in use. The issues were discussed in this thread:
http://marc.theaimsgroup.com/?l=linux-kernel&m=113018709002036&w=2
We noticed that notifier chains in the kernel fall into two basic usage
classes:
"Blocking" chains are always called from a process context
and the callout routines are allowed to sleep;
"Atomic" chains can be called from an atomic context and
the callout routines are not allowed to sleep.
We decided to codify this distinction and make it part of the API. Therefore
this set of patches introduces three new, parallel APIs: one for blocking
notifiers, one for atomic notifiers, and one for "raw" notifiers (which is
really just the old API under a new name). New kinds of data structures are
used for the heads of the chains, and new routines are defined for
registration, unregistration, and calling a chain. The three APIs are
explained in include/linux/notifier.h and their implementation is in
kernel/sys.c.
With atomic and blocking chains, the implementation guarantees that the chain
links will not be corrupted and that chain callers will not get messed up by
entries being added or removed. For raw chains the implementation provides no
guarantees at all; users of this API must provide their own protections. (The
idea was that situations may come up where the assumptions of the atomic and
blocking APIs are not appropriate, so it should be possible for users to
handle these things in their own way.)
There are some limitations, which should not be too hard to live with. For
atomic/blocking chains, registration and unregistration must always be done in
a process context since the chain is protected by a mutex/rwsem. Also, a
callout routine for a non-raw chain must not try to register or unregister
entries on its own chain. (This did happen in a couple of places and the code
had to be changed to avoid it.)
Since atomic chains may be called from within an NMI handler, they cannot use
spinlocks for synchronization. Instead we use RCU. The overhead falls almost
entirely in the unregister routine, which is okay since unregistration is much
less frequent that calling a chain.
Here is the list of chains that we adjusted and their classifications. None
of them use the raw API, so for the moment it is only a placeholder.
ATOMIC CHAINS
-------------
arch/i386/kernel/traps.c: i386die_chain
arch/ia64/kernel/traps.c: ia64die_chain
arch/powerpc/kernel/traps.c: powerpc_die_chain
arch/sparc64/kernel/traps.c: sparc64die_chain
arch/x86_64/kernel/traps.c: die_chain
drivers/char/ipmi/ipmi_si_intf.c: xaction_notifier_list
kernel/panic.c: panic_notifier_list
kernel/profile.c: task_free_notifier
net/bluetooth/hci_core.c: hci_notifier
net/ipv4/netfilter/ip_conntrack_core.c: ip_conntrack_chain
net/ipv4/netfilter/ip_conntrack_core.c: ip_conntrack_expect_chain
net/ipv6/addrconf.c: inet6addr_chain
net/netfilter/nf_conntrack_core.c: nf_conntrack_chain
net/netfilter/nf_conntrack_core.c: nf_conntrack_expect_chain
net/netlink/af_netlink.c: netlink_chain
BLOCKING CHAINS
---------------
arch/powerpc/platforms/pseries/reconfig.c: pSeries_reconfig_chain
arch/s390/kernel/process.c: idle_chain
arch/x86_64/kernel/process.c idle_notifier
drivers/base/memory.c: memory_chain
drivers/cpufreq/cpufreq.c cpufreq_policy_notifier_list
drivers/cpufreq/cpufreq.c cpufreq_transition_notifier_list
drivers/macintosh/adb.c: adb_client_list
drivers/macintosh/via-pmu.c sleep_notifier_list
drivers/macintosh/via-pmu68k.c sleep_notifier_list
drivers/macintosh/windfarm_core.c wf_client_list
drivers/usb/core/notify.c usb_notifier_list
drivers/video/fbmem.c fb_notifier_list
kernel/cpu.c cpu_chain
kernel/module.c module_notify_list
kernel/profile.c munmap_notifier
kernel/profile.c task_exit_notifier
kernel/sys.c reboot_notifier_list
net/core/dev.c netdev_chain
net/decnet/dn_dev.c: dnaddr_chain
net/ipv4/devinet.c: inetaddr_chain
It's possible that some of these classifications are wrong. If they are,
please let us know or submit a patch to fix them. Note that any chain that
gets called very frequently should be atomic, because the rwsem read-locking
used for blocking chains is very likely to incur cache misses on SMP systems.
(However, if the chain's callout routines may sleep then the chain cannot be
atomic.)
The patch set was written by Alan Stern and Chandra Seetharaman, incorporating
material written by Keith Owens and suggestions from Paul McKenney and Andrew
Morton.
[jes@sgi.com: restructure the notifier chain initialization macros]
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Chandra Seetharaman <sekharan@us.ibm.com>
Signed-off-by: Jes Sorensen <jes@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-03-27 17:16:30 +08:00
|
|
|
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* ---- HCI notifications ---- */
|
|
|
|
|
|
|
|
int hci_register_notifier(struct notifier_block *nb)
|
|
|
|
{
|
[PATCH] Notifier chain update: API changes
The kernel's implementation of notifier chains is unsafe. There is no
protection against entries being added to or removed from a chain while the
chain is in use. The issues were discussed in this thread:
http://marc.theaimsgroup.com/?l=linux-kernel&m=113018709002036&w=2
We noticed that notifier chains in the kernel fall into two basic usage
classes:
"Blocking" chains are always called from a process context
and the callout routines are allowed to sleep;
"Atomic" chains can be called from an atomic context and
the callout routines are not allowed to sleep.
We decided to codify this distinction and make it part of the API. Therefore
this set of patches introduces three new, parallel APIs: one for blocking
notifiers, one for atomic notifiers, and one for "raw" notifiers (which is
really just the old API under a new name). New kinds of data structures are
used for the heads of the chains, and new routines are defined for
registration, unregistration, and calling a chain. The three APIs are
explained in include/linux/notifier.h and their implementation is in
kernel/sys.c.
With atomic and blocking chains, the implementation guarantees that the chain
links will not be corrupted and that chain callers will not get messed up by
entries being added or removed. For raw chains the implementation provides no
guarantees at all; users of this API must provide their own protections. (The
idea was that situations may come up where the assumptions of the atomic and
blocking APIs are not appropriate, so it should be possible for users to
handle these things in their own way.)
There are some limitations, which should not be too hard to live with. For
atomic/blocking chains, registration and unregistration must always be done in
a process context since the chain is protected by a mutex/rwsem. Also, a
callout routine for a non-raw chain must not try to register or unregister
entries on its own chain. (This did happen in a couple of places and the code
had to be changed to avoid it.)
Since atomic chains may be called from within an NMI handler, they cannot use
spinlocks for synchronization. Instead we use RCU. The overhead falls almost
entirely in the unregister routine, which is okay since unregistration is much
less frequent that calling a chain.
Here is the list of chains that we adjusted and their classifications. None
of them use the raw API, so for the moment it is only a placeholder.
ATOMIC CHAINS
-------------
arch/i386/kernel/traps.c: i386die_chain
arch/ia64/kernel/traps.c: ia64die_chain
arch/powerpc/kernel/traps.c: powerpc_die_chain
arch/sparc64/kernel/traps.c: sparc64die_chain
arch/x86_64/kernel/traps.c: die_chain
drivers/char/ipmi/ipmi_si_intf.c: xaction_notifier_list
kernel/panic.c: panic_notifier_list
kernel/profile.c: task_free_notifier
net/bluetooth/hci_core.c: hci_notifier
net/ipv4/netfilter/ip_conntrack_core.c: ip_conntrack_chain
net/ipv4/netfilter/ip_conntrack_core.c: ip_conntrack_expect_chain
net/ipv6/addrconf.c: inet6addr_chain
net/netfilter/nf_conntrack_core.c: nf_conntrack_chain
net/netfilter/nf_conntrack_core.c: nf_conntrack_expect_chain
net/netlink/af_netlink.c: netlink_chain
BLOCKING CHAINS
---------------
arch/powerpc/platforms/pseries/reconfig.c: pSeries_reconfig_chain
arch/s390/kernel/process.c: idle_chain
arch/x86_64/kernel/process.c idle_notifier
drivers/base/memory.c: memory_chain
drivers/cpufreq/cpufreq.c cpufreq_policy_notifier_list
drivers/cpufreq/cpufreq.c cpufreq_transition_notifier_list
drivers/macintosh/adb.c: adb_client_list
drivers/macintosh/via-pmu.c sleep_notifier_list
drivers/macintosh/via-pmu68k.c sleep_notifier_list
drivers/macintosh/windfarm_core.c wf_client_list
drivers/usb/core/notify.c usb_notifier_list
drivers/video/fbmem.c fb_notifier_list
kernel/cpu.c cpu_chain
kernel/module.c module_notify_list
kernel/profile.c munmap_notifier
kernel/profile.c task_exit_notifier
kernel/sys.c reboot_notifier_list
net/core/dev.c netdev_chain
net/decnet/dn_dev.c: dnaddr_chain
net/ipv4/devinet.c: inetaddr_chain
It's possible that some of these classifications are wrong. If they are,
please let us know or submit a patch to fix them. Note that any chain that
gets called very frequently should be atomic, because the rwsem read-locking
used for blocking chains is very likely to incur cache misses on SMP systems.
(However, if the chain's callout routines may sleep then the chain cannot be
atomic.)
The patch set was written by Alan Stern and Chandra Seetharaman, incorporating
material written by Keith Owens and suggestions from Paul McKenney and Andrew
Morton.
[jes@sgi.com: restructure the notifier chain initialization macros]
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Chandra Seetharaman <sekharan@us.ibm.com>
Signed-off-by: Jes Sorensen <jes@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-03-27 17:16:30 +08:00
|
|
|
return atomic_notifier_chain_register(&hci_notifier, nb);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
int hci_unregister_notifier(struct notifier_block *nb)
|
|
|
|
{
|
[PATCH] Notifier chain update: API changes
The kernel's implementation of notifier chains is unsafe. There is no
protection against entries being added to or removed from a chain while the
chain is in use. The issues were discussed in this thread:
http://marc.theaimsgroup.com/?l=linux-kernel&m=113018709002036&w=2
We noticed that notifier chains in the kernel fall into two basic usage
classes:
"Blocking" chains are always called from a process context
and the callout routines are allowed to sleep;
"Atomic" chains can be called from an atomic context and
the callout routines are not allowed to sleep.
We decided to codify this distinction and make it part of the API. Therefore
this set of patches introduces three new, parallel APIs: one for blocking
notifiers, one for atomic notifiers, and one for "raw" notifiers (which is
really just the old API under a new name). New kinds of data structures are
used for the heads of the chains, and new routines are defined for
registration, unregistration, and calling a chain. The three APIs are
explained in include/linux/notifier.h and their implementation is in
kernel/sys.c.
With atomic and blocking chains, the implementation guarantees that the chain
links will not be corrupted and that chain callers will not get messed up by
entries being added or removed. For raw chains the implementation provides no
guarantees at all; users of this API must provide their own protections. (The
idea was that situations may come up where the assumptions of the atomic and
blocking APIs are not appropriate, so it should be possible for users to
handle these things in their own way.)
There are some limitations, which should not be too hard to live with. For
atomic/blocking chains, registration and unregistration must always be done in
a process context since the chain is protected by a mutex/rwsem. Also, a
callout routine for a non-raw chain must not try to register or unregister
entries on its own chain. (This did happen in a couple of places and the code
had to be changed to avoid it.)
Since atomic chains may be called from within an NMI handler, they cannot use
spinlocks for synchronization. Instead we use RCU. The overhead falls almost
entirely in the unregister routine, which is okay since unregistration is much
less frequent that calling a chain.
Here is the list of chains that we adjusted and their classifications. None
of them use the raw API, so for the moment it is only a placeholder.
ATOMIC CHAINS
-------------
arch/i386/kernel/traps.c: i386die_chain
arch/ia64/kernel/traps.c: ia64die_chain
arch/powerpc/kernel/traps.c: powerpc_die_chain
arch/sparc64/kernel/traps.c: sparc64die_chain
arch/x86_64/kernel/traps.c: die_chain
drivers/char/ipmi/ipmi_si_intf.c: xaction_notifier_list
kernel/panic.c: panic_notifier_list
kernel/profile.c: task_free_notifier
net/bluetooth/hci_core.c: hci_notifier
net/ipv4/netfilter/ip_conntrack_core.c: ip_conntrack_chain
net/ipv4/netfilter/ip_conntrack_core.c: ip_conntrack_expect_chain
net/ipv6/addrconf.c: inet6addr_chain
net/netfilter/nf_conntrack_core.c: nf_conntrack_chain
net/netfilter/nf_conntrack_core.c: nf_conntrack_expect_chain
net/netlink/af_netlink.c: netlink_chain
BLOCKING CHAINS
---------------
arch/powerpc/platforms/pseries/reconfig.c: pSeries_reconfig_chain
arch/s390/kernel/process.c: idle_chain
arch/x86_64/kernel/process.c idle_notifier
drivers/base/memory.c: memory_chain
drivers/cpufreq/cpufreq.c cpufreq_policy_notifier_list
drivers/cpufreq/cpufreq.c cpufreq_transition_notifier_list
drivers/macintosh/adb.c: adb_client_list
drivers/macintosh/via-pmu.c sleep_notifier_list
drivers/macintosh/via-pmu68k.c sleep_notifier_list
drivers/macintosh/windfarm_core.c wf_client_list
drivers/usb/core/notify.c usb_notifier_list
drivers/video/fbmem.c fb_notifier_list
kernel/cpu.c cpu_chain
kernel/module.c module_notify_list
kernel/profile.c munmap_notifier
kernel/profile.c task_exit_notifier
kernel/sys.c reboot_notifier_list
net/core/dev.c netdev_chain
net/decnet/dn_dev.c: dnaddr_chain
net/ipv4/devinet.c: inetaddr_chain
It's possible that some of these classifications are wrong. If they are,
please let us know or submit a patch to fix them. Note that any chain that
gets called very frequently should be atomic, because the rwsem read-locking
used for blocking chains is very likely to incur cache misses on SMP systems.
(However, if the chain's callout routines may sleep then the chain cannot be
atomic.)
The patch set was written by Alan Stern and Chandra Seetharaman, incorporating
material written by Keith Owens and suggestions from Paul McKenney and Andrew
Morton.
[jes@sgi.com: restructure the notifier chain initialization macros]
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Chandra Seetharaman <sekharan@us.ibm.com>
Signed-off-by: Jes Sorensen <jes@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-03-27 17:16:30 +08:00
|
|
|
return atomic_notifier_chain_unregister(&hci_notifier, nb);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2005-10-29 01:20:48 +08:00
|
|
|
static void hci_notify(struct hci_dev *hdev, int event)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
[PATCH] Notifier chain update: API changes
The kernel's implementation of notifier chains is unsafe. There is no
protection against entries being added to or removed from a chain while the
chain is in use. The issues were discussed in this thread:
http://marc.theaimsgroup.com/?l=linux-kernel&m=113018709002036&w=2
We noticed that notifier chains in the kernel fall into two basic usage
classes:
"Blocking" chains are always called from a process context
and the callout routines are allowed to sleep;
"Atomic" chains can be called from an atomic context and
the callout routines are not allowed to sleep.
We decided to codify this distinction and make it part of the API. Therefore
this set of patches introduces three new, parallel APIs: one for blocking
notifiers, one for atomic notifiers, and one for "raw" notifiers (which is
really just the old API under a new name). New kinds of data structures are
used for the heads of the chains, and new routines are defined for
registration, unregistration, and calling a chain. The three APIs are
explained in include/linux/notifier.h and their implementation is in
kernel/sys.c.
With atomic and blocking chains, the implementation guarantees that the chain
links will not be corrupted and that chain callers will not get messed up by
entries being added or removed. For raw chains the implementation provides no
guarantees at all; users of this API must provide their own protections. (The
idea was that situations may come up where the assumptions of the atomic and
blocking APIs are not appropriate, so it should be possible for users to
handle these things in their own way.)
There are some limitations, which should not be too hard to live with. For
atomic/blocking chains, registration and unregistration must always be done in
a process context since the chain is protected by a mutex/rwsem. Also, a
callout routine for a non-raw chain must not try to register or unregister
entries on its own chain. (This did happen in a couple of places and the code
had to be changed to avoid it.)
Since atomic chains may be called from within an NMI handler, they cannot use
spinlocks for synchronization. Instead we use RCU. The overhead falls almost
entirely in the unregister routine, which is okay since unregistration is much
less frequent that calling a chain.
Here is the list of chains that we adjusted and their classifications. None
of them use the raw API, so for the moment it is only a placeholder.
ATOMIC CHAINS
-------------
arch/i386/kernel/traps.c: i386die_chain
arch/ia64/kernel/traps.c: ia64die_chain
arch/powerpc/kernel/traps.c: powerpc_die_chain
arch/sparc64/kernel/traps.c: sparc64die_chain
arch/x86_64/kernel/traps.c: die_chain
drivers/char/ipmi/ipmi_si_intf.c: xaction_notifier_list
kernel/panic.c: panic_notifier_list
kernel/profile.c: task_free_notifier
net/bluetooth/hci_core.c: hci_notifier
net/ipv4/netfilter/ip_conntrack_core.c: ip_conntrack_chain
net/ipv4/netfilter/ip_conntrack_core.c: ip_conntrack_expect_chain
net/ipv6/addrconf.c: inet6addr_chain
net/netfilter/nf_conntrack_core.c: nf_conntrack_chain
net/netfilter/nf_conntrack_core.c: nf_conntrack_expect_chain
net/netlink/af_netlink.c: netlink_chain
BLOCKING CHAINS
---------------
arch/powerpc/platforms/pseries/reconfig.c: pSeries_reconfig_chain
arch/s390/kernel/process.c: idle_chain
arch/x86_64/kernel/process.c idle_notifier
drivers/base/memory.c: memory_chain
drivers/cpufreq/cpufreq.c cpufreq_policy_notifier_list
drivers/cpufreq/cpufreq.c cpufreq_transition_notifier_list
drivers/macintosh/adb.c: adb_client_list
drivers/macintosh/via-pmu.c sleep_notifier_list
drivers/macintosh/via-pmu68k.c sleep_notifier_list
drivers/macintosh/windfarm_core.c wf_client_list
drivers/usb/core/notify.c usb_notifier_list
drivers/video/fbmem.c fb_notifier_list
kernel/cpu.c cpu_chain
kernel/module.c module_notify_list
kernel/profile.c munmap_notifier
kernel/profile.c task_exit_notifier
kernel/sys.c reboot_notifier_list
net/core/dev.c netdev_chain
net/decnet/dn_dev.c: dnaddr_chain
net/ipv4/devinet.c: inetaddr_chain
It's possible that some of these classifications are wrong. If they are,
please let us know or submit a patch to fix them. Note that any chain that
gets called very frequently should be atomic, because the rwsem read-locking
used for blocking chains is very likely to incur cache misses on SMP systems.
(However, if the chain's callout routines may sleep then the chain cannot be
atomic.)
The patch set was written by Alan Stern and Chandra Seetharaman, incorporating
material written by Keith Owens and suggestions from Paul McKenney and Andrew
Morton.
[jes@sgi.com: restructure the notifier chain initialization macros]
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Chandra Seetharaman <sekharan@us.ibm.com>
Signed-off-by: Jes Sorensen <jes@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-03-27 17:16:30 +08:00
|
|
|
atomic_notifier_call_chain(&hci_notifier, event, hdev);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* ---- HCI requests ---- */
|
|
|
|
|
2010-12-22 05:01:27 +08:00
|
|
|
/* Complete a pending synchronous HCI request and wake its waiter. */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
|
|
|
|
|
|
|
|
static void hci_req_cancel(struct hci_dev *hdev, int err)
|
|
|
|
{
|
|
|
|
BT_DBG("%s err 0x%2.2x", hdev->name, err);
|
|
|
|
|
|
|
|
if (hdev->req_status == HCI_REQ_PEND) {
|
|
|
|
hdev->req_result = err;
|
|
|
|
hdev->req_status = HCI_REQ_CANCELED;
|
|
|
|
wake_up_interruptible(&hdev->req_wait_q);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Execute request and wait for completion. */
|
2007-02-09 22:24:33 +08:00
|
|
|
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
|
2011-02-17 23:46:47 +08:00
|
|
|
unsigned long opt, __u32 timeout)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
DECLARE_WAITQUEUE(wait, current);
|
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
BT_DBG("%s start", hdev->name);
|
|
|
|
|
|
|
|
hdev->req_status = HCI_REQ_PEND;
|
|
|
|
|
|
|
|
add_wait_queue(&hdev->req_wait_q, &wait);
|
|
|
|
set_current_state(TASK_INTERRUPTIBLE);
|
|
|
|
|
|
|
|
req(hdev, opt);
|
|
|
|
schedule_timeout(timeout);
|
|
|
|
|
|
|
|
remove_wait_queue(&hdev->req_wait_q, &wait);
|
|
|
|
|
|
|
|
if (signal_pending(current))
|
|
|
|
return -EINTR;
|
|
|
|
|
|
|
|
switch (hdev->req_status) {
|
|
|
|
case HCI_REQ_DONE:
|
2011-06-30 09:18:29 +08:00
|
|
|
err = -bt_to_errno(hdev->req_result);
|
2005-04-17 06:20:36 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
case HCI_REQ_CANCELED:
|
|
|
|
err = -hdev->req_result;
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
err = -ETIMEDOUT;
|
|
|
|
break;
|
2007-04-21 08:09:22 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2011-01-10 19:28:59 +08:00
|
|
|
hdev->req_status = hdev->req_result = 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
BT_DBG("%s end: err %d", hdev->name, err);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Serialized wrapper around __hci_request(); fails fast when the
 * device is not up. */
static inline int hci_request(struct hci_dev *hdev,
			      void (*req)(struct hci_dev *hdev, unsigned long opt),
			      unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
|
|
|
|
|
|
|
|
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
|
|
|
|
{
|
|
|
|
BT_DBG("%s %ld", hdev->name, opt);
|
|
|
|
|
|
|
|
/* Reset device */
|
2011-03-17 02:36:29 +08:00
|
|
|
set_bit(HCI_RESET, &hdev->flags);
|
2007-10-20 19:33:56 +08:00
|
|
|
hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2011-12-19 22:31:27 +08:00
|
|
|
static void bredr_init(struct hci_dev *hdev)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2011-01-10 19:44:55 +08:00
|
|
|
struct hci_cp_delete_stored_link_key cp;
|
2005-11-09 01:57:21 +08:00
|
|
|
__le16 param;
|
2007-09-09 14:39:49 +08:00
|
|
|
__u8 flt_type;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2011-12-19 22:31:28 +08:00
|
|
|
hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Mandatory initialization */
|
|
|
|
|
|
|
|
/* Reset */
|
2011-03-17 02:36:29 +08:00
|
|
|
if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
|
2011-12-19 22:31:27 +08:00
|
|
|
set_bit(HCI_RESET, &hdev->flags);
|
|
|
|
hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
|
2011-03-17 02:36:29 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Read Local Supported Features */
|
2007-10-20 19:33:56 +08:00
|
|
|
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-09-23 15:57:20 +08:00
|
|
|
/* Read Local Version */
|
2007-10-20 19:33:56 +08:00
|
|
|
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
|
2006-09-23 15:57:20 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Read Buffer Size (ACL mtu, max pkt, etc.) */
|
2007-10-20 19:33:56 +08:00
|
|
|
hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Read BD Address */
|
2007-10-20 19:33:56 +08:00
|
|
|
hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
|
|
|
|
|
|
|
|
/* Read Class of Device */
|
|
|
|
hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
|
|
|
|
|
|
|
|
/* Read Local Name */
|
|
|
|
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Read Voice Setting */
|
2007-10-20 19:33:56 +08:00
|
|
|
hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Optional initialization */
|
|
|
|
|
|
|
|
/* Clear Event Filters */
|
2007-09-09 14:39:49 +08:00
|
|
|
flt_type = HCI_FLT_CLEAR_ALL;
|
2007-10-20 19:33:56 +08:00
|
|
|
hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Connection accept timeout ~20 secs */
|
2007-03-26 11:12:50 +08:00
|
|
|
param = cpu_to_le16(0x7d00);
|
2007-10-20 19:33:56 +08:00
|
|
|
hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
|
2011-01-10 19:44:55 +08:00
|
|
|
|
|
|
|
bacpy(&cp.bdaddr, BDADDR_ANY);
|
|
|
|
cp.delete_all = 1;
|
|
|
|
hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2011-12-19 22:31:27 +08:00
|
|
|
static void amp_init(struct hci_dev *hdev)
|
|
|
|
{
|
2011-12-19 22:31:28 +08:00
|
|
|
hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
|
|
|
|
|
2011-12-19 22:31:27 +08:00
|
|
|
/* Reset */
|
|
|
|
hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
|
|
|
|
|
|
|
|
/* Read Local Version */
|
|
|
|
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
|
|
|
|
{
|
|
|
|
struct sk_buff *skb;
|
|
|
|
|
|
|
|
BT_DBG("%s %ld", hdev->name, opt);
|
|
|
|
|
|
|
|
/* Driver initialization */
|
|
|
|
|
|
|
|
/* Special commands */
|
|
|
|
while ((skb = skb_dequeue(&hdev->driver_init))) {
|
|
|
|
bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
|
|
|
|
skb->dev = (void *) hdev;
|
|
|
|
|
|
|
|
skb_queue_tail(&hdev->cmd_q, skb);
|
|
|
|
queue_work(hdev->workqueue, &hdev->cmd_work);
|
|
|
|
}
|
|
|
|
skb_queue_purge(&hdev->driver_init);
|
|
|
|
|
|
|
|
switch (hdev->dev_type) {
|
|
|
|
case HCI_BREDR:
|
|
|
|
bredr_init(hdev);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case HCI_AMP:
|
|
|
|
amp_init(hdev);
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
BT_ERR("Unknown device type %d", hdev->dev_type);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2011-02-11 09:38:48 +08:00
|
|
|
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
|
|
|
|
{
|
|
|
|
BT_DBG("%s", hdev->name);
|
|
|
|
|
|
|
|
/* Read LE buffer size */
|
|
|
|
hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
|
|
|
|
{
|
|
|
|
__u8 scan = opt;
|
|
|
|
|
|
|
|
BT_DBG("%s %x", hdev->name, scan);
|
|
|
|
|
|
|
|
/* Inquiry and Page scans */
|
2007-10-20 19:33:56 +08:00
|
|
|
hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
|
|
|
|
{
|
|
|
|
__u8 auth = opt;
|
|
|
|
|
|
|
|
BT_DBG("%s %x", hdev->name, auth);
|
|
|
|
|
|
|
|
/* Authentication */
|
2007-10-20 19:33:56 +08:00
|
|
|
hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* HCI request callback for the HCISETENCRYPT ioctl: @opt carries the
 * encryption-mode value to write to the controller.
 */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
|
|
|
|
|
2008-07-15 02:13:47 +08:00
|
|
|
/* HCI request callback for the HCISETLINKPOL ioctl: @opt carries the
 * default link policy, converted to little endian for the wire.
 */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
|
|
|
|
|
2007-02-09 22:24:33 +08:00
|
|
|
/* Get HCI device by index.
|
2005-04-17 06:20:36 +08:00
|
|
|
* Device is held on return. */
|
|
|
|
struct hci_dev *hci_dev_get(int index)
|
|
|
|
{
|
2011-11-01 16:58:56 +08:00
|
|
|
struct hci_dev *hdev = NULL, *d;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
BT_DBG("%d", index);
|
|
|
|
|
|
|
|
if (index < 0)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
read_lock(&hci_dev_list_lock);
|
2011-11-01 16:58:56 +08:00
|
|
|
list_for_each_entry(d, &hci_dev_list, list) {
|
2005-04-17 06:20:36 +08:00
|
|
|
if (d->id == index) {
|
|
|
|
hdev = hci_dev_hold(d);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
read_unlock(&hci_dev_list_lock);
|
|
|
|
return hdev;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* ---- Inquiry support ---- */
|
2012-01-04 20:23:45 +08:00
|
|
|
|
2012-01-04 21:44:20 +08:00
|
|
|
bool hci_discovery_active(struct hci_dev *hdev)
|
|
|
|
{
|
|
|
|
struct discovery_state *discov = &hdev->discovery;
|
|
|
|
|
|
|
|
if (discov->state == DISCOVERY_INQUIRY ||
|
2012-02-04 04:47:57 +08:00
|
|
|
discov->state == DISCOVERY_LE_SCAN ||
|
2012-01-04 21:44:20 +08:00
|
|
|
discov->state == DISCOVERY_RESOLVING)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2012-01-04 20:23:45 +08:00
|
|
|
/* Move the discovery state machine to @state, emitting the mgmt
 * "discovering" event on the transitions that userspace cares about
 * (stopped, and entering inquiry/LE scan). No-op if already in @state.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_INQUIRY:
	case DISCOVERY_LE_SCAN:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
static void inquiry_cache_flush(struct hci_dev *hdev)
|
|
|
|
{
|
2012-01-04 20:16:21 +08:00
|
|
|
struct discovery_state *cache = &hdev->discovery;
|
2012-01-03 22:03:00 +08:00
|
|
|
struct inquiry_entry *p, *n;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2012-01-04 19:31:59 +08:00
|
|
|
list_for_each_entry_safe(p, n, &cache->all, all) {
|
|
|
|
list_del(&p->all);
|
2012-01-03 22:03:00 +08:00
|
|
|
kfree(p);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2012-01-04 19:31:59 +08:00
|
|
|
|
|
|
|
INIT_LIST_HEAD(&cache->unknown);
|
|
|
|
INIT_LIST_HEAD(&cache->resolve);
|
2012-01-04 20:23:45 +08:00
|
|
|
cache->state = DISCOVERY_STOPPED;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Look up @bdaddr in the full inquiry cache; returns the matching entry
 * or NULL if the address has not been seen. */
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *entry;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(entry, &cache->all, all) {
		if (bacmp(&entry->data.bdaddr, bdaddr) == 0)
			return entry;
	}

	return NULL;
}
|
|
|
|
|
|
|
|
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
|
|
|
|
bdaddr_t *bdaddr)
|
|
|
|
{
|
2012-01-04 20:16:21 +08:00
|
|
|
struct discovery_state *cache = &hdev->discovery;
|
2012-01-04 19:31:59 +08:00
|
|
|
struct inquiry_entry *e;
|
|
|
|
|
|
|
|
BT_DBG("cache %p, %s", cache, batostr(bdaddr));
|
|
|
|
|
|
|
|
list_for_each_entry(e, &cache->unknown, list) {
|
2005-04-17 06:20:36 +08:00
|
|
|
if (!bacmp(&e->data.bdaddr, bdaddr))
|
2012-01-03 22:03:00 +08:00
|
|
|
return e;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2012-01-04 21:44:20 +08:00
|
|
|
/* Look up an entry on the name-resolve list. Passing BDADDR_ANY as
 * @bdaddr acts as a wildcard: the first entry whose name_state equals
 * @state is returned. Otherwise match on the exact address, ignoring
 * @state. Returns NULL when nothing matches.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

	list_for_each_entry(e, &cache->resolve, list) {
		/* Wildcard match: any entry in the requested name state */
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		/* Exact address match, regardless of state */
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
|
|
|
|
|
2012-01-09 06:53:02 +08:00
|
|
|
/* Re-insert @ie into the resolve list, keeping the list ordered so that
 * strongest signal (smallest |RSSI|) comes first among entries not yet
 * pending name resolution; entries already NAME_PENDING stay in front.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	/* Insert position: after the last entry that should precede ie */
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		/* Stop before the first non-pending entry with weaker or
		 * equal signal than ie */
		if (p->name_state != NAME_PENDING &&
				abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
|
|
|
|
|
2012-01-04 19:39:52 +08:00
|
|
|
/* Insert or refresh the inquiry cache entry for @data->bdaddr.
 * @name_known indicates whether the remote name arrived with this result.
 * Returns true when the entry's name is (now) known or pending, false
 * when a name request is still needed or allocation failed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
							bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* RSSI changed on an entry queued for name resolution:
		 * update it and re-sort the resolve list accordingly. */
		if (ie->name_state == NAME_NEEDED &&
						data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: promote the entry and remove it from
	 * whatever unknown/resolve list it currently sits on. */
	if (name_known && ie->name_state != NAME_KNOWN &&
					ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
|
|
|
|
|
|
|
|
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info. Returns the number of entries copied. Must not
 * sleep: callers hold hci_dev_lock. */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *dest = (struct inquiry_info *) buf;
	struct inquiry_entry *entry;
	int count = 0;

	list_for_each_entry(entry, &cache->all, all) {
		struct inquiry_data *src = &entry->data;

		if (count >= num)
			break;

		bacpy(&dest->bdaddr, &src->bdaddr);
		dest->pscan_rep_mode = src->pscan_rep_mode;
		dest->pscan_period_mode = src->pscan_period_mode;
		dest->pscan_mode = src->pscan_mode;
		memcpy(dest->dev_class, src->dev_class, 3);
		dest->clock_offset = src->clock_offset;

		dest++;
		count++;
	}

	BT_DBG("cache %p, copied %d", cache, count);
	return count;
}
|
|
|
|
|
|
|
|
/* HCI request callback that starts an inquiry. @opt points to the
 * userspace-supplied struct hci_inquiry_req (LAP, length, max responses).
 * Bails out silently if an inquiry is already running.
 */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
|
|
|
|
|
|
|
|
/* HCIINQUIRY ioctl backend: optionally run a fresh inquiry (when the
 * cache is stale, empty, or the caller asked for a flush), then copy the
 * cached results back to userspace after the updated hci_inquiry_req.
 * Returns 0 on success or a negative errno.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	/* Re-run the inquiry if the cache can't answer from memory */
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* Inquiry length is in units of 1.28s; 2000ms gives headroom */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Layout in userspace: hci_inquiry_req header followed by the
	 * inquiry_info array. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
|
|
|
|
|
|
|
|
/* ---- HCI ioctl helpers ---- */
|
|
|
|
|
|
|
|
/* Bring the HCI device with index @dev up: open the transport, run the
 * HCI init sequence (unless the device is raw), and announce HCI_DEV_UP.
 * On init failure the transport is fully torn down again.
 * Returns 0 on success or a negative errno (-ERFKILL, -EALREADY, -EIO, ...).
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power on while rfkill-blocked */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* Extra LE init when the host supports it */
		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Don't announce power-on to mgmt while still in setup */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
|
|
|
|
|
|
|
|
/* Bring @hdev down: cancel pending work, flush connections and queues,
 * optionally reset the controller, and close the transport. Safe to call
 * on an already-down device (returns 0 after stopping the cmd timer).
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
			test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Drop the reference taken when the device was opened */
	hci_dev_put(hdev);
	return 0;
}
|
|
|
|
|
|
|
|
/* ioctl-level close: resolve index @dev to a held device, bring it down
 * via hci_dev_do_close() and drop the lookup reference. */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev = hci_dev_get(dev);
	int err;

	if (!hdev)
		return -ENODEV;

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);

	return err;
}
|
|
|
|
|
|
|
|
/* HCIDEVRESET ioctl backend: drop all queues and connections for the
 * running device @dev, reset packet counters, and (for non-raw devices)
 * issue an HCI Reset. A device that is not up is a silent no-op.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow-control counters for all link types */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
|
|
|
|
|
|
|
|
/* HCIDEVRESTAT ioctl backend: zero the per-device statistics counters. */
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev = hci_dev_get(dev);
	int ret = 0;

	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(hdev->stat));

	hci_dev_put(hdev);

	return ret;
}
|
|
|
|
|
|
|
|
/* Dispatcher for the per-device HCISET* ioctls. @arg points to a
 * struct hci_dev_req holding the target index and the option value.
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two __u16 values: low half = packet count,
		 * high half = MTU (historical hci ioctl ABI). */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
|
|
|
|
|
|
|
|
/* HCIGETDEVLIST ioctl backend: copy the id/flags of up to dev_num
 * registered devices into a userspace struct hci_dev_list_req. As a
 * side effect, cancels pending auto power-off and marks non-mgmt
 * devices pairable (legacy-interface access implies legacy behavior).
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation driven by the userspace-supplied count */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Shrink the copy to the number of devices actually found */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
|
|
|
|
|
|
|
|
/* HCIGETDEVINFO ioctl backend: fill a struct hci_dev_info for the device
 * named by di.dev_id and copy it back to userspace. Like
 * hci_get_dev_list(), legacy access cancels auto power-off and marks
 * non-mgmt devices pairable.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	/* Bounded copy instead of strcpy: guarantees NUL-termination and
	 * can never overrun di.name even if the source grows. */
	strlcpy(di.name, hdev->name, sizeof(di.name));
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; high nibble: device type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
|
|
|
|
|
|
|
|
/* ---- Interface to HCI drivers ---- */
|
|
|
|
|
2009-06-08 20:41:38 +08:00
|
|
|
/* rfkill set_block callback: powering the radio off closes the device;
 * unblocking does nothing here (the device is brought up explicitly). */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (blocked)
		hci_dev_do_close(hdev);

	return 0;
}
|
|
|
|
|
|
|
|
/* rfkill operations for HCI devices; only blocking is handled */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Alloc HCI device */
/* Allocate a zeroed struct hci_dev, set up its sysfs device and the
 * driver_init queue. Returns NULL on allocation failure; free with
 * hci_free_dev(). */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
|
|
|
|
|
|
|
|
/* Free HCI device */
/* Drop the sysfs device reference; the actual struct is freed by the
 * device release callback once the last reference goes away. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
|
|
|
|
|
2010-12-15 19:53:18 +08:00
|
|
|
/* Deferred power-on worker: open the device, arm the auto power-off
 * timer when HCI_AUTO_OFF is set, and signal mgmt once setup finishes.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
|
|
|
|
|
|
|
|
/* Deferred power-off worker (scheduled by hci_power_on or mgmt): clear
 * the auto-off flag and close the device.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_dev_close(hdev->id);
}
|
|
|
|
|
2011-11-08 04:16:02 +08:00
|
|
|
/* Deferred discoverable-timeout worker: turn inquiry scan back off
 * (leaving only page scan enabled) and reset the discoverable timeout.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
|
|
|
|
|
2011-01-04 18:08:51 +08:00
|
|
|
int hci_uuids_clear(struct hci_dev *hdev)
|
|
|
|
{
|
|
|
|
struct list_head *p, *n;
|
|
|
|
|
|
|
|
list_for_each_safe(p, n, &hdev->uuids) {
|
|
|
|
struct bt_uuid *uuid;
|
|
|
|
|
|
|
|
uuid = list_entry(p, struct bt_uuid, list);
|
|
|
|
|
|
|
|
list_del(p);
|
|
|
|
kfree(uuid);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-01-17 20:41:05 +08:00
|
|
|
int hci_link_keys_clear(struct hci_dev *hdev)
|
|
|
|
{
|
|
|
|
struct list_head *p, *n;
|
|
|
|
|
|
|
|
list_for_each_safe(p, n, &hdev->link_keys) {
|
|
|
|
struct link_key *key;
|
|
|
|
|
|
|
|
key = list_entry(p, struct link_key, list);
|
|
|
|
|
|
|
|
list_del(p);
|
|
|
|
kfree(key);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-02-03 08:08:00 +08:00
|
|
|
int hci_smp_ltks_clear(struct hci_dev *hdev)
|
|
|
|
{
|
|
|
|
struct smp_ltk *k, *tmp;
|
|
|
|
|
|
|
|
list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
|
|
|
|
list_del(&k->list);
|
|
|
|
kfree(k);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-01-17 20:41:05 +08:00
|
|
|
/* Return the stored link key for @bdaddr, or NULL if none exists. */
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	list_for_each_entry(key, &hdev->link_keys, list) {
		if (!bacmp(bdaddr, &key->bdaddr))
			return key;
	}

	return NULL;
}
|
|
|
|
|
2011-04-29 02:28:59 +08:00
|
|
|
/* Decide whether the link key of type @key_type (previous type
 * @old_key_type) negotiated on @conn should be stored persistently.
 * Returns 1 to persist, 0 to keep it only for the current session.
 */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
|
|
|
|
|
2012-02-03 08:08:01 +08:00
|
|
|
/* Return the long term key matching the @ediv/@rand pair from an LE
 * encryption request, or NULL if no stored LTK matches. */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *ltk;

	list_for_each_entry(ltk, &hdev->long_term_keys, list) {
		if (ltk->ediv == ediv &&
				!memcmp(rand, ltk->rand, sizeof(ltk->rand)))
			return ltk;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);
|
|
|
|
|
2012-02-03 08:08:01 +08:00
|
|
|
struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
|
|
|
|
u8 addr_type)
|
2011-07-08 05:59:36 +08:00
|
|
|
{
|
2012-02-03 08:08:01 +08:00
|
|
|
struct smp_ltk *k;
|
2011-07-08 05:59:36 +08:00
|
|
|
|
2012-02-03 08:08:01 +08:00
|
|
|
list_for_each_entry(k, &hdev->long_term_keys, list)
|
|
|
|
if (addr_type == k->bdaddr_type &&
|
|
|
|
bacmp(bdaddr, &k->bdaddr) == 0)
|
2011-07-08 05:59:36 +08:00
|
|
|
return k;
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
2012-02-03 08:08:01 +08:00
|
|
|
EXPORT_SYMBOL(hci_find_ltk_by_addr);
|
2011-07-08 05:59:36 +08:00
|
|
|
|
2011-04-29 02:28:59 +08:00
|
|
|
/* Store (or update) a BR/EDR link key for @bdaddr.
 *
 * @conn may be NULL. @new_key distinguishes a freshly generated key
 * (notify userspace via mgmt) from one merely being re-loaded.
 * @val points to the 16-byte key value.
 *
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for hci_persistent_key() */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A "changed combination" key keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	/* Notify mgmt BEFORE a non-persistent key is dropped, so
	 * userspace still sees the (persistent=0) event */
	mgmt_new_link_key(hdev, key, persistent);

	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
|
|
|
|
|
2012-02-03 08:08:01 +08:00
|
|
|
/* Store (or update) an SMP key (STK or LTK) for @bdaddr/@addr_type.
 *
 * @type must contain HCI_SMP_STK or HCI_SMP_LTK; other types are
 * silently ignored (return 0). Only freshly distributed LTKs
 * (@new_key set and HCI_SMP_LTK) are announced to userspace via mgmt.
 *
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
				int new_key, u8 authenticated, u8 tk[16],
				u8 enc_size, u16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse the existing entry for this address, if any */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
|
|
|
|
|
2011-01-17 20:41:05 +08:00
|
|
|
/* Remove the stored link key for @bdaddr.
 * Returns 0 on success, -ENOENT if no key is stored for the address.
 */
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key = hci_find_link_key(hdev, bdaddr);

	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}
|
|
|
|
|
2012-02-03 08:08:00 +08:00
|
|
|
/* Remove all stored SMP keys for @bdaddr. Always returns 0. */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *key, *next;

	list_for_each_entry_safe(key, next, &hdev->long_term_keys, list) {
		if (!bacmp(bdaddr, &key->bdaddr)) {
			BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

			list_del(&key->list);
			kfree(key);
		}
	}

	return 0;
}
|
|
|
|
|
2011-02-16 22:32:41 +08:00
|
|
|
/* HCI command timer function.
 *
 * Fires when the controller fails to answer a queued command in time
 * (armed via setup_timer in hci_register_dev). Resets the command
 * credit so the next command can be sent and kicks the cmd work item.
 */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	/* Allow one outstanding command again */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
|
|
|
|
|
2011-03-22 20:12:22 +08:00
|
|
|
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
|
|
|
|
bdaddr_t *bdaddr)
|
|
|
|
{
|
|
|
|
struct oob_data *data;
|
|
|
|
|
|
|
|
list_for_each_entry(data, &hdev->remote_oob_data, list)
|
|
|
|
if (bacmp(bdaddr, &data->bdaddr) == 0)
|
|
|
|
return data;
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Drop the cached remote OOB data for @bdaddr.
 * Returns 0 on success, -ENOENT if nothing is cached for the address.
 */
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *entry = hci_find_remote_oob_data(hdev, bdaddr);

	if (!entry)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&entry->list);
	kfree(entry);

	return 0;
}
|
|
|
|
|
|
|
|
int hci_remote_oob_data_clear(struct hci_dev *hdev)
|
|
|
|
{
|
|
|
|
struct oob_data *data, *n;
|
|
|
|
|
|
|
|
list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
|
|
|
|
list_del(&data->list);
|
|
|
|
kfree(data);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Cache remote OOB pairing data (hash + randomizer) for @bdaddr,
 * overwriting any previously cached values for that address.
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
							u8 *randomizer)
{
	struct oob_data *entry = hci_find_remote_oob_data(hdev, bdaddr);

	if (!entry) {
		/* First data for this address: allocate a new entry */
		entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
		if (!entry)
			return -ENOMEM;

		bacpy(&entry->bdaddr, bdaddr);
		list_add(&entry->list, &hdev->remote_oob_data);
	}

	memcpy(entry->hash, hash, sizeof(entry->hash));
	memcpy(entry->randomizer, randomizer, sizeof(entry->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
|
|
|
|
|
2011-06-15 17:01:14 +08:00
|
|
|
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
|
|
|
|
bdaddr_t *bdaddr)
|
|
|
|
{
|
2011-11-01 16:58:56 +08:00
|
|
|
struct bdaddr_list *b;
|
2011-06-15 17:01:14 +08:00
|
|
|
|
2011-11-01 16:58:56 +08:00
|
|
|
list_for_each_entry(b, &hdev->blacklist, list)
|
2011-06-15 17:01:14 +08:00
|
|
|
if (bacmp(bdaddr, &b->bdaddr) == 0)
|
|
|
|
return b;
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
int hci_blacklist_clear(struct hci_dev *hdev)
|
|
|
|
{
|
|
|
|
struct list_head *p, *n;
|
|
|
|
|
|
|
|
list_for_each_safe(p, n, &hdev->blacklist) {
|
|
|
|
struct bdaddr_list *b;
|
|
|
|
|
|
|
|
b = list_entry(p, struct bdaddr_list, list);
|
|
|
|
|
|
|
|
list_del(p);
|
|
|
|
kfree(b);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Add @bdaddr to the device blacklist and notify mgmt.
 * Returns -EBADF for BDADDR_ANY, -EEXIST if already blacklisted,
 * -ENOMEM on allocation failure, otherwise the mgmt notification's
 * return value.
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr);
}
|
|
|
|
|
|
|
|
/* Remove @bdaddr from the blacklist and notify mgmt.
 * BDADDR_ANY clears the whole list. Returns -ENOENT when the address
 * is not blacklisted, otherwise the mgmt notification's return value.
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr);
}
|
|
|
|
|
2011-06-21 03:39:29 +08:00
|
|
|
/* Delayed-work handler that empties the LE advertising cache.
 * Scheduled via hdev->adv_work (see INIT_DELAYED_WORK in
 * hci_register_dev). Takes the hdev lock around the clear.
 */
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
|
|
|
|
|
2011-05-27 03:23:50 +08:00
|
|
|
int hci_adv_entries_clear(struct hci_dev *hdev)
|
|
|
|
{
|
|
|
|
struct adv_entry *entry, *tmp;
|
|
|
|
|
|
|
|
list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
|
|
|
|
list_del(&entry->list);
|
|
|
|
kfree(entry);
|
|
|
|
}
|
|
|
|
|
|
|
|
BT_DBG("%s adv cache cleared", hdev->name);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Find the cached advertising entry for @bdaddr, or NULL. */
struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct adv_entry *e;

	list_for_each_entry(e, &hdev->adv_entries, list) {
		if (!bacmp(bdaddr, &e->bdaddr))
			return e;
	}

	return NULL;
}
|
|
|
|
|
|
|
|
/* Whether an LE advertising event type accepts connections
 * (ADV_IND or ADV_DIRECT_IND). Returns 1 if connectable, 0 otherwise.
 */
static inline int is_connectable_adv(u8 evt_type)
{
	return evt_type == ADV_IND || evt_type == ADV_DIRECT_IND;
}
|
|
|
|
|
|
|
|
int hci_add_adv_entry(struct hci_dev *hdev,
|
|
|
|
struct hci_ev_le_advertising_info *ev)
|
|
|
|
{
|
|
|
|
struct adv_entry *entry;
|
|
|
|
|
|
|
|
if (!is_connectable_adv(ev->evt_type))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/* Only new entries should be added to adv_entries. So, if
|
|
|
|
* bdaddr was found, don't add it. */
|
|
|
|
if (hci_find_adv_entry(hdev, &ev->bdaddr))
|
|
|
|
return 0;
|
|
|
|
|
2012-01-31 10:31:28 +08:00
|
|
|
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
|
2011-05-27 03:23:50 +08:00
|
|
|
if (!entry)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
bacpy(&entry->bdaddr, &ev->bdaddr);
|
|
|
|
entry->bdaddr_type = ev->bdaddr_type;
|
|
|
|
|
|
|
|
list_add(&entry->list, &hdev->adv_entries);
|
|
|
|
|
|
|
|
BT_DBG("%s adv entry added: address %s type %u", hdev->name,
|
|
|
|
batostr(&entry->bdaddr), entry->bdaddr_type);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Register HCI device.
 *
 * Allocates the lowest free device id (AMP controllers start at 1 so
 * that id 0 is reserved for BR/EDR and the index can double as the
 * AMP controller ID), initializes all per-device state, creates the
 * per-device workqueue and sysfs entries, and schedules power-on.
 *
 * Returns the assigned id on success or a negative errno; on failure
 * the device is unlinked from hci_dev_list again.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* A driver must provide at least open() and close() */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	/* Insert after the last consecutive id so the list stays sorted */
	list_add_tail(&hdev->list, head);

	mutex_init(&hdev->lock);

	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	/* Command tx timeout watchdog (see hci_cmd_timer) */
	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock(&hci_dev_list_lock);

	/* Single-threaded, high-priority queue for this device's work */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration is best-effort: the device works without it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done under the lock above */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
|
|
|
|
|
|
|
|
/* Unregister HCI device.
 *
 * Unlinks the device, closes it, notifies mgmt (unless the device was
 * still in INIT/SETUP), tears down sysfs/rfkill/workqueue and finally
 * frees all per-device caches under the hdev lock before dropping the
 * reference taken in hci_register_dev.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any half-assembled frames (kfree_skb(NULL) is a no-op) */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	/* Make sure the adv cache clear work is not running/pending */
	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
|
|
|
|
|
|
|
|
/* Suspend HCI device: broadcast HCI_DEV_SUSPEND to registered
 * notifiers. Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
|
|
|
|
|
|
|
|
/* Resume HCI device: broadcast HCI_DEV_RESUME to registered
 * notifiers. Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
|
|
|
|
|
2009-11-18 07:40:39 +08:00
|
|
|
/* Receive frame from HCI drivers.
 *
 * Takes ownership of @skb (freed on error). The device pointer is
 * carried in skb->dev. Frames are only accepted while the device is
 * UP or being initialized; otherwise -ENXIO. Accepted frames are
 * timestamped, queued on rx_q and processed by hci_rx_work.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
|
|
|
|
|
2010-07-14 15:32:17 +08:00
|
|
|
/* Incrementally reassemble one HCI packet from a driver byte stream.
 *
 * @type:  packet type (ACL/SCO/EVENT) of the bytes in @data
 * @data:  next chunk of raw bytes from the driver
 * @count: number of bytes available in @data
 * @index: which hdev->reassembly[] slot to use
 *
 * State lives in hdev->reassembly[index]; scb->expect tracks how many
 * bytes are still needed (first the header, then the payload length
 * read from the header). A completed frame is handed to
 * hci_recv_frame() and the slot reset.
 *
 * Returns the number of unconsumed bytes left in @data (>= 0), or a
 * negative errno (-EILSEQ for bad type/index, -ENOMEM on allocation
 * failure or oversized payload).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
					index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* New packet: size the buffer for the worst case of its
		 * type and expect the fixed-size header first */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Copy no more than what the current stage still needs */
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Header complete? Read the payload length from it and
		 * bail out if it would not fit the allocated buffer. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
|
|
|
|
|
2007-07-11 12:42:04 +08:00
|
|
|
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
|
|
|
|
{
|
2010-07-14 15:32:18 +08:00
|
|
|
int rem = 0;
|
|
|
|
|
2007-07-11 12:42:04 +08:00
|
|
|
if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
|
|
|
|
return -EILSEQ;
|
|
|
|
|
2010-07-24 12:34:54 +08:00
|
|
|
while (count) {
|
2011-04-05 05:25:14 +08:00
|
|
|
rem = hci_reassembly(hdev, type, data, count, type - 1);
|
2010-07-14 15:32:18 +08:00
|
|
|
if (rem < 0)
|
|
|
|
return rem;
|
2007-07-11 12:42:04 +08:00
|
|
|
|
2010-07-14 15:32:18 +08:00
|
|
|
data += (count - rem);
|
|
|
|
count = rem;
|
2011-06-03 19:51:19 +08:00
|
|
|
}
|
2007-07-11 12:42:04 +08:00
|
|
|
|
2010-07-14 15:32:18 +08:00
|
|
|
return rem;
|
2007-07-11 12:42:04 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(hci_recv_fragment);
|
|
|
|
|
2010-07-14 15:32:19 +08:00
|
|
|
/* Reassembly slot used for untyped byte streams (e.g. UART drivers) */
#define STREAM_REASSEMBLY 0

/* Feed an untyped HCI byte stream into the stream reassembly slot.
 *
 * At the start of each frame the first byte is the H4 packet type
 * indicator; it is peeled off here and the rest is handed to
 * hci_reassembly(). Mid-frame, the type is recovered from the skb
 * parked in the slot.
 *
 * Returns the last remainder from hci_reassembly (>= 0) or a negative
 * errno on failure.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* ---- Interface to upper protocols ---- */
|
|
|
|
|
|
|
|
/* Register an upper-protocol callback structure on the global
 * hci_cb_list (protected by hci_cb_list_lock). Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
|
|
|
|
|
|
|
|
/* Remove an upper-protocol callback structure from the global
 * hci_cb_list (protected by hci_cb_list_lock). Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
|
|
|
|
|
|
|
|
/* Hand a fully built frame to the driver's send() hook.
 *
 * The device is carried in skb->dev; if it is missing the skb is
 * dropped with -ENODEV. When sockets are in promiscuous mode a copy
 * is delivered to them first. Returns the driver's send() result.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		/* Mirror outgoing traffic to monitoring sockets */
		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
|
|
|
|
|
|
|
|
/* Send HCI command.
 *
 * Builds an HCI command packet (@opcode, @plen bytes of @param),
 * queues it on cmd_q and kicks the cmd work item, which sends it
 * when a command credit is available.
 *
 * Returns 0 on success or -ENOMEM if the skb cannot be allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Track the last command issued during device initialization */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
|
|
|
|
|
|
|
|
/* Get data from the previously sent command */
|
2007-10-20 19:33:56 +08:00
|
|
|
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct hci_command_hdr *hdr;
|
|
|
|
|
|
|
|
if (!hdev->sent_cmd)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
hdr = (void *) hdev->sent_cmd->data;
|
|
|
|
|
2007-10-20 19:33:56 +08:00
|
|
|
if (hdr->opcode != cpu_to_le16(opcode))
|
2005-04-17 06:20:36 +08:00
|
|
|
return NULL;
|
|
|
|
|
2007-10-20 19:33:56 +08:00
|
|
|
BT_DBG("%s opcode 0x%x", hdev->name, opcode);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Send ACL data */

/* Prepend an ACL header (handle+flags, data length) to @skb and set
 * the transport header to point at it.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
|
|
|
|
|
2011-11-02 21:52:01 +08:00
|
|
|
/* Queue an (optionally fragmented) ACL skb on @queue.
 *
 * A non-fragmented skb is queued directly. For a fragmented skb the
 * frag_list continuation fragments each get their own ACL header with
 * the ACL_CONT flag, and all fragments are appended under the queue
 * lock so they stay contiguous.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the chain; fragments are queued individually */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, never ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
|
|
|
|
|
|
|
|
/* Send an ACL data packet on channel @chan.  Tags the skb with the
 * owning device and HCI_ACLDATA_PKT type, prepends the ACL header for
 * the connection handle, queues it (and any fragments) on the
 * channel's data queue, then kicks the TX work to actually transmit.
 * Ownership of @skb passes to the queue. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	/* Header must be in place before the packet becomes visible to
	 * the scheduler via the queue. */
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
|
|
|
|
|
|
|
|
/* Send SCO data */
|
2010-05-02 03:15:35 +08:00
|
|
|
/* Send an SCO data packet on @conn.  Builds the SCO header (handle +
 * data length) in front of the payload, tags the skb, queues it on
 * the connection's data queue and kicks the TX work.  Ownership of
 * @skb passes to the queue.  NOTE(review): skb->len is assigned to
 * hdr.dlen without a bound check against the SCO MTU — assumed to be
 * enforced by callers. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	/* Reserve header room in front of the payload and copy the
	 * header in via the transport header pointer. */
	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
|
|
|
|
|
|
|
|
/* ---- HCI TX task (outgoing data) ---- */
|
|
|
|
|
|
|
|
/* HCI Connection scheduler */
|
|
|
|
/* HCI connection scheduler: among connections of @type that are in a
 * usable state and have queued data, pick the one with the fewest
 * packets currently in flight (c->sent) for fairness.  On success,
 * *quote is set to that connection's share of the available
 * controller credits (at least 1); otherwise *quote is 0 and NULL is
 * returned. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-busy connection seen so far. */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type visited - stop early. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the credit pool matching the link type; LE falls
		 * back to the ACL pool when the controller reported no
		 * dedicated LE buffers (le_mtu == 0). */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Evenly split the credits; always grant at least one so
		 * a contending connection can make progress. */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
|
|
|
|
|
2011-02-11 09:38:53 +08:00
|
|
|
/* Link TX timeout handler: any connection of @type that still has
 * packets outstanding (c->sent != 0) is assumed stalled and is
 * disconnected with reason 0x13. */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
|
|
|
|
|
2011-11-02 21:52:01 +08:00
|
|
|
/* Channel scheduler: walk all channels of all usable connections of
 * @type and pick the channel whose head packet has the highest
 * priority; among channels at that priority, prefer the one whose
 * owning connection has the fewest packets in flight.  *quote gets
 * the selected channel's share of the relevant credit pool (at least
 * 1).  Returns NULL when no channel has queued data. */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head packet's priority matters. */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found - restart the fairness
			 * bookkeeping at this new priority level. */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Tie-break by least-busy owning connection. */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type visited - stop early. */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the credit pool matching the link type; LE falls back
	 * to the ACL pool when le_mtu == 0. */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Even split among contenders, with a minimum grant of 1. */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
|
|
|
|
|
2011-11-02 21:52:03 +08:00
|
|
|
/* Anti-starvation pass run after a TX round for link @type: channels
 * that managed to send simply get their per-round 'sent' counter
 * reset, while channels that were skipped get the priority of their
 * head packet promoted to HCI_PRIO_MAX - 1 so they win the next
 * hci_chan_sent() selection. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got service this round: just reset its
			 * counter, no promotion needed. */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* All connections of this type visited - stop early. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
|
|
|
|
|
2012-02-03 22:27:54 +08:00
|
|
|
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
/* Calculate count of blocks used by this packet */
|
|
|
|
return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
|
|
|
|
}
|
|
|
|
|
2012-02-03 22:27:55 +08:00
|
|
|
/* Detect a stalled ACL link: if the controller has granted no credits
 * (@cnt == 0) and nothing has been transmitted for longer than
 * HCI_ACL_TX_TIMEOUT, tear down stalled connections.  Skipped for
 * raw (HCI_RAW) devices, where the stack does no flow control. */
static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
					msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2012-02-03 22:27:55 +08:00
|
|
|
/* ACL scheduler for packet-based flow control: repeatedly ask
 * hci_chan_sent() for the best channel and drain up to its quote of
 * packets, one controller credit (acl_cnt) per packet.  A channel is
 * drained only while its head packet keeps the priority it was
 * selected at.  Runs the anti-starvation pass if anything was sent. */
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One credit per packet; count it against both the
			 * channel and the owning connection. */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
|
|
|
|
|
2012-02-03 22:27:54 +08:00
|
|
|
static inline void hci_sched_acl_blk(struct hci_dev *hdev)
|
|
|
|
{
|
2012-02-03 22:27:55 +08:00
|
|
|
unsigned int cnt = hdev->block_cnt;
|
2012-02-03 22:27:54 +08:00
|
|
|
struct hci_chan *chan;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
int quote;
|
|
|
|
|
2012-02-03 22:27:55 +08:00
|
|
|
__check_timeout(hdev, cnt);
|
2012-02-03 22:27:54 +08:00
|
|
|
|
|
|
|
while (hdev->block_cnt > 0 &&
|
|
|
|
(chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
|
|
|
|
u32 priority = (skb_peek(&chan->data_q))->priority;
|
|
|
|
while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
|
|
|
|
int blocks;
|
|
|
|
|
|
|
|
BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
|
|
|
|
skb->len, skb->priority);
|
|
|
|
|
|
|
|
/* Stop if priority has changed */
|
|
|
|
if (skb->priority < priority)
|
|
|
|
break;
|
|
|
|
|
|
|
|
skb = skb_dequeue(&chan->data_q);
|
|
|
|
|
|
|
|
blocks = __get_blocks(hdev, skb);
|
|
|
|
if (blocks > hdev->block_cnt)
|
|
|
|
return;
|
|
|
|
|
|
|
|
hci_conn_enter_active_mode(chan->conn,
|
|
|
|
bt_cb(skb)->force_active);
|
|
|
|
|
|
|
|
hci_send_frame(skb);
|
|
|
|
hdev->acl_last_tx = jiffies;
|
|
|
|
|
|
|
|
hdev->block_cnt -= blocks;
|
|
|
|
quote -= blocks;
|
|
|
|
|
|
|
|
chan->sent += blocks;
|
|
|
|
chan->conn->sent += blocks;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (cnt != hdev->block_cnt)
|
|
|
|
hci_prio_recalculate(hdev, ACL_LINK);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void hci_sched_acl(struct hci_dev *hdev)
|
|
|
|
{
|
|
|
|
BT_DBG("%s", hdev->name);
|
|
|
|
|
|
|
|
if (!hci_conn_num(hdev, ACL_LINK))
|
|
|
|
return;
|
|
|
|
|
|
|
|
switch (hdev->flow_ctl_mode) {
|
|
|
|
case HCI_FLOW_CTL_MODE_PACKET_BASED:
|
|
|
|
hci_sched_acl_pkt(hdev);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case HCI_FLOW_CTL_MODE_BLOCK_BASED:
|
|
|
|
hci_sched_acl_blk(hdev);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Schedule SCO */
|
|
|
|
static inline void hci_sched_sco(struct hci_dev *hdev)
|
|
|
|
{
|
|
|
|
struct hci_conn *conn;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
int quote;
|
|
|
|
|
|
|
|
BT_DBG("%s", hdev->name);
|
|
|
|
|
2011-08-17 21:23:00 +08:00
|
|
|
if (!hci_conn_num(hdev, SCO_LINK))
|
|
|
|
return;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
|
|
|
|
while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
|
|
|
|
BT_DBG("skb %p len %d", skb, skb->len);
|
|
|
|
hci_send_frame(skb);
|
|
|
|
|
|
|
|
conn->sent++;
|
|
|
|
if (conn->sent == ~0)
|
|
|
|
conn->sent = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-10-20 20:55:10 +08:00
|
|
|
static inline void hci_sched_esco(struct hci_dev *hdev)
|
|
|
|
{
|
|
|
|
struct hci_conn *conn;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
int quote;
|
|
|
|
|
|
|
|
BT_DBG("%s", hdev->name);
|
|
|
|
|
2011-08-17 21:23:00 +08:00
|
|
|
if (!hci_conn_num(hdev, ESCO_LINK))
|
|
|
|
return;
|
|
|
|
|
2007-10-20 20:55:10 +08:00
|
|
|
while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, "e))) {
|
|
|
|
while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
|
|
|
|
BT_DBG("skb %p len %d", skb, skb->len);
|
|
|
|
hci_send_frame(skb);
|
|
|
|
|
|
|
|
conn->sent++;
|
|
|
|
if (conn->sent == ~0)
|
|
|
|
conn->sent = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-02-11 09:38:48 +08:00
|
|
|
/* LE scheduler: like hci_sched_acl_pkt but for LE links.  Uses the
 * dedicated LE credit pool when the controller reported one
 * (le_pkts != 0), otherwise shares the ACL pool.  Also detects a
 * stalled LE link (no credits for > 45 s) and tears it down. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Fall back to the ACL credit pool when there is no dedicated
	 * LE buffer pool. */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool they were
	 * drawn from. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
|
|
|
|
|
2011-12-15 10:50:02 +08:00
|
|
|
/* TX work item: runs all per-link-type schedulers, then flushes any
 * raw (untyped) packets straight to the driver without flow
 * control. */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
|
|
|
|
|
2011-03-31 09:57:33 +08:00
|
|
|
/* ----- HCI RX task (incoming data processing) ----- */
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* ACL data packet */
|
|
|
|
/* Handle an incoming ACL data packet: strip the ACL header, split the
 * header word into handle and packet-boundary/broadcast flags, look
 * up the owning connection and pass the payload up to L2CAP (which
 * then owns the skb).  Packets for unknown handles are logged and
 * freed here. */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit header field packs both the flags and the
	 * connection handle. */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
|
|
|
|
|
|
|
|
/* SCO data packet */
|
|
|
|
/* Handle an incoming SCO data packet: strip the SCO header, look up
 * the owning connection by handle and pass the payload up to the SCO
 * layer (which then owns the skb).  Packets for unknown handles are
 * logged and freed here. */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
|
|
|
|
|
2010-08-09 11:06:53 +08:00
|
|
|
/* RX work item: drain the device's receive queue, mirroring frames to
 * listening sockets in promiscuous mode, dropping everything for raw
 * devices, dropping data packets while the device is initializing,
 * and otherwise demultiplexing by packet type to the event, ACL and
 * SCO handlers. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* Raw device: userspace handles everything itself. */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
|
|
|
|
|
2011-12-15 09:53:47 +08:00
|
|
|
/* Command work item: when a command credit is available, take the
 * next queued HCI command, remember a clone in hdev->sent_cmd (used
 * when matching the completion event) and send it, arming the command
 * timeout timer.  If cloning fails, the command is put back at the
 * head of the queue and the work is rescheduled. */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command before replacing it. */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* During HCI Reset the timer must not fire; otherwise
			 * (re)arm the command timeout. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: retry the same command later. */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
|
2011-11-07 22:45:24 +08:00
|
|
|
|
|
|
|
/* Start a general inquiry (device discovery) lasting @length units.
 * Fails with -EINPROGRESS if an inquiry is already running.  The
 * inquiry cache is flushed first so only freshly discovered devices
 * are reported. */
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 giac[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	cp.length = length;
	memcpy(&cp.lap, giac, sizeof(cp.lap));

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
|
2011-11-05 01:16:52 +08:00
|
|
|
|
|
|
|
/* Cancel a running inquiry.  Returns -EPERM when no inquiry is in
 * progress, otherwise the result of sending the HCI Inquiry Cancel
 * command. */
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EPERM;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
|
2011-11-18 19:35:42 +08:00
|
|
|
|
|
|
|
/* Module parameter toggling High Speed support; readable and writable
 * by root at runtime via sysfs (mode 0644). */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");
|