Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-11 12:28:41 +08:00)
Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next
Johan Hedberg says:

====================
pull request: bluetooth-next 2015-08-16

Here's what's likely the last bluetooth-next pull request for 4.3:

 - 6lowpan/802.15.4 refactoring, cleanups & fixes
 - Document 6lowpan netdev usage in Documentation/networking/6lowpan.txt
 - Support for UART based QCA Bluetooth controllers
 - Power management support for Broadcom Bluetooth controllers
 - Change LE connection initiation to always use passive scanning first
 - Support for new Silicon Wave USB ID

Please let me know if there are any issues pulling. Thanks.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in: commit 0aa65cc0c2
Documentation/networking/6lowpan.txt (new file, 50 lines)
@@ -0,0 +1,50 @@
Netdev private dataroom for 6lowpan interfaces:

All 6lowpan-capable net devices, that is all interfaces with ARPHRD_6LOWPAN,
must have "struct lowpan_priv" placed at the beginning of netdev_priv.

The priv_size of each interface should be calculated by:

	dev->priv_size = LOWPAN_PRIV_SIZE(LL_6LOWPAN_PRIV_DATA);

where LL_6LOWPAN_PRIV_DATA is the sizeof of the link-layer 6lowpan private
data struct. To access the LL_6LOWPAN_PRIV_DATA structure you can cast:

	lowpan_priv(dev)->priv;

to your LL_6LOWPAN_PRIV_DATA structure.

Before registering the lowpan netdev interface you must run:

	lowpan_netdev_setup(dev, LOWPAN_LLTYPE_FOOBAR);

where LOWPAN_LLTYPE_FOOBAR is a define for your 6LoWPAN link-layer type from
enum lowpan_lltypes.

For example, to access the private data you can usually do:

static inline struct lowpan_priv_foobar *
lowpan_foobar_priv(struct net_device *dev)
{
	return (struct lowpan_priv_foobar *)lowpan_priv(dev)->priv;
}

switch (dev->type) {
case ARPHRD_6LOWPAN:
	lowpan_priv = lowpan_priv(dev);
	/* do great stuff which is ARPHRD_6LOWPAN related */
	switch (lowpan_priv->lltype) {
	case LOWPAN_LLTYPE_FOOBAR:
		/* do 802.15.4 6LoWPAN handling here */
		lowpan_foobar_priv(dev)->bar = foo;
		break;
	...
	}
	break;
...
}

In case of the generic 6lowpan branch ("net/6lowpan") you can remove the check
on ARPHRD_6LOWPAN, because you can be sure that these functions are called
by ARPHRD_6LOWPAN interfaces.
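As a concrete illustration of the sizing rule above, here is a minimal sketch of how a link-layer driver could allocate and register such an interface. The names struct lowpan_priv_foobar, foobar_setup and LOWPAN_LLTYPE_FOOBAR are hypothetical placeholders, as in the example above; only LOWPAN_PRIV_SIZE() and lowpan_netdev_setup() come from the documented API.

/* Hypothetical link-layer private data for the FOOBAR example */
struct lowpan_priv_foobar {
	u16 bar;
};

static void foobar_setup(struct net_device *dev)
{
	dev->type = ARPHRD_6LOWPAN;	/* required for 6lowpan interfaces */
	/* further dev->mtu, header_ops, netdev_ops setup goes here */
}

static struct net_device *foobar_alloc_lowpandev(void)
{
	struct net_device *dev;

	/* Reserve room for struct lowpan_priv plus the link-layer part */
	dev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_priv_foobar)),
			   "lowpan%d", NET_NAME_UNKNOWN, foobar_setup);
	if (!dev)
		return NULL;

	/* Record the 6LoWPAN link-layer type before register_netdev() */
	lowpan_netdev_setup(dev, LOWPAN_LLTYPE_FOOBAR);

	return dev;
}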
MAINTAINERS
@@ -158,6 +158,7 @@ L:	linux-wpan@vger.kernel.org
 S:	Maintained
 F:	net/6lowpan/
 F:	include/net/6lowpan.h
+F:	Documentation/networking/6lowpan.txt

 6PACK NETWORK DRIVER FOR AX.25
 M:	Andreas Koensgen <ajk@comnets.uni-bremen.de>
drivers/bluetooth/Kconfig
@@ -13,6 +13,10 @@ config BT_RTL
 	tristate
 	select FW_LOADER

+config BT_QCA
+	tristate
+	select FW_LOADER
+
 config BT_HCIBTUSB
 	tristate "HCI USB driver"
 	depends on USB
@@ -151,6 +155,19 @@ config BT_HCIUART_BCM

 	  Say Y here to compile support for Broadcom protocol.

+config BT_HCIUART_QCA
+	bool "Qualcomm Atheros protocol support"
+	depends on BT_HCIUART
+	select BT_HCIUART_H4
+	select BT_QCA
+	help
+	  The Qualcomm Atheros protocol supports HCI In-Band Sleep feature
+	  over serial port interface(H4) between controller and host.
+	  This protocol is required for UART clock control for QCA Bluetooth
+	  devices.
+
+	  Say Y here to compile support for QCA protocol.
+
 config BT_HCIBCM203X
 	tristate "HCI BCM203x USB driver"
 	depends on USB
drivers/bluetooth/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_BT_MRVL_SDIO)	+= btmrvl_sdio.o
 obj-$(CONFIG_BT_WILINK)		+= btwilink.o
 obj-$(CONFIG_BT_BCM)		+= btbcm.o
 obj-$(CONFIG_BT_RTL)		+= btrtl.o
+obj-$(CONFIG_BT_QCA)		+= btqca.o

 btmrvl-y			:= btmrvl_main.o
 btmrvl-$(CONFIG_DEBUG_FS)	+= btmrvl_debugfs.o
@@ -34,6 +35,7 @@ hci_uart-$(CONFIG_BT_HCIUART_ATH3K)	+= hci_ath.o
 hci_uart-$(CONFIG_BT_HCIUART_3WIRE)	+= hci_h5.o
 hci_uart-$(CONFIG_BT_HCIUART_INTEL)	+= hci_intel.o
 hci_uart-$(CONFIG_BT_HCIUART_BCM)	+= hci_bcm.o
+hci_uart-$(CONFIG_BT_HCIUART_QCA)	+= hci_qca.o
 hci_uart-objs				:= $(hci_uart-y)

 ccflags-y += -D__CHECK_ENDIAN__
drivers/bluetooth/btmrvl_sdio.c
@@ -1071,8 +1071,6 @@ static int btmrvl_sdio_download_fw(struct btmrvl_sdio_card *card)
 		}
 	}

-	sdio_release_host(card->func);
-
 	/*
 	 * winner or not, with this test the FW synchronizes when the
 	 * module can continue its initialization
@@ -1082,6 +1080,8 @@ static int btmrvl_sdio_download_fw(struct btmrvl_sdio_card *card)
 		return -ETIMEDOUT;
 	}

+	sdio_release_host(card->func);
+
 	return 0;

 done:
drivers/bluetooth/btqca.c (new file, 392 lines)
@@ -0,0 +1,392 @@
/*
 *  Bluetooth supports for Qualcomm Atheros chips
 *
 *  Copyright (c) 2015 The Linux Foundation. All rights reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <linux/module.h>
#include <linux/firmware.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "btqca.h"

#define VERSION "0.1"

static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version)
{
	struct sk_buff *skb;
	struct edl_event_hdr *edl;
	struct rome_version *ver;
	char cmd;
	int err = 0;

	BT_DBG("%s: ROME Patch Version Request", hdev->name);

	cmd = EDL_PATCH_VER_REQ_CMD;
	skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN,
				&cmd, HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		BT_ERR("%s: Failed to read version of ROME (%d)", hdev->name,
		       err);
		return err;
	}

	if (skb->len != sizeof(*edl) + sizeof(*ver)) {
		BT_ERR("%s: Version size mismatch len %d", hdev->name,
		       skb->len);
		err = -EILSEQ;
		goto out;
	}

	edl = (struct edl_event_hdr *)(skb->data);
	if (!edl || !edl->data) {
		BT_ERR("%s: TLV with no header or no data", hdev->name);
		err = -EILSEQ;
		goto out;
	}

	if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
	    edl->rtype != EDL_APP_VER_RES_EVT) {
		BT_ERR("%s: Wrong packet received %d %d", hdev->name,
		       edl->cresp, edl->rtype);
		err = -EIO;
		goto out;
	}

	ver = (struct rome_version *)(edl->data);

	BT_DBG("%s: Product:0x%08x", hdev->name, le32_to_cpu(ver->product_id));
	BT_DBG("%s: Patch  :0x%08x", hdev->name, le16_to_cpu(ver->patch_ver));
	BT_DBG("%s: ROM    :0x%08x", hdev->name, le16_to_cpu(ver->rome_ver));
	BT_DBG("%s: SOC    :0x%08x", hdev->name, le32_to_cpu(ver->soc_id));

	/* ROME chipset version can be decided by patch and SoC
	 * version, combination with upper 2 bytes from SoC
	 * and lower 2 bytes from patch will be used.
	 */
	*rome_version = (le32_to_cpu(ver->soc_id) << 16) |
			(le16_to_cpu(ver->rome_ver) & 0x0000ffff);

out:
	kfree_skb(skb);

	return err;
}

static int rome_reset(struct hci_dev *hdev)
{
	struct sk_buff *skb;
	int err;

	BT_DBG("%s: ROME HCI_RESET", hdev->name);

	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		BT_ERR("%s: Reset failed (%d)", hdev->name, err);
		return err;
	}

	kfree_skb(skb);

	return 0;
}

static void rome_tlv_check_data(struct rome_config *config,
				const struct firmware *fw)
{
	const u8 *data;
	u32 type_len;
	u16 tag_id, tag_len;
	int idx, length;
	struct tlv_type_hdr *tlv;
	struct tlv_type_patch *tlv_patch;
	struct tlv_type_nvm *tlv_nvm;

	tlv = (struct tlv_type_hdr *)fw->data;

	type_len = le32_to_cpu(tlv->type_len);
	length = (type_len >> 8) & 0x00ffffff;

	BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff);
	BT_DBG("Length\t\t : %d bytes", length);

	switch (config->type) {
	case TLV_TYPE_PATCH:
		tlv_patch = (struct tlv_type_patch *)tlv->data;
		BT_DBG("Total Length\t\t : %d bytes",
		       le32_to_cpu(tlv_patch->total_size));
		BT_DBG("Patch Data Length\t : %d bytes",
		       le32_to_cpu(tlv_patch->data_length));
		BT_DBG("Signing Format Version : 0x%x",
		       tlv_patch->format_version);
		BT_DBG("Signature Algorithm\t : 0x%x",
		       tlv_patch->signature);
		BT_DBG("Reserved\t\t : 0x%x",
		       le16_to_cpu(tlv_patch->reserved1));
		BT_DBG("Product ID\t\t : 0x%04x",
		       le16_to_cpu(tlv_patch->product_id));
		BT_DBG("Rom Build Version\t : 0x%04x",
		       le16_to_cpu(tlv_patch->rom_build));
		BT_DBG("Patch Version\t\t : 0x%04x",
		       le16_to_cpu(tlv_patch->patch_version));
		BT_DBG("Reserved\t\t : 0x%x",
		       le16_to_cpu(tlv_patch->reserved2));
		BT_DBG("Patch Entry Address\t : 0x%x",
		       le32_to_cpu(tlv_patch->entry));
		break;

	case TLV_TYPE_NVM:
		idx = 0;
		data = tlv->data;
		while (idx < length) {
			tlv_nvm = (struct tlv_type_nvm *)(data + idx);

			tag_id = le16_to_cpu(tlv_nvm->tag_id);
			tag_len = le16_to_cpu(tlv_nvm->tag_len);

			/* Update NVM tags as needed */
			switch (tag_id) {
			case EDL_TAG_ID_HCI:
				/* HCI transport layer parameters
				 * enabling software inband sleep
				 * onto controller side.
				 */
				tlv_nvm->data[0] |= 0x80;

				/* UART Baud Rate */
				tlv_nvm->data[2] = config->user_baud_rate;

				break;

			case EDL_TAG_ID_DEEP_SLEEP:
				/* Sleep enable mask
				 * enabling deep sleep feature on controller.
				 */
				tlv_nvm->data[0] |= 0x01;

				break;
			}

			idx += (sizeof(u16) + sizeof(u16) + 8 + tag_len);
		}
		break;

	default:
		BT_ERR("Unknown TLV type %d", config->type);
		break;
	}
}

static int rome_tlv_send_segment(struct hci_dev *hdev, int idx, int seg_size,
				 const u8 *data)
{
	struct sk_buff *skb;
	struct edl_event_hdr *edl;
	struct tlv_seg_resp *tlv_resp;
	u8 cmd[MAX_SIZE_PER_TLV_SEGMENT + 2];
	int err = 0;

	BT_DBG("%s: Download segment #%d size %d", hdev->name, idx, seg_size);

	cmd[0] = EDL_PATCH_TLV_REQ_CMD;
	cmd[1] = seg_size;
	memcpy(cmd + 2, data, seg_size);

	skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2, cmd,
				HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		BT_ERR("%s: Failed to send TLV segment (%d)", hdev->name, err);
		return err;
	}

	if (skb->len != sizeof(*edl) + sizeof(*tlv_resp)) {
		BT_ERR("%s: TLV response size mismatch", hdev->name);
		err = -EILSEQ;
		goto out;
	}

	edl = (struct edl_event_hdr *)(skb->data);
	if (!edl || !edl->data) {
		BT_ERR("%s: TLV with no header or no data", hdev->name);
		err = -EILSEQ;
		goto out;
	}

	tlv_resp = (struct tlv_seg_resp *)(edl->data);

	if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
	    edl->rtype != EDL_TVL_DNLD_RES_EVT || tlv_resp->result != 0x00) {
		BT_ERR("%s: TLV with error stat 0x%x rtype 0x%x (0x%x)",
		       hdev->name, edl->cresp, edl->rtype, tlv_resp->result);
		err = -EIO;
	}

out:
	kfree_skb(skb);

	return err;
}

static int rome_tlv_download_request(struct hci_dev *hdev,
				     const struct firmware *fw)
{
	const u8 *buffer, *data;
	int total_segment, remain_size;
	int ret, i;

	if (!fw || !fw->data)
		return -EINVAL;

	total_segment = fw->size / MAX_SIZE_PER_TLV_SEGMENT;
	remain_size = fw->size % MAX_SIZE_PER_TLV_SEGMENT;

	BT_DBG("%s: Total segment num %d remain size %d total size %zu",
	       hdev->name, total_segment, remain_size, fw->size);

	data = fw->data;
	for (i = 0; i < total_segment; i++) {
		buffer = data + i * MAX_SIZE_PER_TLV_SEGMENT;
		ret = rome_tlv_send_segment(hdev, i, MAX_SIZE_PER_TLV_SEGMENT,
					    buffer);
		if (ret < 0)
			return -EIO;
	}

	if (remain_size) {
		buffer = data + total_segment * MAX_SIZE_PER_TLV_SEGMENT;
		ret = rome_tlv_send_segment(hdev, total_segment, remain_size,
					    buffer);
		if (ret < 0)
			return -EIO;
	}

	return 0;
}

static int rome_download_firmware(struct hci_dev *hdev,
				  struct rome_config *config)
{
	const struct firmware *fw;
	int ret;

	BT_INFO("%s: ROME Downloading %s", hdev->name, config->fwname);

	ret = request_firmware(&fw, config->fwname, &hdev->dev);
	if (ret) {
		BT_ERR("%s: Failed to request file: %s (%d)", hdev->name,
		       config->fwname, ret);
		return ret;
	}

	rome_tlv_check_data(config, fw);

	ret = rome_tlv_download_request(hdev, fw);
	if (ret) {
		BT_ERR("%s: Failed to download file: %s (%d)", hdev->name,
		       config->fwname, ret);
	}

	release_firmware(fw);

	return ret;
}

int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
	struct sk_buff *skb;
	u8 cmd[9];
	int err;

	cmd[0] = EDL_NVM_ACCESS_SET_REQ_CMD;
	cmd[1] = 0x02;			/* TAG ID */
	cmd[2] = sizeof(bdaddr_t);	/* size */
	memcpy(cmd + 3, bdaddr, sizeof(bdaddr_t));
	skb = __hci_cmd_sync_ev(hdev, EDL_NVM_ACCESS_OPCODE, sizeof(cmd), cmd,
				HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		BT_ERR("%s: Change address command failed (%d)",
		       hdev->name, err);
		return err;
	}

	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome);

int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate)
{
	u32 rome_ver = 0;
	struct rome_config config;
	int err;

	BT_DBG("%s: ROME setup on UART", hdev->name);

	config.user_baud_rate = baudrate;

	/* Get ROME version information */
	err = rome_patch_ver_req(hdev, &rome_ver);
	if (err < 0 || rome_ver == 0) {
		BT_ERR("%s: Failed to get version 0x%x", hdev->name, err);
		return err;
	}

	BT_INFO("%s: ROME controller version 0x%08x", hdev->name, rome_ver);

	/* Download rampatch file */
	config.type = TLV_TYPE_PATCH;
	snprintf(config.fwname, sizeof(config.fwname), "qca/rampatch_%08x.bin",
		 rome_ver);
	err = rome_download_firmware(hdev, &config);
	if (err < 0) {
		BT_ERR("%s: Failed to download patch (%d)", hdev->name, err);
		return err;
	}

	/* Download NVM configuration */
	config.type = TLV_TYPE_NVM;
	snprintf(config.fwname, sizeof(config.fwname), "qca/nvm_%08x.bin",
		 rome_ver);
	err = rome_download_firmware(hdev, &config);
	if (err < 0) {
		BT_ERR("%s: Failed to download NVM (%d)", hdev->name, err);
		return err;
	}

	/* Perform HCI reset */
	err = rome_reset(hdev);
	if (err < 0) {
		BT_ERR("%s: Failed to run HCI_RESET (%d)", hdev->name, err);
		return err;
	}

	BT_INFO("%s: ROME setup on UART is completed", hdev->name);

	return 0;
}
EXPORT_SYMBOL_GPL(qca_uart_setup_rome);

MODULE_AUTHOR("Ben Young Tae Kim <ytkim@qca.qualcomm.com>");
MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
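To make the version arithmetic above concrete: rome_patch_ver_req() composes the 32-bit chipset version from the SoC ID (upper 16 bits) and the ROM version (lower 16 bits), and qca_uart_setup_rome() then folds that value into the firmware file names. A small worked example follows; the numeric values are made up purely for illustration and do not come from real hardware.

	/* Illustrative values only */
	u32 soc_id   = 0x00000044;	/* le32_to_cpu(ver->soc_id)   */
	u16 rome_ver = 0x0200;		/* le16_to_cpu(ver->rome_ver) */

	u32 chip_ver = (soc_id << 16) | (rome_ver & 0x0000ffff);
	/* chip_ver == 0x00440200, so the driver would request:
	 *   qca/rampatch_00440200.bin   (TLV_TYPE_PATCH)
	 *   qca/nvm_00440200.bin        (TLV_TYPE_NVM)
	 */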
drivers/bluetooth/btqca.h (new file, 135 lines)
@@ -0,0 +1,135 @@
/*
 *  Bluetooth supports for Qualcomm Atheros ROME chips
 *
 *  Copyright (c) 2015 The Linux Foundation. All rights reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#define EDL_PATCH_CMD_OPCODE		(0xFC00)
#define EDL_NVM_ACCESS_OPCODE		(0xFC0B)
#define EDL_PATCH_CMD_LEN		(1)
#define EDL_PATCH_VER_REQ_CMD		(0x19)
#define EDL_PATCH_TLV_REQ_CMD		(0x1E)
#define EDL_NVM_ACCESS_SET_REQ_CMD	(0x01)
#define MAX_SIZE_PER_TLV_SEGMENT	(243)

#define EDL_CMD_REQ_RES_EVT		(0x00)
#define EDL_PATCH_VER_RES_EVT		(0x19)
#define EDL_APP_VER_RES_EVT		(0x02)
#define EDL_TVL_DNLD_RES_EVT		(0x04)
#define EDL_CMD_EXE_STATUS_EVT		(0x00)
#define EDL_SET_BAUDRATE_RSP_EVT	(0x92)
#define EDL_NVM_ACCESS_CODE_EVT		(0x0B)

#define EDL_TAG_ID_HCI			(17)
#define EDL_TAG_ID_DEEP_SLEEP		(27)

enum qca_bardrate {
	QCA_BAUDRATE_115200 = 0,
	QCA_BAUDRATE_57600,
	QCA_BAUDRATE_38400,
	QCA_BAUDRATE_19200,
	QCA_BAUDRATE_9600,
	QCA_BAUDRATE_230400,
	QCA_BAUDRATE_250000,
	QCA_BAUDRATE_460800,
	QCA_BAUDRATE_500000,
	QCA_BAUDRATE_720000,
	QCA_BAUDRATE_921600,
	QCA_BAUDRATE_1000000,
	QCA_BAUDRATE_1250000,
	QCA_BAUDRATE_2000000,
	QCA_BAUDRATE_3000000,
	QCA_BAUDRATE_4000000,
	QCA_BAUDRATE_1600000,
	QCA_BAUDRATE_3200000,
	QCA_BAUDRATE_3500000,
	QCA_BAUDRATE_AUTO = 0xFE,
	QCA_BAUDRATE_RESERVED
};

enum rome_tlv_type {
	TLV_TYPE_PATCH = 1,
	TLV_TYPE_NVM
};

struct rome_config {
	u8 type;
	char fwname[64];
	uint8_t user_baud_rate;
};

struct edl_event_hdr {
	__u8 cresp;
	__u8 rtype;
	__u8 data[0];
} __packed;

struct rome_version {
	__le32 product_id;
	__le16 patch_ver;
	__le16 rome_ver;
	__le32 soc_id;
} __packed;

struct tlv_seg_resp {
	__u8 result;
} __packed;

struct tlv_type_patch {
	__le32 total_size;
	__le32 data_length;
	__u8   format_version;
	__u8   signature;
	__le16 reserved1;
	__le16 product_id;
	__le16 rom_build;
	__le16 patch_version;
	__le16 reserved2;
	__le32 entry;
} __packed;

struct tlv_type_nvm {
	__le16 tag_id;
	__le16 tag_len;
	__le32 reserve1;
	__le32 reserve2;
	__u8   data[0];
} __packed;

struct tlv_type_hdr {
	__le32 type_len;
	__u8   data[0];
} __packed;

#if IS_ENABLED(CONFIG_BT_QCA)

int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr);
int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate);

#else

static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
	return -EOPNOTSUPP;
}

static inline int qca_uart_setup_rome(struct hci_dev *hdev, int speed)
{
	return -EOPNOTSUPP;
}

#endif
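The IS_ENABLED(CONFIG_BT_QCA) stubs above let a UART protocol driver call the ROME helpers unconditionally; when BT_QCA is disabled the calls simply return -EOPNOTSUPP. A minimal sketch of such a caller is shown below; qca_setup_sketch and the fixed 115200 baud-rate choice are assumptions made for illustration, not the actual hci_qca.c setup logic.

	/* Hypothetical setup hook for a ROME-based UART controller */
	static int qca_setup_sketch(struct hci_dev *hdev, const bdaddr_t *bdaddr)
	{
		int err;

		/* Download rampatch + NVM, then issue HCI_RESET */
		err = qca_uart_setup_rome(hdev, QCA_BAUDRATE_115200);
		if (err < 0)
			return err;

		/* Optionally program the public device address */
		return qca_set_bdaddr_rome(hdev, bdaddr);
	}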
drivers/bluetooth/btusb.c
@@ -322,6 +322,9 @@ static const struct usb_device_id blacklist_table[] = {
 	{ USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK },
 	{ USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK },

+	/* Silicon Wave based devices */
+	{ USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
+
 	{ }	/* Terminating entry */
 };

drivers/bluetooth/hci_bcm.c
@@ -25,6 +25,12 @@
 #include <linux/errno.h>
 #include <linux/skbuff.h>
 #include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/tty.h>

 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -32,11 +38,37 @@
 #include "btbcm.h"
 #include "hci_uart.h"

-struct bcm_data {
-	struct sk_buff *rx_skb;
-	struct sk_buff_head txq;
+struct bcm_device {
+	struct list_head	list;
+
+	struct platform_device	*pdev;
+
+	const char		*name;
+	struct gpio_desc	*device_wakeup;
+	struct gpio_desc	*shutdown;
+
+	struct clk		*clk;
+	bool			clk_enabled;
+
+	u32			init_speed;
+
+#ifdef CONFIG_PM_SLEEP
+	struct hci_uart		*hu;
+	bool			is_suspended; /* suspend/resume flag */
+#endif
+};
+
+struct bcm_data {
+	struct sk_buff		*rx_skb;
+	struct sk_buff_head	txq;
+
+	struct bcm_device	*dev;
 };

+/* List of BCM BT UART devices */
+static DEFINE_SPINLOCK(bcm_device_list_lock);
+static LIST_HEAD(bcm_device_list);
+
 static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed)
 {
 	struct hci_dev *hdev = hu->hdev;
@@ -86,9 +118,41 @@ static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed)
 	return 0;
 }

+/* bcm_device_exists should be protected by bcm_device_list_lock */
+static bool bcm_device_exists(struct bcm_device *device)
+{
+	struct list_head *p;
+
+	list_for_each(p, &bcm_device_list) {
+		struct bcm_device *dev = list_entry(p, struct bcm_device, list);
+
+		if (device == dev)
+			return true;
+	}
+
+	return false;
+}
+
+static int bcm_gpio_set_power(struct bcm_device *dev, bool powered)
+{
+	if (powered && !IS_ERR(dev->clk) && !dev->clk_enabled)
+		clk_enable(dev->clk);
+
+	gpiod_set_value_cansleep(dev->shutdown, powered);
+	gpiod_set_value_cansleep(dev->device_wakeup, powered);
+
+	if (!powered && !IS_ERR(dev->clk) && dev->clk_enabled)
+		clk_disable(dev->clk);
+
+	dev->clk_enabled = powered;
+
+	return 0;
+}
+
 static int bcm_open(struct hci_uart *hu)
 {
 	struct bcm_data *bcm;
+	struct list_head *p;

 	BT_DBG("hu %p", hu);

@@ -99,6 +163,30 @@ static int bcm_open(struct hci_uart *hu)
 	skb_queue_head_init(&bcm->txq);

 	hu->priv = bcm;
+
+	spin_lock(&bcm_device_list_lock);
+	list_for_each(p, &bcm_device_list) {
+		struct bcm_device *dev = list_entry(p, struct bcm_device, list);
+
+		/* Retrieve saved bcm_device based on parent of the
+		 * platform device (saved during device probe) and
+		 * parent of tty device used by hci_uart
+		 */
+		if (hu->tty->dev->parent == dev->pdev->dev.parent) {
+			bcm->dev = dev;
+			hu->init_speed = dev->init_speed;
+#ifdef CONFIG_PM_SLEEP
+			dev->hu = hu;
+#endif
+			break;
+		}
+	}
+
+	if (bcm->dev)
+		bcm_gpio_set_power(bcm->dev, true);
+
+	spin_unlock(&bcm_device_list_lock);
+
 	return 0;
 }

@@ -108,6 +196,16 @@ static int bcm_close(struct hci_uart *hu)

 	BT_DBG("hu %p", hu);

+	/* Protect bcm->dev against removal of the device or driver */
+	spin_lock(&bcm_device_list_lock);
+	if (bcm_device_exists(bcm->dev)) {
+		bcm_gpio_set_power(bcm->dev, false);
+#ifdef CONFIG_PM_SLEEP
+		bcm->dev->hu = NULL;
+#endif
+	}
+	spin_unlock(&bcm_device_list_lock);
+
 	skb_queue_purge(&bcm->txq);
 	kfree_skb(bcm->rx_skb);
 	kfree(bcm);
@@ -232,6 +330,188 @@ static struct sk_buff *bcm_dequeue(struct hci_uart *hu)
 	return skb_dequeue(&bcm->txq);
 }

+#ifdef CONFIG_PM_SLEEP
+/* Platform suspend callback */
+static int bcm_suspend(struct device *dev)
+{
+	struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));
+
+	BT_DBG("suspend (%p): is_suspended %d", bdev, bdev->is_suspended);
+
+	if (!bdev->is_suspended) {
+		hci_uart_set_flow_control(bdev->hu, true);
+
+		/* Once this callback returns, driver suspends BT via GPIO */
+		bdev->is_suspended = true;
+	}
+
+	/* Suspend the device */
+	if (bdev->device_wakeup) {
+		gpiod_set_value(bdev->device_wakeup, false);
+		BT_DBG("suspend, delaying 15 ms");
+		mdelay(15);
+	}
+
+	return 0;
+}
+
+/* Platform resume callback */
+static int bcm_resume(struct device *dev)
+{
+	struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));
+
+	BT_DBG("resume (%p): is_suspended %d", bdev, bdev->is_suspended);
+
+	if (bdev->device_wakeup) {
+		gpiod_set_value(bdev->device_wakeup, true);
+		BT_DBG("resume, delaying 15 ms");
+		mdelay(15);
+	}
+
+	/* When this callback executes, the device has woken up already */
+	if (bdev->is_suspended) {
+		bdev->is_suspended = false;
+
+		hci_uart_set_flow_control(bdev->hu, false);
+	}
+
+	return 0;
+}
+#endif
+
+static const struct acpi_gpio_params device_wakeup_gpios = { 0, 0, false };
+static const struct acpi_gpio_params shutdown_gpios = { 1, 0, false };
+
+static const struct acpi_gpio_mapping acpi_bcm_default_gpios[] = {
+	{ "device-wakeup-gpios", &device_wakeup_gpios, 1 },
+	{ "shutdown-gpios", &shutdown_gpios, 1 },
+	{ },
+};
+
+#ifdef CONFIG_ACPI
+static int bcm_resource(struct acpi_resource *ares, void *data)
+{
+	struct bcm_device *dev = data;
+
+	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+		struct acpi_resource_uart_serialbus *sb;
+
+		sb = &ares->data.uart_serial_bus;
+		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_UART)
+			dev->init_speed = sb->default_baud_rate;
+	}
+
+	/* Always tell the ACPI core to skip this resource */
+	return 1;
+}
+
+static int bcm_acpi_probe(struct bcm_device *dev)
+{
+	struct platform_device *pdev = dev->pdev;
+	const struct acpi_device_id *id;
+	struct acpi_device *adev;
+	LIST_HEAD(resources);
+	int ret;
+
+	id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
+	if (!id)
+		return -ENODEV;
+
+	/* Retrieve GPIO data */
+	dev->name = dev_name(&pdev->dev);
+	ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev),
+					acpi_bcm_default_gpios);
+	if (ret)
+		return ret;
+
+	dev->clk = devm_clk_get(&pdev->dev, NULL);
+
+	dev->device_wakeup = devm_gpiod_get_optional(&pdev->dev,
+						     "device-wakeup",
+						     GPIOD_OUT_LOW);
+	if (IS_ERR(dev->device_wakeup))
+		return PTR_ERR(dev->device_wakeup);
+
+	dev->shutdown = devm_gpiod_get_optional(&pdev->dev, "shutdown",
+						GPIOD_OUT_LOW);
+	if (IS_ERR(dev->shutdown))
+		return PTR_ERR(dev->shutdown);
+
+	/* Make sure at-least one of the GPIO is defined and that
+	 * a name is specified for this instance
+	 */
+	if ((!dev->device_wakeup && !dev->shutdown) || !dev->name) {
+		dev_err(&pdev->dev, "invalid platform data\n");
+		return -EINVAL;
+	}
+
+	/* Retrieve UART ACPI info */
+	adev = ACPI_COMPANION(&dev->pdev->dev);
+	if (!adev)
+		return 0;
+
+	acpi_dev_get_resources(adev, &resources, bcm_resource, dev);
+
+	return 0;
+}
+#else
+static int bcm_acpi_probe(struct bcm_device *dev)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_ACPI */
+
+static int bcm_probe(struct platform_device *pdev)
+{
+	struct bcm_device *dev;
+	struct acpi_device_id *pdata = pdev->dev.platform_data;
+	int ret;
+
+	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->pdev = pdev;
+
+	if (ACPI_HANDLE(&pdev->dev)) {
+		ret = bcm_acpi_probe(dev);
+		if (ret)
+			return ret;
+	} else if (pdata) {
+		dev->name = pdata->id;
+	} else {
+		return -ENODEV;
+	}
+
+	platform_set_drvdata(pdev, dev);
+
+	dev_info(&pdev->dev, "%s device registered.\n", dev->name);
+
+	/* Place this instance on the device list */
+	spin_lock(&bcm_device_list_lock);
+	list_add_tail(&dev->list, &bcm_device_list);
+	spin_unlock(&bcm_device_list_lock);
+
+	bcm_gpio_set_power(dev, false);
+
+	return 0;
+}
+
+static int bcm_remove(struct platform_device *pdev)
+{
+	struct bcm_device *dev = platform_get_drvdata(pdev);
+
+	spin_lock(&bcm_device_list_lock);
+	list_del(&dev->list);
+	spin_unlock(&bcm_device_list_lock);
+
+	acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev));
+
+	dev_info(&pdev->dev, "%s device unregistered.\n", dev->name);
+
+	return 0;
+}
+
 static const struct hci_uart_proto bcm_proto = {
 	.id		= HCI_UART_BCM,
 	.name		= "BCM",
@@ -247,12 +527,38 @@ static const struct hci_uart_proto bcm_proto = {
 	.dequeue	= bcm_dequeue,
 };

+#ifdef CONFIG_ACPI
+static const struct acpi_device_id bcm_acpi_match[] = {
+	{ "BCM2E39", 0 },
+	{ "BCM2E67", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, bcm_acpi_match);
+#endif
+
+/* Platform suspend and resume callbacks */
+static SIMPLE_DEV_PM_OPS(bcm_pm_ops, bcm_suspend, bcm_resume);
+
+static struct platform_driver bcm_driver = {
+	.probe = bcm_probe,
+	.remove = bcm_remove,
+	.driver = {
+		.name = "hci_bcm",
+		.acpi_match_table = ACPI_PTR(bcm_acpi_match),
+		.pm = &bcm_pm_ops,
+	},
+};
+
 int __init bcm_init(void)
 {
+	platform_driver_register(&bcm_driver);
+
 	return hci_uart_register_proto(&bcm_proto);
 }

 int __exit bcm_deinit(void)
 {
+	platform_driver_unregister(&bcm_driver);
+
 	return hci_uart_unregister_proto(&bcm_proto);
 }
drivers/bluetooth/hci_ldisc.c
@@ -810,6 +810,9 @@ static int __init hci_uart_init(void)
 #ifdef CONFIG_BT_HCIUART_BCM
 	bcm_init();
 #endif
+#ifdef CONFIG_BT_HCIUART_QCA
+	qca_init();
+#endif

 	return 0;
 }
@@ -839,6 +842,9 @@ static void __exit hci_uart_exit(void)
 #ifdef CONFIG_BT_HCIUART_BCM
 	bcm_deinit();
 #endif
+#ifdef CONFIG_BT_HCIUART_QCA
+	qca_deinit();
+#endif

 	/* Release tty registration of line discipline */
 	err = tty_unregister_ldisc(N_HCI);
drivers/bluetooth/hci_qca.c (new file, 969 lines)
@@ -0,0 +1,969 @@
|
|||||||
|
/*
|
||||||
|
* Bluetooth Software UART Qualcomm protocol
|
||||||
|
*
|
||||||
|
* HCI_IBS (HCI In-Band Sleep) is Qualcomm's power management
|
||||||
|
* protocol extension to H4.
|
||||||
|
*
|
||||||
|
* Copyright (C) 2007 Texas Instruments, Inc.
|
||||||
|
* Copyright (c) 2010, 2012 The Linux Foundation. All rights reserved.
|
||||||
|
*
|
||||||
|
* Acknowledgements:
|
||||||
|
* This file is based on hci_ll.c, which was...
|
||||||
|
* Written by Ohad Ben-Cohen <ohad@bencohen.org>
|
||||||
|
* which was in turn based on hci_h4.c, which was written
|
||||||
|
* by Maxim Krasnyansky and Marcel Holtmann.
|
||||||
|
*
|
||||||
|
* This program is free software; you can redistribute it and/or modify
|
||||||
|
* it under the terms of the GNU General Public License version 2
|
||||||
|
* as published by the Free Software Foundation
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful,
|
||||||
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
* GNU General Public License for more details.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License
|
||||||
|
* along with this program; if not, write to the Free Software
|
||||||
|
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <linux/kernel.h>
|
||||||
|
#include <linux/debugfs.h>
|
||||||
|
|
||||||
|
#include <net/bluetooth/bluetooth.h>
|
||||||
|
#include <net/bluetooth/hci_core.h>
|
||||||
|
|
||||||
|
#include "hci_uart.h"
|
||||||
|
#include "btqca.h"
|
||||||
|
|
||||||
|
/* HCI_IBS protocol messages */
|
||||||
|
#define HCI_IBS_SLEEP_IND 0xFE
|
||||||
|
#define HCI_IBS_WAKE_IND 0xFD
|
||||||
|
#define HCI_IBS_WAKE_ACK 0xFC
|
||||||
|
#define HCI_MAX_IBS_SIZE 10
|
||||||
|
|
||||||
|
/* Controller states */
|
||||||
|
#define STATE_IN_BAND_SLEEP_ENABLED 1
|
||||||
|
|
||||||
|
#define IBS_WAKE_RETRANS_TIMEOUT_MS 100
|
||||||
|
#define IBS_TX_IDLE_TIMEOUT_MS 2000
|
||||||
|
#define BAUDRATE_SETTLE_TIMEOUT_MS 300
|
||||||
|
|
||||||
|
/* HCI_IBS transmit side sleep protocol states */
|
||||||
|
enum tx_ibs_states {
|
||||||
|
HCI_IBS_TX_ASLEEP,
|
||||||
|
HCI_IBS_TX_WAKING,
|
||||||
|
HCI_IBS_TX_AWAKE,
|
||||||
|
};
|
||||||
|
|
||||||
|
/* HCI_IBS receive side sleep protocol states */
|
||||||
|
enum rx_states {
|
||||||
|
HCI_IBS_RX_ASLEEP,
|
||||||
|
HCI_IBS_RX_AWAKE,
|
||||||
|
};
|
||||||
|
|
||||||
|
/* HCI_IBS transmit and receive side clock state vote */
|
||||||
|
enum hci_ibs_clock_state_vote {
|
||||||
|
HCI_IBS_VOTE_STATS_UPDATE,
|
||||||
|
HCI_IBS_TX_VOTE_CLOCK_ON,
|
||||||
|
HCI_IBS_TX_VOTE_CLOCK_OFF,
|
||||||
|
HCI_IBS_RX_VOTE_CLOCK_ON,
|
||||||
|
HCI_IBS_RX_VOTE_CLOCK_OFF,
|
||||||
|
};
|
||||||
|
|
||||||
|
struct qca_data {
|
||||||
|
struct hci_uart *hu;
|
||||||
|
struct sk_buff *rx_skb;
|
||||||
|
struct sk_buff_head txq;
|
||||||
|
struct sk_buff_head tx_wait_q; /* HCI_IBS wait queue */
|
||||||
|
spinlock_t hci_ibs_lock; /* HCI_IBS state lock */
|
||||||
|
u8 tx_ibs_state; /* HCI_IBS transmit side power state*/
|
||||||
|
u8 rx_ibs_state; /* HCI_IBS receive side power state */
|
||||||
|
u32 tx_vote; /* Clock must be on for TX */
|
||||||
|
u32 rx_vote; /* Clock must be on for RX */
|
||||||
|
struct timer_list tx_idle_timer;
|
||||||
|
u32 tx_idle_delay;
|
||||||
|
struct timer_list wake_retrans_timer;
|
||||||
|
u32 wake_retrans;
|
||||||
|
struct workqueue_struct *workqueue;
|
||||||
|
struct work_struct ws_awake_rx;
|
||||||
|
struct work_struct ws_awake_device;
|
||||||
|
struct work_struct ws_rx_vote_off;
|
||||||
|
struct work_struct ws_tx_vote_off;
|
||||||
|
unsigned long flags;
|
||||||
|
|
||||||
|
/* For debugging purpose */
|
||||||
|
u64 ibs_sent_wacks;
|
||||||
|
u64 ibs_sent_slps;
|
||||||
|
u64 ibs_sent_wakes;
|
||||||
|
u64 ibs_recv_wacks;
|
||||||
|
u64 ibs_recv_slps;
|
||||||
|
u64 ibs_recv_wakes;
|
||||||
|
u64 vote_last_jif;
|
||||||
|
u32 vote_on_ms;
|
||||||
|
u32 vote_off_ms;
|
||||||
|
u64 tx_votes_on;
|
||||||
|
u64 rx_votes_on;
|
||||||
|
u64 tx_votes_off;
|
||||||
|
u64 rx_votes_off;
|
||||||
|
u64 votes_on;
|
||||||
|
u64 votes_off;
|
||||||
|
};
|
||||||
|
|
||||||
|
static void __serial_clock_on(struct tty_struct *tty)
|
||||||
|
{
|
||||||
|
/* TODO: Some chipset requires to enable UART clock on client
|
||||||
|
* side to save power consumption or manual work is required.
|
||||||
|
* Please put your code to control UART clock here if needed
|
||||||
|
*/
|
||||||
|
}
|
||||||
|
|
||||||
|
static void __serial_clock_off(struct tty_struct *tty)
|
||||||
|
{
|
||||||
|
/* TODO: Some chipset requires to disable UART clock on client
|
||||||
|
* side to save power consumption or manual work is required.
|
||||||
|
* Please put your code to control UART clock off here if needed
|
||||||
|
*/
|
||||||
|
}
|
||||||
|
|
||||||
|
/* serial_clock_vote needs to be called with the ibs lock held */
|
||||||
|
static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
|
||||||
|
{
|
||||||
|
struct qca_data *qca = hu->priv;
|
||||||
|
unsigned int diff;
|
||||||
|
|
||||||
|
bool old_vote = (qca->tx_vote | qca->rx_vote);
|
||||||
|
bool new_vote;
|
||||||
|
|
||||||
|
switch (vote) {
|
||||||
|
case HCI_IBS_VOTE_STATS_UPDATE:
|
||||||
|
diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
|
||||||
|
|
||||||
|
if (old_vote)
|
||||||
|
qca->vote_off_ms += diff;
|
||||||
|
else
|
||||||
|
qca->vote_on_ms += diff;
|
||||||
|
return;
|
||||||
|
|
||||||
|
case HCI_IBS_TX_VOTE_CLOCK_ON:
|
||||||
|
qca->tx_vote = true;
|
||||||
|
qca->tx_votes_on++;
|
||||||
|
new_vote = true;
|
||||||
|
break;
|
||||||
|
|
||||||
|
case HCI_IBS_RX_VOTE_CLOCK_ON:
|
||||||
|
qca->rx_vote = true;
|
||||||
|
qca->rx_votes_on++;
|
||||||
|
new_vote = true;
|
||||||
|
break;
|
||||||
|
|
||||||
|
case HCI_IBS_TX_VOTE_CLOCK_OFF:
|
||||||
|
qca->tx_vote = false;
|
||||||
|
qca->tx_votes_off++;
|
||||||
|
new_vote = qca->rx_vote | qca->tx_vote;
|
||||||
|
break;
|
||||||
|
|
||||||
|
case HCI_IBS_RX_VOTE_CLOCK_OFF:
|
||||||
|
qca->rx_vote = false;
|
||||||
|
qca->rx_votes_off++;
|
||||||
|
new_vote = qca->rx_vote | qca->tx_vote;
|
||||||
|
break;
|
||||||
|
|
||||||
|
default:
|
||||||
|
BT_ERR("Voting irregularity");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (new_vote != old_vote) {
|
||||||
|
if (new_vote)
|
||||||
|
__serial_clock_on(hu->tty);
|
||||||
|
else
|
||||||
|
__serial_clock_off(hu->tty);
|
||||||
|
|
||||||
|
BT_DBG("Vote serial clock %s(%s)", new_vote? "true" : "false",
|
||||||
|
vote? "true" : "false");
|
||||||
|
|
||||||
|
diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
|
||||||
|
|
||||||
|
if (new_vote) {
|
||||||
|
qca->votes_on++;
|
||||||
|
qca->vote_off_ms += diff;
|
||||||
|
} else {
|
||||||
|
qca->votes_off++;
|
||||||
|
qca->vote_on_ms += diff;
|
||||||
|
}
|
||||||
|
qca->vote_last_jif = jiffies;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Builds and sends an HCI_IBS command packet.
|
||||||
|
* These are very simple packets with only 1 cmd byte.
|
||||||
|
*/
|
||||||
|
static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu)
|
||||||
|
{
|
||||||
|
int err = 0;
|
||||||
|
struct sk_buff *skb = NULL;
|
||||||
|
struct qca_data *qca = hu->priv;
|
||||||
|
|
||||||
|
BT_DBG("hu %p send hci ibs cmd 0x%x", hu, cmd);
|
||||||
|
|
||||||
|
skb = bt_skb_alloc(1, GFP_ATOMIC);
|
||||||
|
if (!skb) {
|
||||||
|
BT_ERR("Failed to allocate memory for HCI_IBS packet");
|
||||||
|
return -ENOMEM;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Assign HCI_IBS type */
|
||||||
|
*skb_put(skb, 1) = cmd;
|
||||||
|
|
||||||
|
skb_queue_tail(&qca->txq, skb);
|
||||||
|
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void qca_wq_awake_device(struct work_struct *work)
|
||||||
|
{
|
||||||
|
struct qca_data *qca = container_of(work, struct qca_data,
|
||||||
|
ws_awake_device);
|
||||||
|
struct hci_uart *hu = qca->hu;
|
||||||
|
unsigned long retrans_delay;
|
||||||
|
|
||||||
|
BT_DBG("hu %p wq awake device", hu);
|
||||||
|
|
||||||
|
/* Vote for serial clock */
|
||||||
|
serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);
|
||||||
|
|
||||||
|
spin_lock(&qca->hci_ibs_lock);
|
||||||
|
|
||||||
|
/* Send wake indication to device */
|
||||||
|
if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
|
||||||
|
BT_ERR("Failed to send WAKE to device");
|
||||||
|
|
||||||
|
qca->ibs_sent_wakes++;
|
||||||
|
|
||||||
|
/* Start retransmit timer */
|
||||||
|
retrans_delay = msecs_to_jiffies(qca->wake_retrans);
|
||||||
|
mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
|
||||||
|
|
||||||
|
spin_unlock(&qca->hci_ibs_lock);
|
||||||
|
|
||||||
|
/* Actually send the packets */
|
||||||
|
hci_uart_tx_wakeup(hu);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void qca_wq_awake_rx(struct work_struct *work)
|
||||||
|
{
|
||||||
|
struct qca_data *qca = container_of(work, struct qca_data,
|
||||||
|
ws_awake_rx);
|
||||||
|
struct hci_uart *hu = qca->hu;
|
||||||
|
|
||||||
|
BT_DBG("hu %p wq awake rx", hu);
|
||||||
|
|
||||||
|
serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);
|
||||||
|
|
||||||
|
spin_lock(&qca->hci_ibs_lock);
|
||||||
|
qca->rx_ibs_state = HCI_IBS_RX_AWAKE;
|
||||||
|
|
||||||
|
/* Always acknowledge device wake up,
|
||||||
|
* sending IBS message doesn't count as TX ON.
|
||||||
|
*/
|
||||||
|
if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0)
|
||||||
|
BT_ERR("Failed to acknowledge device wake up");
|
||||||
|
|
||||||
|
qca->ibs_sent_wacks++;
|
||||||
|
|
||||||
|
spin_unlock(&qca->hci_ibs_lock);
|
||||||
|
|
||||||
|
/* Actually send the packets */
|
||||||
|
hci_uart_tx_wakeup(hu);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work)
|
||||||
|
{
|
||||||
|
struct qca_data *qca = container_of(work, struct qca_data,
|
||||||
|
ws_rx_vote_off);
|
||||||
|
struct hci_uart *hu = qca->hu;
|
||||||
|
|
||||||
|
BT_DBG("hu %p rx clock vote off", hu);
|
||||||
|
|
||||||
|
serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
|
||||||
|
{
|
||||||
|
struct qca_data *qca = container_of(work, struct qca_data,
|
||||||
|
ws_tx_vote_off);
|
||||||
|
struct hci_uart *hu = qca->hu;
|
||||||
|
|
||||||
|
BT_DBG("hu %p tx clock vote off", hu);
|
||||||
|
|
||||||
|
/* Run HCI tx handling unlocked */
|
||||||
|
hci_uart_tx_wakeup(hu);
|
||||||
|
|
||||||
|
/* Now that message queued to tty driver, vote for tty clocks off.
|
||||||
|
* It is up to the tty driver to pend the clocks off until tx done.
|
||||||
|
*/
|
||||||
|
serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void hci_ibs_tx_idle_timeout(unsigned long arg)
|
||||||
|
{
|
||||||
|
struct hci_uart *hu = (struct hci_uart *)arg;
|
||||||
|
struct qca_data *qca = hu->priv;
|
||||||
|
unsigned long flags;
|
||||||
|
|
||||||
|
BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state);
|
||||||
|
|
||||||
|
spin_lock_irqsave_nested(&qca->hci_ibs_lock,
|
||||||
|
flags, SINGLE_DEPTH_NESTING);
|
||||||
|
|
||||||
|
switch (qca->tx_ibs_state) {
|
||||||
|
case HCI_IBS_TX_AWAKE:
|
||||||
|
/* TX_IDLE, go to SLEEP */
|
||||||
|
if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) {
|
||||||
|
BT_ERR("Failed to send SLEEP to device");
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
|
||||||
|
qca->ibs_sent_slps++;
|
||||||
|
queue_work(qca->workqueue, &qca->ws_tx_vote_off);
|
||||||
|
break;
|
||||||
|
|
||||||
|
case HCI_IBS_TX_ASLEEP:
|
||||||
|
case HCI_IBS_TX_WAKING:
|
||||||
|
/* Fall through */
|
||||||
|
|
||||||
|
default:
|
||||||
|
BT_ERR("Spurrious timeout tx state %d", qca->tx_ibs_state);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void hci_ibs_wake_retrans_timeout(unsigned long arg)
|
||||||
|
{
|
||||||
|
struct hci_uart *hu = (struct hci_uart *)arg;
|
||||||
|
struct qca_data *qca = hu->priv;
|
||||||
|
unsigned long flags, retrans_delay;
|
||||||
|
unsigned long retransmit = 0;
|
||||||
|
|
||||||
|
BT_DBG("hu %p wake retransmit timeout in %d state",
|
||||||
|
hu, qca->tx_ibs_state);
|
||||||
|
|
||||||
|
spin_lock_irqsave_nested(&qca->hci_ibs_lock,
|
||||||
|
flags, SINGLE_DEPTH_NESTING);
|
||||||
|
|
||||||
|
switch (qca->tx_ibs_state) {
|
||||||
|
case HCI_IBS_TX_WAKING:
|
||||||
|
/* No WAKE_ACK, retransmit WAKE */
|
||||||
|
retransmit = 1;
|
||||||
|
if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
|
||||||
|
BT_ERR("Failed to acknowledge device wake up");
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
qca->ibs_sent_wakes++;
|
||||||
|
retrans_delay = msecs_to_jiffies(qca->wake_retrans);
|
||||||
|
mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
|
||||||
|
break;
|
||||||
|
|
||||||
|
case HCI_IBS_TX_ASLEEP:
|
||||||
|
case HCI_IBS_TX_AWAKE:
|
||||||
|
/* Fall through */
|
||||||
|
|
||||||
|
default:
|
||||||
|
BT_ERR("Spurrious timeout tx state %d", qca->tx_ibs_state);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
|
||||||
|
|
||||||
|
if (retransmit)
|
||||||
|
hci_uart_tx_wakeup(hu);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Initialize protocol */
|
||||||
|
static int qca_open(struct hci_uart *hu)
|
||||||
|
{
|
||||||
|
struct qca_data *qca;
|
||||||
|
|
||||||
|
BT_DBG("hu %p qca_open", hu);
|
||||||
|
|
||||||
|
qca = kzalloc(sizeof(struct qca_data), GFP_ATOMIC);
|
||||||
|
if (!qca)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
|
skb_queue_head_init(&qca->txq);
|
||||||
|
skb_queue_head_init(&qca->tx_wait_q);
|
||||||
|
spin_lock_init(&qca->hci_ibs_lock);
|
||||||
|
qca->workqueue = create_singlethread_workqueue("qca_wq");
|
||||||
|
if (!qca->workqueue) {
|
||||||
|
BT_ERR("QCA Workqueue not initialized properly");
|
||||||
|
kfree(qca);
|
||||||
|
return -ENOMEM;
|
||||||
|
}
|
||||||
|
|
||||||
|
INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx);
|
||||||
|
INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
|
||||||
|
INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
|
||||||
|
INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
|
||||||
|
|
||||||
|
qca->hu = hu;
|
||||||
|
|
||||||
|
/* Assume we start with both sides asleep -- extra wakes OK */
|
||||||
|
qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
|
||||||
|
qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
|
||||||
|
|
||||||
|
/* clocks actually on, but we start votes off */
|
||||||
|
qca->tx_vote = false;
|
||||||
|
qca->rx_vote = false;
|
||||||
|
qca->flags = 0;
|
||||||
|
|
||||||
|
qca->ibs_sent_wacks = 0;
|
||||||
|
qca->ibs_sent_slps = 0;
|
||||||
|
qca->ibs_sent_wakes = 0;
|
||||||
|
qca->ibs_recv_wacks = 0;
|
||||||
|
qca->ibs_recv_slps = 0;
|
||||||
|
qca->ibs_recv_wakes = 0;
|
||||||
|
qca->vote_last_jif = jiffies;
|
||||||
|
qca->vote_on_ms = 0;
|
||||||
|
qca->vote_off_ms = 0;
|
||||||
|
qca->votes_on = 0;
|
||||||
|
qca->votes_off = 0;
|
||||||
|
qca->tx_votes_on = 0;
|
||||||
|
qca->tx_votes_off = 0;
|
||||||
|
qca->rx_votes_on = 0;
|
||||||
|
qca->rx_votes_off = 0;
|
||||||
|
|
||||||
|
hu->priv = qca;
|
||||||
|
|
||||||
|
init_timer(&qca->wake_retrans_timer);
|
||||||
|
qca->wake_retrans_timer.function = hci_ibs_wake_retrans_timeout;
|
||||||
|
qca->wake_retrans_timer.data = (u_long)hu;
|
||||||
|
qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
|
||||||
|
|
||||||
|
init_timer(&qca->tx_idle_timer);
|
||||||
|
qca->tx_idle_timer.function = hci_ibs_tx_idle_timeout;
|
||||||
|
qca->tx_idle_timer.data = (u_long)hu;
|
||||||
|
qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;
|
||||||
|
|
||||||
|
BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
|
||||||
|
qca->tx_idle_delay, qca->wake_retrans);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|

static void qca_debugfs_init(struct hci_dev *hdev)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;
	struct dentry *ibs_dir;
	umode_t mode;

	if (!hdev->debugfs)
		return;

	ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);

	/* read only */
	mode = S_IRUGO;
	debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
	debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state);
	debugfs_create_u64("ibs_sent_sleeps", mode, ibs_dir,
			   &qca->ibs_sent_slps);
	debugfs_create_u64("ibs_sent_wakes", mode, ibs_dir,
			   &qca->ibs_sent_wakes);
	debugfs_create_u64("ibs_sent_wake_acks", mode, ibs_dir,
			   &qca->ibs_sent_wacks);
	debugfs_create_u64("ibs_recv_sleeps", mode, ibs_dir,
			   &qca->ibs_recv_slps);
	debugfs_create_u64("ibs_recv_wakes", mode, ibs_dir,
			   &qca->ibs_recv_wakes);
	debugfs_create_u64("ibs_recv_wake_acks", mode, ibs_dir,
			   &qca->ibs_recv_wacks);
	debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote);
	debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on);
	debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off);
	debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote);
	debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on);
	debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off);
	debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on);
	debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off);
	debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms);
	debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms);

	/* read/write */
	mode = S_IRUGO | S_IWUSR;
	debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans);
	debugfs_create_u32("tx_idle_delay", mode, ibs_dir,
			   &qca->tx_idle_delay);
}

/* Flush protocol data */
static int qca_flush(struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca flush", hu);

	skb_queue_purge(&qca->tx_wait_q);
	skb_queue_purge(&qca->txq);

	return 0;
}

/* Close protocol */
static int qca_close(struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca close", hu);

	serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu);

	skb_queue_purge(&qca->tx_wait_q);
	skb_queue_purge(&qca->txq);
	del_timer(&qca->tx_idle_timer);
	del_timer(&qca->wake_retrans_timer);
	destroy_workqueue(qca->workqueue);
	qca->hu = NULL;

	kfree_skb(qca->rx_skb);

	hu->priv = NULL;

	kfree(qca);

	return 0;
}

/* Called upon a wake-up-indication from the device.
 */
static void device_want_to_wakeup(struct hci_uart *hu)
{
	unsigned long flags;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p want to wake up", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_wakes++;

	switch (qca->rx_ibs_state) {
	case HCI_IBS_RX_ASLEEP:
		/* Make sure clock is on - we may have turned clock off since
		 * receiving the wake up indicator awake rx clock.
		 */
		queue_work(qca->workqueue, &qca->ws_awake_rx);
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return;

	case HCI_IBS_RX_AWAKE:
		/* Always acknowledge device wake up,
		 * sending IBS message doesn't count as TX ON.
		 */
		if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) {
			BT_ERR("Failed to acknowledge device wake up");
			break;
		}
		qca->ibs_sent_wacks++;
		break;

	default:
		/* Any other state is illegal */
		BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d",
		       qca->rx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}

/* Called upon a sleep-indication from the device.
 */
static void device_want_to_sleep(struct hci_uart *hu)
{
	unsigned long flags;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p want to sleep", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_slps++;

	switch (qca->rx_ibs_state) {
	case HCI_IBS_RX_AWAKE:
		/* Update state */
		qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
		/* Vote off rx clock under workqueue */
		queue_work(qca->workqueue, &qca->ws_rx_vote_off);
		break;

	case HCI_IBS_RX_ASLEEP:
		/* Fall through */

	default:
		/* Any other state is illegal */
		BT_ERR("Received HCI_IBS_SLEEP_IND in rx state %d",
		       qca->rx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}

/* Called upon wake-up-acknowledgement from the device
 */
static void device_woke_up(struct hci_uart *hu)
{
	unsigned long flags, idle_delay;
	struct qca_data *qca = hu->priv;
	struct sk_buff *skb = NULL;

	BT_DBG("hu %p woke up", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_wacks++;

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		/* Expect one if we send 2 WAKEs */
		BT_DBG("Received HCI_IBS_WAKE_ACK in tx state %d",
		       qca->tx_ibs_state);
		break;

	case HCI_IBS_TX_WAKING:
		/* Send pending packets */
		while ((skb = skb_dequeue(&qca->tx_wait_q)))
			skb_queue_tail(&qca->txq, skb);

		/* Switch timers and change state to HCI_IBS_TX_AWAKE */
		del_timer(&qca->wake_retrans_timer);
		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
		qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
		break;

	case HCI_IBS_TX_ASLEEP:
		/* Fall through */

	default:
		BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d",
		       qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}

/* Enqueue frame for transmittion (padding, crc, etc) may be called from
 * two simultaneous tasklets.
 */
static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	unsigned long flags = 0, idle_delay;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
	       qca->tx_ibs_state);

	/* Prepend skb with frame type */
	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);

	/* Don't go to sleep in middle of patch download or
	 * Out-Of-Band(GPIOs control) sleep is selected.
	 */
	if (!test_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags)) {
		skb_queue_tail(&qca->txq, skb);
		return 0;
	}

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	/* Act according to current state */
	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		BT_DBG("Device awake, sending normally");
		skb_queue_tail(&qca->txq, skb);
		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
		break;

	case HCI_IBS_TX_ASLEEP:
		BT_DBG("Device asleep, waking up and queueing packet");
		/* Save packet for later */
		skb_queue_tail(&qca->tx_wait_q, skb);

		qca->tx_ibs_state = HCI_IBS_TX_WAKING;
		/* Schedule a work queue to wake up device */
		queue_work(qca->workqueue, &qca->ws_awake_device);
		break;

	case HCI_IBS_TX_WAKING:
		BT_DBG("Device waking up, queueing packet");
		/* Transient state; just keep packet for later */
		skb_queue_tail(&qca->tx_wait_q, skb);
		break;

	default:
		BT_ERR("Illegal tx state: %d (losing packet)",
		       qca->tx_ibs_state);
		kfree_skb(skb);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	return 0;
}

static int qca_ibs_sleep_ind(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);

	BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_SLEEP_IND);

	device_want_to_sleep(hu);

	kfree_skb(skb);
	return 0;
}

static int qca_ibs_wake_ind(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);

	BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_IND);

	device_want_to_wakeup(hu);

	kfree_skb(skb);
	return 0;
}

static int qca_ibs_wake_ack(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);

	BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_ACK);

	device_woke_up(hu);

	kfree_skb(skb);
	return 0;
}

#define QCA_IBS_SLEEP_IND_EVENT \
	.type = HCI_IBS_SLEEP_IND, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE

#define QCA_IBS_WAKE_IND_EVENT \
	.type = HCI_IBS_WAKE_IND, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE

#define QCA_IBS_WAKE_ACK_EVENT \
	.type = HCI_IBS_WAKE_ACK, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE

static const struct h4_recv_pkt qca_recv_pkts[] = {
	{ H4_RECV_ACL, .recv = hci_recv_frame },
	{ H4_RECV_SCO, .recv = hci_recv_frame },
	{ H4_RECV_EVENT, .recv = hci_recv_frame },
	{ QCA_IBS_WAKE_IND_EVENT, .recv = qca_ibs_wake_ind },
	{ QCA_IBS_WAKE_ACK_EVENT, .recv = qca_ibs_wake_ack },
	{ QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind },
};

static int qca_recv(struct hci_uart *hu, const void *data, int count)
{
	struct qca_data *qca = hu->priv;

	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
		return -EUNATCH;

	qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
				  qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
	if (IS_ERR(qca->rx_skb)) {
		int err = PTR_ERR(qca->rx_skb);
		BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
		qca->rx_skb = NULL;
		return err;
	}

	return count;
}

static struct sk_buff *qca_dequeue(struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;

	return skb_dequeue(&qca->txq);
}

static uint8_t qca_get_baudrate_value(int speed)
{
	switch(speed) {
	case 9600:
		return QCA_BAUDRATE_9600;
	case 19200:
		return QCA_BAUDRATE_19200;
	case 38400:
		return QCA_BAUDRATE_38400;
	case 57600:
		return QCA_BAUDRATE_57600;
	case 115200:
		return QCA_BAUDRATE_115200;
	case 230400:
		return QCA_BAUDRATE_230400;
	case 460800:
		return QCA_BAUDRATE_460800;
	case 500000:
		return QCA_BAUDRATE_500000;
	case 921600:
		return QCA_BAUDRATE_921600;
	case 1000000:
		return QCA_BAUDRATE_1000000;
	case 2000000:
		return QCA_BAUDRATE_2000000;
	case 3000000:
		return QCA_BAUDRATE_3000000;
	case 3500000:
		return QCA_BAUDRATE_3500000;
	default:
		return QCA_BAUDRATE_115200;
	}
}

static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;
	struct sk_buff *skb;
	u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };

	if (baudrate > QCA_BAUDRATE_3000000)
		return -EINVAL;

	cmd[4] = baudrate;

	skb = bt_skb_alloc(sizeof(cmd), GFP_ATOMIC);
	if (!skb) {
		BT_ERR("Failed to allocate memory for baudrate packet");
		return -ENOMEM;
	}

	/* Assign commands to change baudrate and packet type. */
	memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd));
	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	skb_queue_tail(&qca->txq, skb);
	hci_uart_tx_wakeup(hu);

	/* wait 300ms to change new baudrate on controller side
	 * controller will come back after they receive this HCI command
	 * then host can communicate with new baudrate to controller
	 */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
	set_current_state(TASK_INTERRUPTIBLE);

	return 0;
}

static int qca_setup(struct hci_uart *hu)
{
	struct hci_dev *hdev = hu->hdev;
	struct qca_data *qca = hu->priv;
	unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
	int ret;

	BT_INFO("%s: ROME setup", hdev->name);

	/* Patch downloading has to be done without IBS mode */
	clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);

	/* Setup initial baudrate */
	speed = 0;
	if (hu->init_speed)
		speed = hu->init_speed;
	else if (hu->proto->init_speed)
		speed = hu->proto->init_speed;

	if (speed)
		hci_uart_set_baudrate(hu, speed);

	/* Setup user speed if needed */
	speed = 0;
	if (hu->oper_speed)
		speed = hu->oper_speed;
	else if (hu->proto->oper_speed)
		speed = hu->proto->oper_speed;

	if (speed) {
		qca_baudrate = qca_get_baudrate_value(speed);

		BT_INFO("%s: Set UART speed to %d", hdev->name, speed);
		ret = qca_set_baudrate(hdev, qca_baudrate);
		if (ret) {
			BT_ERR("%s: Failed to change the baud rate (%d)",
			       hdev->name, ret);
			return ret;
		}
		hci_uart_set_baudrate(hu, speed);
	}

	/* Setup patch / NVM configurations */
	ret = qca_uart_setup_rome(hdev, qca_baudrate);
	if (!ret) {
		set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
		qca_debugfs_init(hdev);
	}

	/* Setup bdaddr */
	hu->hdev->set_bdaddr = qca_set_bdaddr_rome;

	return ret;
}

static struct hci_uart_proto qca_proto = {
	.id = HCI_UART_QCA,
	.name = "QCA",
	.init_speed = 115200,
	.oper_speed = 3000000,
	.open = qca_open,
	.close = qca_close,
	.flush = qca_flush,
	.setup = qca_setup,
	.recv = qca_recv,
	.enqueue = qca_enqueue,
	.dequeue = qca_dequeue,
};

int __init qca_init(void)
{
	return hci_uart_register_proto(&qca_proto);
}

int __exit qca_deinit(void)
{
	return hci_uart_unregister_proto(&qca_proto);
}
@@ -35,7 +35,7 @@
 #define HCIUARTGETFLAGS _IOR('U', 204, int)
 
 /* UART protocols */
-#define HCI_UART_MAX_PROTO 8
+#define HCI_UART_MAX_PROTO 9
 
 #define HCI_UART_H4 0
 #define HCI_UART_BCSP 1
@@ -45,6 +45,7 @@
 #define HCI_UART_ATH3K 5
 #define HCI_UART_INTEL 6
 #define HCI_UART_BCM 7
+#define HCI_UART_QCA 8
 
 #define HCI_UART_RAW_DEVICE 0
 #define HCI_UART_RESET_ON_INIT 1
@@ -176,3 +177,8 @@ int intel_deinit(void);
 int bcm_init(void);
 int bcm_deinit(void);
 #endif
+
+#ifdef CONFIG_BT_HCIUART_QCA
+int qca_init(void);
+int qca_deinit(void);
+#endif
@ -97,9 +97,7 @@ struct at86rf230_local {
|
|||||||
|
|
||||||
struct at86rf230_state_change irq;
|
struct at86rf230_state_change irq;
|
||||||
|
|
||||||
bool tx_aret;
|
|
||||||
unsigned long cal_timeout;
|
unsigned long cal_timeout;
|
||||||
s8 max_frame_retries;
|
|
||||||
bool is_tx;
|
bool is_tx;
|
||||||
bool is_tx_from_off;
|
bool is_tx_from_off;
|
||||||
u8 tx_retry;
|
u8 tx_retry;
|
||||||
@ -651,7 +649,7 @@ at86rf230_tx_complete(void *context)
|
|||||||
|
|
||||||
enable_irq(ctx->irq);
|
enable_irq(ctx->irq);
|
||||||
|
|
||||||
ieee802154_xmit_complete(lp->hw, lp->tx_skb, !lp->tx_aret);
|
ieee802154_xmit_complete(lp->hw, lp->tx_skb, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
@ -760,17 +758,10 @@ at86rf230_irq_trx_end(struct at86rf230_local *lp)
|
|||||||
{
|
{
|
||||||
if (lp->is_tx) {
|
if (lp->is_tx) {
|
||||||
lp->is_tx = 0;
|
lp->is_tx = 0;
|
||||||
|
at86rf230_async_state_change(lp, &lp->irq,
|
||||||
if (lp->tx_aret)
|
STATE_FORCE_TX_ON,
|
||||||
at86rf230_async_state_change(lp, &lp->irq,
|
at86rf230_tx_trac_status,
|
||||||
STATE_FORCE_TX_ON,
|
true);
|
||||||
at86rf230_tx_trac_status,
|
|
||||||
true);
|
|
||||||
else
|
|
||||||
at86rf230_async_state_change(lp, &lp->irq,
|
|
||||||
STATE_RX_AACK_ON,
|
|
||||||
at86rf230_tx_complete,
|
|
||||||
true);
|
|
||||||
} else {
|
} else {
|
||||||
at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq,
|
at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq,
|
||||||
at86rf230_rx_trac_check, true);
|
at86rf230_rx_trac_check, true);
|
||||||
@ -876,24 +867,16 @@ at86rf230_xmit_start(void *context)
|
|||||||
struct at86rf230_state_change *ctx = context;
|
struct at86rf230_state_change *ctx = context;
|
||||||
struct at86rf230_local *lp = ctx->lp;
|
struct at86rf230_local *lp = ctx->lp;
|
||||||
|
|
||||||
/* In ARET mode we need to go into STATE_TX_ARET_ON after we
|
/* check if we change from off state */
|
||||||
* are in STATE_TX_ON. The pfad differs here, so we change
|
if (lp->is_tx_from_off) {
|
||||||
* the complete handler.
|
lp->is_tx_from_off = false;
|
||||||
*/
|
at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
|
||||||
if (lp->tx_aret) {
|
at86rf230_write_frame,
|
||||||
if (lp->is_tx_from_off) {
|
false);
|
||||||
lp->is_tx_from_off = false;
|
|
||||||
at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
|
|
||||||
at86rf230_write_frame,
|
|
||||||
false);
|
|
||||||
} else {
|
|
||||||
at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
|
|
||||||
at86rf230_xmit_tx_on,
|
|
||||||
false);
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
|
at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
|
||||||
at86rf230_write_frame, false);
|
at86rf230_xmit_tx_on,
|
||||||
|
false);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1267,15 +1250,8 @@ static int
|
|||||||
at86rf230_set_frame_retries(struct ieee802154_hw *hw, s8 retries)
|
at86rf230_set_frame_retries(struct ieee802154_hw *hw, s8 retries)
|
||||||
{
|
{
|
||||||
struct at86rf230_local *lp = hw->priv;
|
struct at86rf230_local *lp = hw->priv;
|
||||||
int rc = 0;
|
|
||||||
|
|
||||||
lp->tx_aret = retries >= 0;
|
return at86rf230_write_subreg(lp, SR_MAX_FRAME_RETRIES, retries);
|
||||||
lp->max_frame_retries = retries;
|
|
||||||
|
|
||||||
if (retries >= 0)
|
|
||||||
rc = at86rf230_write_subreg(lp, SR_MAX_FRAME_RETRIES, retries);
|
|
||||||
|
|
||||||
return rc;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
|
@@ -833,6 +833,7 @@ static int cc2520_get_platform_data(struct spi_device *spi,
 	if (!spi_pdata)
 		return -ENOENT;
 	*pdata = *spi_pdata;
+	priv->fifo_pin = pdata->fifo;
 	return 0;
 }
 
@@ -197,6 +197,27 @@
 #define LOWPAN_NHC_UDP_CS_P_11	0xF3 /* source & dest = 0xF0B + 4bit inline */
 #define LOWPAN_NHC_UDP_CS_C	0x04 /* checksum elided */
 
+#define LOWPAN_PRIV_SIZE(llpriv_size)	\
+	(sizeof(struct lowpan_priv) + llpriv_size)
+
+enum lowpan_lltypes {
+	LOWPAN_LLTYPE_BTLE,
+	LOWPAN_LLTYPE_IEEE802154,
+};
+
+struct lowpan_priv {
+	enum lowpan_lltypes lltype;
+
+	/* must be last */
+	u8 priv[0] __aligned(sizeof(void *));
+};
+
+static inline
+struct lowpan_priv *lowpan_priv(const struct net_device *dev)
+{
+	return netdev_priv(dev);
+}
+
 #ifdef DEBUG
 /* print data in line */
 static inline void raw_dump_inline(const char *caller, char *msg,
@@ -372,6 +393,8 @@ lowpan_uncompress_size(const struct sk_buff *skb, u16 *dgram_offset)
 	return skb->len + uncomp_header - ret;
 }
 
+void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype);
+
 int
 lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
 			 const u8 *saddr, const u8 saddr_type,
@@ -512,9 +512,11 @@ struct hci_conn_params {
 		HCI_AUTO_CONN_DIRECT,
 		HCI_AUTO_CONN_ALWAYS,
 		HCI_AUTO_CONN_LINK_LOSS,
+		HCI_AUTO_CONN_EXPLICIT,
 	} auto_connect;
 
 	struct hci_conn *conn;
+	bool explicit_connect;
 };
 
 extern struct list_head hci_dev_list;
@@ -639,6 +641,7 @@ enum {
 	HCI_CONN_DROP,
 	HCI_CONN_PARAM_REMOVAL_PEND,
 	HCI_CONN_NEW_LINK_KEY,
+	HCI_CONN_SCANNING,
 };
 
 static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
@@ -808,6 +811,26 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
 	return NULL;
 }
 
+static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev)
+{
+	struct hci_conn_hash *h = &hdev->conn_hash;
+	struct hci_conn *c;
+
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(c, &h->list, list) {
+		if (c->type == LE_LINK && c->state == BT_CONNECT &&
+		    !test_bit(HCI_CONN_SCANNING, &c->flags)) {
+			rcu_read_unlock();
+			return c;
+		}
+	}
+
+	rcu_read_unlock();
+
+	return NULL;
+}
+
 int hci_disconnect(struct hci_conn *conn, __u8 reason);
 bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
 void hci_sco_setup(struct hci_conn *conn, __u8 status);
@@ -823,6 +846,9 @@ void hci_chan_del(struct hci_chan *chan);
 void hci_chan_list_flush(struct hci_conn *conn);
 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);
 
+struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
+				     u8 dst_type, u8 sec_level,
+				     u16 conn_timeout, u8 role);
 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 				u8 dst_type, u8 sec_level, u16 conn_timeout,
 				u8 role);
@@ -988,6 +1014,9 @@ void hci_conn_params_clear_disabled(struct hci_dev *hdev);
 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
 						  bdaddr_t *addr,
 						  u8 addr_type);
+struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
+						    bdaddr_t *addr,
+						    u8 addr_type);
 
 void hci_uuids_clear(struct hci_dev *hdev);
 
@ -63,6 +63,8 @@ struct cfg802154_ops {
|
|||||||
s8 max_frame_retries);
|
s8 max_frame_retries);
|
||||||
int (*set_lbt_mode)(struct wpan_phy *wpan_phy,
|
int (*set_lbt_mode)(struct wpan_phy *wpan_phy,
|
||||||
struct wpan_dev *wpan_dev, bool mode);
|
struct wpan_dev *wpan_dev, bool mode);
|
||||||
|
int (*set_ackreq_default)(struct wpan_phy *wpan_phy,
|
||||||
|
struct wpan_dev *wpan_dev, bool ackreq);
|
||||||
};
|
};
|
||||||
|
|
||||||
static inline bool
|
static inline bool
|
||||||
@ -173,6 +175,9 @@ struct wpan_dev {
|
|||||||
struct list_head list;
|
struct list_head list;
|
||||||
struct net_device *netdev;
|
struct net_device *netdev;
|
||||||
|
|
||||||
|
/* lowpan interface, set when the wpan_dev belongs to one lowpan_dev */
|
||||||
|
struct net_device *lowpan_dev;
|
||||||
|
|
||||||
u32 identifier;
|
u32 identifier;
|
||||||
|
|
||||||
/* MAC PIB */
|
/* MAC PIB */
|
||||||
@ -193,6 +198,9 @@ struct wpan_dev {
|
|||||||
bool lbt;
|
bool lbt;
|
||||||
|
|
||||||
bool promiscuous_mode;
|
bool promiscuous_mode;
|
||||||
|
|
||||||
|
/* fallback for acknowledgment bit setting */
|
||||||
|
bool ackreq;
|
||||||
};
|
};
|
||||||
|
|
||||||
#define to_phy(_dev) container_of(_dev, struct wpan_phy, dev)
|
#define to_phy(_dev) container_of(_dev, struct wpan_phy, dev)
|
||||||
|
@ -52,6 +52,8 @@ enum nl802154_commands {
|
|||||||
|
|
||||||
NL802154_CMD_SET_LBT_MODE,
|
NL802154_CMD_SET_LBT_MODE,
|
||||||
|
|
||||||
|
NL802154_CMD_SET_ACKREQ_DEFAULT,
|
||||||
|
|
||||||
/* add new commands above here */
|
/* add new commands above here */
|
||||||
|
|
||||||
/* used to define NL802154_CMD_MAX below */
|
/* used to define NL802154_CMD_MAX below */
|
||||||
@ -104,6 +106,8 @@ enum nl802154_attrs {
|
|||||||
|
|
||||||
NL802154_ATTR_SUPPORTED_COMMANDS,
|
NL802154_ATTR_SUPPORTED_COMMANDS,
|
||||||
|
|
||||||
|
NL802154_ATTR_ACKREQ_DEFAULT,
|
||||||
|
|
||||||
/* add attributes here, update the policy in nl802154.c */
|
/* add attributes here, update the policy in nl802154.c */
|
||||||
|
|
||||||
__NL802154_ATTR_AFTER_LAST,
|
__NL802154_ATTR_AFTER_LAST,
|
||||||
|
@@ -1,6 +1,6 @@
 obj-$(CONFIG_6LOWPAN) += 6lowpan.o
 
-6lowpan-y := iphc.o nhc.o
+6lowpan-y := core.o iphc.o nhc.o
 
 #rfc6282 nhcs
 obj-$(CONFIG_6LOWPAN_NHC_DEST) += nhc_dest.o
40
net/6lowpan/core.c
Normal file
@@ -0,0 +1,40 @@
/* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors:
 * (C) 2015 Pengutronix, Alexander Aring <aar@pengutronix.de>
 */

#include <linux/module.h>

#include <net/6lowpan.h>

void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype)
{
	lowpan_priv(dev)->lltype = lltype;
}
EXPORT_SYMBOL(lowpan_netdev_setup);

static int __init lowpan_module_init(void)
{
	request_module_nowait("ipv6");

	request_module_nowait("nhc_dest");
	request_module_nowait("nhc_fragment");
	request_module_nowait("nhc_hop");
	request_module_nowait("nhc_ipv6");
	request_module_nowait("nhc_mobility");
	request_module_nowait("nhc_routing");
	request_module_nowait("nhc_udp");

	return 0;
}
module_init(lowpan_module_init);

MODULE_LICENSE("GPL");
@ -48,7 +48,6 @@
|
|||||||
|
|
||||||
#include <linux/bitops.h>
|
#include <linux/bitops.h>
|
||||||
#include <linux/if_arp.h>
|
#include <linux/if_arp.h>
|
||||||
#include <linux/module.h>
|
|
||||||
#include <linux/netdevice.h>
|
#include <linux/netdevice.h>
|
||||||
#include <net/6lowpan.h>
|
#include <net/6lowpan.h>
|
||||||
#include <net/ipv6.h>
|
#include <net/ipv6.h>
|
||||||
@ -284,7 +283,7 @@ lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
|
|||||||
if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
|
if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30);
|
hdr.flow_lbl[0] = (tmp & 0x0F) | ((tmp >> 2) & 0x30);
|
||||||
memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
|
memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
|
||||||
skb_pull(skb, 2);
|
skb_pull(skb, 2);
|
||||||
break;
|
break;
|
||||||
@ -610,21 +609,3 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(lowpan_header_compress);
|
EXPORT_SYMBOL_GPL(lowpan_header_compress);
|
||||||
|
|
||||||
static int __init lowpan_module_init(void)
|
|
||||||
{
|
|
||||||
request_module_nowait("ipv6");
|
|
||||||
|
|
||||||
request_module_nowait("nhc_dest");
|
|
||||||
request_module_nowait("nhc_fragment");
|
|
||||||
request_module_nowait("nhc_hop");
|
|
||||||
request_module_nowait("nhc_ipv6");
|
|
||||||
request_module_nowait("nhc_mobility");
|
|
||||||
request_module_nowait("nhc_routing");
|
|
||||||
request_module_nowait("nhc_udp");
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
module_init(lowpan_module_init);
|
|
||||||
|
|
||||||
MODULE_LICENSE("GPL");
|
|
||||||
|
@ -85,7 +85,7 @@ struct lowpan_dev {
|
|||||||
|
|
||||||
static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev)
|
static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev)
|
||||||
{
|
{
|
||||||
return netdev_priv(netdev);
|
return (struct lowpan_dev *)lowpan_priv(netdev)->priv;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer)
|
static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer)
|
||||||
@ -848,8 +848,9 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
|
|||||||
struct net_device *netdev;
|
struct net_device *netdev;
|
||||||
int err = 0;
|
int err = 0;
|
||||||
|
|
||||||
netdev = alloc_netdev(sizeof(struct lowpan_dev), IFACE_NAME_TEMPLATE,
|
netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev)),
|
||||||
NET_NAME_UNKNOWN, netdev_setup);
|
IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN,
|
||||||
|
netdev_setup);
|
||||||
if (!netdev)
|
if (!netdev)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
@ -859,7 +860,7 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
|
|||||||
SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev);
|
SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev);
|
||||||
SET_NETDEV_DEVTYPE(netdev, &bt_type);
|
SET_NETDEV_DEVTYPE(netdev, &bt_type);
|
||||||
|
|
||||||
*dev = netdev_priv(netdev);
|
*dev = lowpan_dev(netdev);
|
||||||
(*dev)->netdev = netdev;
|
(*dev)->netdev = netdev;
|
||||||
(*dev)->hdev = chan->conn->hcon->hdev;
|
(*dev)->hdev = chan->conn->hcon->hdev;
|
||||||
INIT_LIST_HEAD(&(*dev)->peers);
|
INIT_LIST_HEAD(&(*dev)->peers);
|
||||||
@ -869,6 +870,8 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
|
|||||||
list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
|
list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
|
||||||
spin_unlock(&devices_lock);
|
spin_unlock(&devices_lock);
|
||||||
|
|
||||||
|
lowpan_netdev_setup(netdev, LOWPAN_LLTYPE_BTLE);
|
||||||
|
|
||||||
err = register_netdev(netdev);
|
err = register_netdev(netdev);
|
||||||
if (err < 0) {
|
if (err < 0) {
|
||||||
BT_INFO("register_netdev failed %d", err);
|
BT_INFO("register_netdev failed %d", err);
|
||||||
|
@@ -379,7 +379,7 @@ static bool amp_write_rem_assoc_frag(struct hci_dev *hdev,
 	amp_ctrl_put(ctrl);
 
 	hci_req_init(&req, hdev);
-	hci_req_add(&req, HCI_OP_WRITE_REMOTE_AMP_ASSOC, sizeof(cp), &cp);
+	hci_req_add(&req, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp);
 	hci_req_run_skb(&req, write_remote_amp_assoc_complete);
 
 	kfree(cp);
@ -64,6 +64,48 @@ static void hci_le_create_connection_cancel(struct hci_conn *conn)
|
|||||||
hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
|
hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* This function requires the caller holds hdev->lock */
|
||||||
|
static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
|
||||||
|
{
|
||||||
|
struct hci_conn_params *params;
|
||||||
|
struct smp_irk *irk;
|
||||||
|
bdaddr_t *bdaddr;
|
||||||
|
u8 bdaddr_type;
|
||||||
|
|
||||||
|
bdaddr = &conn->dst;
|
||||||
|
bdaddr_type = conn->dst_type;
|
||||||
|
|
||||||
|
/* Check if we need to convert to identity address */
|
||||||
|
irk = hci_get_irk(conn->hdev, bdaddr, bdaddr_type);
|
||||||
|
if (irk) {
|
||||||
|
bdaddr = &irk->bdaddr;
|
||||||
|
bdaddr_type = irk->addr_type;
|
||||||
|
}
|
||||||
|
|
||||||
|
params = hci_explicit_connect_lookup(conn->hdev, bdaddr, bdaddr_type);
|
||||||
|
if (!params)
|
||||||
|
return;
|
||||||
|
|
||||||
|
/* The connection attempt was doing scan for new RPA, and is
|
||||||
|
* in scan phase. If params are not associated with any other
|
||||||
|
* autoconnect action, remove them completely. If they are, just unmark
|
||||||
|
* them as waiting for connection, by clearing explicit_connect field.
|
||||||
|
*/
|
||||||
|
if (params->auto_connect == HCI_AUTO_CONN_EXPLICIT)
|
||||||
|
hci_conn_params_del(conn->hdev, bdaddr, bdaddr_type);
|
||||||
|
else
|
||||||
|
params->explicit_connect = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* This function requires the caller holds hdev->lock */
|
||||||
|
static void hci_connect_le_scan_remove(struct hci_conn *conn)
|
||||||
|
{
|
||||||
|
hci_connect_le_scan_cleanup(conn);
|
||||||
|
|
||||||
|
hci_conn_hash_del(conn->hdev, conn);
|
||||||
|
hci_update_background_scan(conn->hdev);
|
||||||
|
}
|
||||||
|
|
||||||
static void hci_acl_create_connection(struct hci_conn *conn)
|
static void hci_acl_create_connection(struct hci_conn *conn)
|
||||||
{
|
{
|
||||||
struct hci_dev *hdev = conn->hdev;
|
struct hci_dev *hdev = conn->hdev;
|
||||||
@ -340,8 +382,12 @@ static void hci_conn_timeout(struct work_struct *work)
|
|||||||
if (conn->out) {
|
if (conn->out) {
|
||||||
if (conn->type == ACL_LINK)
|
if (conn->type == ACL_LINK)
|
||||||
hci_acl_create_connection_cancel(conn);
|
hci_acl_create_connection_cancel(conn);
|
||||||
else if (conn->type == LE_LINK)
|
else if (conn->type == LE_LINK) {
|
||||||
hci_le_create_connection_cancel(conn);
|
if (test_bit(HCI_CONN_SCANNING, &conn->flags))
|
||||||
|
hci_connect_le_scan_remove(conn);
|
||||||
|
else
|
||||||
|
hci_le_create_connection_cancel(conn);
|
||||||
|
}
|
||||||
} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
|
} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
|
||||||
hci_reject_sco(conn);
|
hci_reject_sco(conn);
|
||||||
}
|
}
|
||||||
@ -637,15 +683,18 @@ static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
|
|||||||
{
|
{
|
||||||
struct hci_conn *conn;
|
struct hci_conn *conn;
|
||||||
|
|
||||||
if (status == 0)
|
hci_dev_lock(hdev);
|
||||||
return;
|
|
||||||
|
conn = hci_lookup_le_connect(hdev);
|
||||||
|
|
||||||
|
if (!status) {
|
||||||
|
hci_connect_le_scan_cleanup(conn);
|
||||||
|
goto done;
|
||||||
|
}
|
||||||
|
|
||||||
BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
|
BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
|
||||||
status);
|
status);
|
||||||
|
|
||||||
hci_dev_lock(hdev);
|
|
||||||
|
|
||||||
conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
|
|
||||||
if (!conn)
|
if (!conn)
|
||||||
goto done;
|
goto done;
|
||||||
|
|
||||||
@ -685,6 +734,7 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
|
|||||||
hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
|
hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
|
||||||
|
|
||||||
conn->state = BT_CONNECT;
|
conn->state = BT_CONNECT;
|
||||||
|
clear_bit(HCI_CONN_SCANNING, &conn->flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void hci_req_directed_advertising(struct hci_request *req,
|
static void hci_req_directed_advertising(struct hci_request *req,
|
||||||
@ -728,7 +778,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
|
|||||||
u8 role)
|
u8 role)
|
||||||
{
|
{
|
||||||
struct hci_conn_params *params;
|
struct hci_conn_params *params;
|
||||||
struct hci_conn *conn;
|
struct hci_conn *conn, *conn_unfinished;
|
||||||
struct smp_irk *irk;
|
struct smp_irk *irk;
|
||||||
struct hci_request req;
|
struct hci_request req;
|
||||||
int err;
|
int err;
|
||||||
@ -751,26 +801,29 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
|
|||||||
* and return the object found.
|
* and return the object found.
|
||||||
*/
|
*/
|
||||||
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
|
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
|
||||||
|
conn_unfinished = NULL;
|
||||||
if (conn) {
|
if (conn) {
|
||||||
conn->pending_sec_level = sec_level;
|
if (conn->state == BT_CONNECT &&
|
||||||
goto done;
|
test_bit(HCI_CONN_SCANNING, &conn->flags)) {
|
||||||
|
BT_DBG("will continue unfinished conn %pMR", dst);
|
||||||
|
conn_unfinished = conn;
|
||||||
|
} else {
|
||||||
|
if (conn->pending_sec_level < sec_level)
|
||||||
|
conn->pending_sec_level = sec_level;
|
||||||
|
goto done;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Since the controller supports only one LE connection attempt at a
|
/* Since the controller supports only one LE connection attempt at a
|
||||||
* time, we return -EBUSY if there is any connection attempt running.
|
* time, we return -EBUSY if there is any connection attempt running.
|
||||||
*/
|
*/
|
||||||
conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
|
if (hci_lookup_le_connect(hdev))
|
||||||
if (conn)
|
|
||||||
return ERR_PTR(-EBUSY);
|
return ERR_PTR(-EBUSY);
|
||||||
|
|
||||||
/* When given an identity address with existing identity
|
/* When given an identity address with existing identity
|
||||||
* resolving key, the connection needs to be established
|
* resolving key, the connection needs to be established
|
||||||
* to a resolvable random address.
|
* to a resolvable random address.
|
||||||
*
|
*
|
||||||
* This uses the cached random resolvable address from
|
|
||||||
* a previous scan. When no cached address is available,
|
|
||||||
* try connecting to the identity address instead.
|
|
||||||
*
|
|
||||||
* Storing the resolvable random address is required here
|
* Storing the resolvable random address is required here
|
||||||
* to handle connection failures. The address will later
|
* to handle connection failures. The address will later
|
||||||
* be resolved back into the original identity address
|
* be resolved back into the original identity address
|
||||||
@ -782,15 +835,23 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
|
|||||||
dst_type = ADDR_LE_DEV_RANDOM;
|
dst_type = ADDR_LE_DEV_RANDOM;
|
||||||
}
|
}
|
||||||
|
|
||||||
conn = hci_conn_add(hdev, LE_LINK, dst, role);
|
if (conn_unfinished) {
|
||||||
|
conn = conn_unfinished;
|
||||||
|
bacpy(&conn->dst, dst);
|
||||||
|
} else {
|
||||||
|
conn = hci_conn_add(hdev, LE_LINK, dst, role);
|
||||||
|
}
|
||||||
|
|
||||||
if (!conn)
|
if (!conn)
|
||||||
return ERR_PTR(-ENOMEM);
|
return ERR_PTR(-ENOMEM);
|
||||||
|
|
||||||
conn->dst_type = dst_type;
|
conn->dst_type = dst_type;
|
||||||
conn->sec_level = BT_SECURITY_LOW;
|
conn->sec_level = BT_SECURITY_LOW;
|
||||||
conn->pending_sec_level = sec_level;
|
|
||||||
conn->conn_timeout = conn_timeout;
|
conn->conn_timeout = conn_timeout;
|
||||||
|
|
||||||
|
if (!conn_unfinished)
|
||||||
|
conn->pending_sec_level = sec_level;
|
||||||
|
|
||||||
hci_req_init(&req, hdev);
|
hci_req_init(&req, hdev);
|
||||||
|
|
||||||
/* Disable advertising if we're active. For master role
|
/* Disable advertising if we're active. For master role
|
||||||
@ -854,6 +915,144 @@ create_conn:
|
|||||||
return ERR_PTR(err);
|
return ERR_PTR(err);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
done:
|
||||||
|
/* If this is continuation of connect started by hci_connect_le_scan,
|
||||||
|
* it already called hci_conn_hold and calling it again would mess the
|
||||||
|
* counter.
|
||||||
|
*/
|
||||||
|
if (!conn_unfinished)
|
||||||
|
hci_conn_hold(conn);
|
||||||
|
|
||||||
|
return conn;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void hci_connect_le_scan_complete(struct hci_dev *hdev, u8 status,
|
||||||
|
u16 opcode)
|
||||||
|
{
|
||||||
|
struct hci_conn *conn;
|
||||||
|
|
||||||
|
if (!status)
|
||||||
|
return;
|
||||||
|
|
||||||
|
BT_ERR("Failed to add device to auto conn whitelist: status 0x%2.2x",
|
||||||
|
status);
|
||||||
|
|
||||||
|
hci_dev_lock(hdev);
|
||||||
|
|
||||||
|
conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
|
||||||
|
if (conn)
|
||||||
|
hci_le_conn_failed(conn, status);
|
||||||
|
|
||||||
|
hci_dev_unlock(hdev);
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
|
||||||
|
{
|
||||||
|
struct hci_conn *conn;
|
||||||
|
|
||||||
|
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
|
||||||
|
if (!conn)
|
||||||
|
return false;
|
||||||
|
|
||||||
|
if (conn->dst_type != type)
|
||||||
|
return false;
|
||||||
|
|
||||||
|
if (conn->state != BT_CONNECTED)
|
||||||
|
return false;
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* This function requires the caller holds hdev->lock */
|
||||||
|
static int hci_explicit_conn_params_set(struct hci_request *req,
|
||||||
|
bdaddr_t *addr, u8 addr_type)
|
||||||
|
{
|
||||||
|
struct hci_dev *hdev = req->hdev;
|
||||||
|
struct hci_conn_params *params;
|
||||||
|
|
||||||
|
if (is_connected(hdev, addr, addr_type))
|
||||||
|
return -EISCONN;
|
||||||
|
|
||||||
|
params = hci_conn_params_add(hdev, addr, addr_type);
|
||||||
|
if (!params)
|
||||||
|
return -EIO;
|
||||||
|
|
||||||
|
/* If we created new params, or existing params were marked as disabled,
|
||||||
|
* mark them to be used just once to connect.
|
||||||
|
*/
|
||||||
|
if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
|
||||||
|
params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
|
||||||
|
list_del_init(¶ms->action);
|
||||||
|
list_add(¶ms->action, &hdev->pend_le_conns);
|
||||||
|
}
|
||||||
|
|
||||||
|
params->explicit_connect = true;
|
||||||
|
__hci_update_background_scan(req);
|
||||||
|
|
||||||
|
BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
|
||||||
|
params->auto_connect);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* This function requires the caller holds hdev->lock */
|
||||||
|
struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
|
||||||
|
u8 dst_type, u8 sec_level,
|
||||||
|
u16 conn_timeout, u8 role)
|
||||||
|
{
|
||||||
|
struct hci_conn *conn;
|
||||||
|
struct hci_request req;
|
||||||
|
int err;
|
||||||
|
|
||||||
|
/* Let's make sure that le is enabled.*/
|
||||||
|
if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
|
||||||
|
if (lmp_le_capable(hdev))
|
||||||
|
return ERR_PTR(-ECONNREFUSED);
|
||||||
|
|
||||||
|
return ERR_PTR(-EOPNOTSUPP);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Some devices send ATT messages as soon as the physical link is
|
||||||
|
* established. To be able to handle these ATT messages, the user-
|
||||||
|
* space first establishes the connection and then starts the pairing
|
||||||
|
* process.
|
||||||
|
*
|
||||||
|
* So if a hci_conn object already exists for the following connection
|
||||||
|
* attempt, we simply update pending_sec_level and auth_type fields
|
||||||
|
* and return the object found.
|
||||||
|
*/
|
||||||
|
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
|
||||||
|
if (conn) {
|
||||||
|
if (conn->pending_sec_level < sec_level)
|
||||||
|
conn->pending_sec_level = sec_level;
|
||||||
|
goto done;
|
||||||
|
}
|
||||||
|
|
||||||
|
BT_DBG("requesting refresh of dst_addr");
|
||||||
|
|
||||||
|
conn = hci_conn_add(hdev, LE_LINK, dst, role);
|
||||||
|
if (!conn)
|
||||||
|
return ERR_PTR(-ENOMEM);
|
||||||
|
|
||||||
|
hci_req_init(&req, hdev);
|
||||||
|
|
||||||
|
if (hci_explicit_conn_params_set(&req, dst, dst_type) < 0)
|
||||||
|
return ERR_PTR(-EBUSY);
|
||||||
|
|
||||||
|
conn->state = BT_CONNECT;
|
||||||
|
set_bit(HCI_CONN_SCANNING, &conn->flags);
|
||||||
|
|
||||||
|
err = hci_req_run(&req, hci_connect_le_scan_complete);
|
||||||
|
if (err && err != -ENODATA) {
|
||||||
|
hci_conn_del(conn);
|
||||||
|
return ERR_PTR(err);
|
||||||
|
}
|
||||||
|
|
||||||
|
conn->dst_type = dst_type;
|
||||||
|
conn->sec_level = BT_SECURITY_LOW;
|
||||||
|
conn->pending_sec_level = sec_level;
|
||||||
|
conn->conn_timeout = conn_timeout;
|
||||||
|
|
||||||
done:
|
done:
|
||||||
hci_conn_hold(conn);
|
hci_conn_hold(conn);
|
||||||
return conn;
|
return conn;
|
||||||
|
@ -2847,6 +2847,30 @@ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
|
|||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* This function requires the caller holds hdev->lock */
|
||||||
|
struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
|
||||||
|
bdaddr_t *addr,
|
||||||
|
u8 addr_type)
|
||||||
|
{
|
||||||
|
struct hci_conn_params *param;
|
||||||
|
|
||||||
|
list_for_each_entry(param, &hdev->pend_le_conns, action) {
|
||||||
|
if (bacmp(¶m->addr, addr) == 0 &&
|
||||||
|
param->addr_type == addr_type &&
|
||||||
|
param->explicit_connect)
|
||||||
|
return param;
|
||||||
|
}
|
||||||
|
|
||||||
|
list_for_each_entry(param, &hdev->pend_le_reports, action) {
|
||||||
|
if (bacmp(¶m->addr, addr) == 0 &&
|
||||||
|
param->addr_type == addr_type &&
|
||||||
|
param->explicit_connect)
|
||||||
|
return param;
|
||||||
|
}
|
||||||
|
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
/* This function requires the caller holds hdev->lock */
|
/* This function requires the caller holds hdev->lock */
|
||||||
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
|
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
|
||||||
bdaddr_t *addr, u8 addr_type)
|
bdaddr_t *addr, u8 addr_type)
|
||||||
@ -2916,6 +2940,15 @@ void hci_conn_params_clear_disabled(struct hci_dev *hdev)
|
|||||||
list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
|
list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
|
||||||
if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
|
if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
/* If trying to estabilish one time connection to disabled
|
||||||
|
* device, leave the params, but mark them as just once.
|
||||||
|
*/
|
||||||
|
if (params->explicit_connect) {
|
||||||
|
params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
list_del(¶ms->list);
|
list_del(¶ms->list);
|
||||||
kfree(params);
|
kfree(params);
|
||||||
}
|
}
|
||||||
|
@ -1059,7 +1059,7 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
|
|||||||
|
|
||||||
hci_dev_set_flag(hdev, HCI_LE_ADV);
|
hci_dev_set_flag(hdev, HCI_LE_ADV);
|
||||||
|
|
||||||
conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
|
conn = hci_lookup_le_connect(hdev);
|
||||||
if (conn)
|
if (conn)
|
||||||
queue_delayed_work(hdev->workqueue,
|
queue_delayed_work(hdev->workqueue,
|
||||||
&conn->le_conn_timeout,
|
&conn->le_conn_timeout,
|
||||||
@ -4447,7 +4447,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
|
|||||||
*/
|
*/
|
||||||
hci_dev_clear_flag(hdev, HCI_LE_ADV);
|
hci_dev_clear_flag(hdev, HCI_LE_ADV);
|
||||||
|
|
||||||
conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
|
conn = hci_lookup_le_connect(hdev);
|
||||||
if (!conn) {
|
if (!conn) {
|
||||||
conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
|
conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
|
||||||
if (!conn) {
|
if (!conn) {
|
||||||
@ -4640,42 +4640,49 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
|
|||||||
/* If we're not connectable only connect devices that we have in
|
/* If we're not connectable only connect devices that we have in
|
||||||
* our pend_le_conns list.
|
* our pend_le_conns list.
|
||||||
*/
|
*/
|
||||||
params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
|
params = hci_explicit_connect_lookup(hdev, addr, addr_type);
|
||||||
addr, addr_type);
|
|
||||||
if (!params)
|
if (!params)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
switch (params->auto_connect) {
|
if (!params->explicit_connect) {
|
||||||
case HCI_AUTO_CONN_DIRECT:
|
switch (params->auto_connect) {
|
||||||
/* Only devices advertising with ADV_DIRECT_IND are
|
case HCI_AUTO_CONN_DIRECT:
|
||||||
* triggering a connection attempt. This is allowing
|
/* Only devices advertising with ADV_DIRECT_IND are
|
||||||
* incoming connections from slave devices.
|
* triggering a connection attempt. This is allowing
|
||||||
*/
|
* incoming connections from slave devices.
|
||||||
if (adv_type != LE_ADV_DIRECT_IND)
|
*/
|
||||||
|
if (adv_type != LE_ADV_DIRECT_IND)
|
||||||
|
return NULL;
|
||||||
|
break;
|
||||||
|
case HCI_AUTO_CONN_ALWAYS:
|
||||||
|
/* Devices advertising with ADV_IND or ADV_DIRECT_IND
|
||||||
|
* are triggering a connection attempt. This means
|
||||||
|
* that incoming connectioms from slave device are
|
||||||
|
* accepted and also outgoing connections to slave
|
||||||
|
* devices are established when found.
|
||||||
|
*/
|
||||||
|
break;
|
||||||
|
default:
|
||||||
return NULL;
|
return NULL;
|
||||||
break;
|
}
|
||||||
case HCI_AUTO_CONN_ALWAYS:
|
|
||||||
/* Devices advertising with ADV_IND or ADV_DIRECT_IND
|
|
||||||
* are triggering a connection attempt. This means
|
|
||||||
* that incoming connectioms from slave device are
|
|
||||||
* accepted and also outgoing connections to slave
|
|
||||||
* devices are established when found.
|
|
||||||
*/
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
return NULL;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
|
conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
|
||||||
HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
|
HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
|
||||||
if (!IS_ERR(conn)) {
|
if (!IS_ERR(conn)) {
|
||||||
/* Store the pointer since we don't really have any
|
/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
|
||||||
|
* by higher layer that tried to connect, if no then
|
||||||
|
* store the pointer since we don't really have any
|
||||||
* other owner of the object besides the params that
|
* other owner of the object besides the params that
|
||||||
* triggered it. This way we can abort the connection if
|
* triggered it. This way we can abort the connection if
|
||||||
* the parameters get removed and keep the reference
|
* the parameters get removed and keep the reference
|
||||||
* count consistent once the connection is established.
|
* count consistent once the connection is established.
|
||||||
*/
|
*/
|
||||||
params->conn = hci_conn_get(conn);
|
|
||||||
|
if (!params->explicit_connect)
|
||||||
|
params->conn = hci_conn_get(conn);
|
||||||
|
|
||||||
return conn;
|
return conn;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -317,7 +317,7 @@ static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
|
|||||||
* address be updated at the next cycle.
|
* address be updated at the next cycle.
|
||||||
*/
|
*/
|
||||||
if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
|
if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
|
||||||
hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
|
hci_lookup_le_connect(hdev)) {
|
||||||
BT_DBG("Deferring random address update");
|
BT_DBG("Deferring random address update");
|
||||||
hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
|
hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
|
||||||
return;
|
return;
|
||||||
@ -479,7 +479,6 @@ void hci_update_page_scan(struct hci_dev *hdev)
|
|||||||
void __hci_update_background_scan(struct hci_request *req)
|
void __hci_update_background_scan(struct hci_request *req)
|
||||||
{
|
{
|
||||||
struct hci_dev *hdev = req->hdev;
|
struct hci_dev *hdev = req->hdev;
|
||||||
struct hci_conn *conn;
|
|
||||||
|
|
||||||
if (!test_bit(HCI_UP, &hdev->flags) ||
|
if (!test_bit(HCI_UP, &hdev->flags) ||
|
||||||
test_bit(HCI_INIT, &hdev->flags) ||
|
test_bit(HCI_INIT, &hdev->flags) ||
|
||||||
@ -529,8 +528,7 @@ void __hci_update_background_scan(struct hci_request *req)
|
|||||||
* since some controllers are not able to scan and connect at
|
* since some controllers are not able to scan and connect at
|
||||||
* the same time.
|
* the same time.
|
||||||
*/
|
*/
|
||||||
conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
|
if (hci_lookup_le_connect(hdev))
|
||||||
if (conn)
|
|
||||||
return;
|
return;
|
||||||
|
|
||||||
/* If controller is currently scanning, we stop it to ensure we
|
/* If controller is currently scanning, we stop it to ensure we
|
||||||
|
@ -7113,8 +7113,10 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
|
|||||||
else
|
else
|
||||||
role = HCI_ROLE_MASTER;
|
role = HCI_ROLE_MASTER;
|
||||||
|
|
||||||
hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
|
hcon = hci_connect_le_scan(hdev, dst, dst_type,
|
||||||
HCI_LE_CONN_TIMEOUT, role);
|
chan->sec_level,
|
||||||
|
HCI_LE_CONN_TIMEOUT,
|
||||||
|
role);
|
||||||
} else {
|
} else {
|
||||||
u8 auth_type = l2cap_get_auth_type(chan);
|
u8 auth_type = l2cap_get_auth_type(chan);
|
||||||
hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
|
hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
|
||||||
|
@@ -3564,9 +3564,10 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
 		 */
 		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

-		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
-				      sec_level, HCI_LE_CONN_TIMEOUT,
-				      HCI_ROLE_MASTER);
+		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
+					   addr_type, sec_level,
+					   HCI_LE_CONN_TIMEOUT,
+					   HCI_ROLE_MASTER);
 	}

 	if (IS_ERR(conn)) {
|
|||||||
/* Don't let discovery abort an outgoing connection attempt
|
/* Don't let discovery abort an outgoing connection attempt
|
||||||
* that's using directed advertising.
|
* that's using directed advertising.
|
||||||
*/
|
*/
|
||||||
if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
|
if (hci_lookup_le_connect(hdev)) {
|
||||||
*status = MGMT_STATUS_REJECTED;
|
*status = MGMT_STATUS_REJECTED;
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
@ -6107,6 +6108,12 @@ static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
|
|||||||
switch (auto_connect) {
|
switch (auto_connect) {
|
||||||
case HCI_AUTO_CONN_DISABLED:
|
case HCI_AUTO_CONN_DISABLED:
|
||||||
case HCI_AUTO_CONN_LINK_LOSS:
|
case HCI_AUTO_CONN_LINK_LOSS:
|
||||||
|
/* If auto connect is being disabled when we're trying to
|
||||||
|
* connect to device, keep connecting.
|
||||||
|
*/
|
||||||
|
if (params->explicit_connect)
|
||||||
|
list_add(¶ms->action, &hdev->pend_le_conns);
|
||||||
|
|
||||||
__hci_update_background_scan(req);
|
__hci_update_background_scan(req);
|
||||||
break;
|
break;
|
||||||
case HCI_AUTO_CONN_REPORT:
|
case HCI_AUTO_CONN_REPORT:
|
||||||
|
@@ -5,6 +5,7 @@

 #include <net/ieee802154_netdev.h>
 #include <net/inet_frag.h>
+#include <net/6lowpan.h>

 struct lowpan_create_arg {
 	u16 tag;
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct lowpan_dev_record {
|
|
||||||
struct net_device *ldev;
|
|
||||||
struct list_head list;
|
|
||||||
};
|
|
||||||
|
|
||||||
/* private device info */
|
/* private device info */
|
||||||
struct lowpan_dev_info {
|
struct lowpan_dev_info {
|
||||||
struct net_device *real_dev; /* real WPAN device ptr */
|
struct net_device *real_dev; /* real WPAN device ptr */
|
||||||
struct mutex dev_list_mtx; /* mutex for list ops */
|
|
||||||
u16 fragment_tag;
|
u16 fragment_tag;
|
||||||
};
|
};
|
||||||
|
|
||||||
static inline struct
|
static inline struct
|
||||||
lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
|
lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
|
||||||
{
|
{
|
||||||
return netdev_priv(dev);
|
return (struct lowpan_dev_info *)lowpan_priv(dev)->priv;
|
||||||
}
|
}
|
||||||
|
|
||||||
extern struct list_head lowpan_devices;
|
|
||||||
|
|
||||||
int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type);
|
int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type);
|
||||||
void lowpan_net_frag_exit(void);
|
void lowpan_net_frag_exit(void);
|
||||||
int lowpan_net_frag_init(void);
|
int lowpan_net_frag_init(void);
|
||||||
|
@@ -52,8 +52,7 @@

 #include "6lowpan_i.h"

-LIST_HEAD(lowpan_devices);
-static int lowpan_open_count;
+static int open_count;

 static struct header_ops lowpan_header_ops = {
 	.create	= lowpan_header_create,
|
|||||||
struct nlattr *tb[], struct nlattr *data[])
|
struct nlattr *tb[], struct nlattr *data[])
|
||||||
{
|
{
|
||||||
struct net_device *real_dev;
|
struct net_device *real_dev;
|
||||||
struct lowpan_dev_record *entry;
|
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
ASSERT_RTNL();
|
ASSERT_RTNL();
|
||||||
@ -133,67 +131,52 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
|
|||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
lowpan_dev_info(dev)->real_dev = real_dev;
|
if (real_dev->ieee802154_ptr->lowpan_dev) {
|
||||||
mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);
|
|
||||||
|
|
||||||
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
|
|
||||||
if (!entry) {
|
|
||||||
dev_put(real_dev);
|
dev_put(real_dev);
|
||||||
lowpan_dev_info(dev)->real_dev = NULL;
|
return -EBUSY;
|
||||||
return -ENOMEM;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
entry->ldev = dev;
|
lowpan_dev_info(dev)->real_dev = real_dev;
|
||||||
|
|
||||||
/* Set the lowpan hardware address to the wpan hardware address. */
|
/* Set the lowpan hardware address to the wpan hardware address. */
|
||||||
memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
|
memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
|
||||||
|
|
||||||
mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
|
lowpan_netdev_setup(dev, LOWPAN_LLTYPE_IEEE802154);
|
||||||
INIT_LIST_HEAD(&entry->list);
|
|
||||||
list_add_tail(&entry->list, &lowpan_devices);
|
|
||||||
mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
|
|
||||||
|
|
||||||
ret = register_netdevice(dev);
|
ret = register_netdevice(dev);
|
||||||
if (ret >= 0) {
|
if (ret < 0) {
|
||||||
if (!lowpan_open_count)
|
dev_put(real_dev);
|
||||||
lowpan_rx_init();
|
return ret;
|
||||||
lowpan_open_count++;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return ret;
|
real_dev->ieee802154_ptr->lowpan_dev = dev;
|
||||||
|
if (!open_count)
|
||||||
|
lowpan_rx_init();
|
||||||
|
|
||||||
|
open_count++;
|
||||||
|
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void lowpan_dellink(struct net_device *dev, struct list_head *head)
|
static void lowpan_dellink(struct net_device *dev, struct list_head *head)
|
||||||
{
|
{
|
||||||
struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
|
struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
|
||||||
struct net_device *real_dev = lowpan_dev->real_dev;
|
struct net_device *real_dev = lowpan_dev->real_dev;
|
||||||
struct lowpan_dev_record *entry, *tmp;
|
|
||||||
|
|
||||||
ASSERT_RTNL();
|
ASSERT_RTNL();
|
||||||
|
|
||||||
lowpan_open_count--;
|
open_count--;
|
||||||
if (!lowpan_open_count)
|
|
||||||
|
if (!open_count)
|
||||||
lowpan_rx_exit();
|
lowpan_rx_exit();
|
||||||
|
|
||||||
mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
|
real_dev->ieee802154_ptr->lowpan_dev = NULL;
|
||||||
list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
|
unregister_netdevice(dev);
|
||||||
if (entry->ldev == dev) {
|
|
||||||
list_del(&entry->list);
|
|
||||||
kfree(entry);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
|
|
||||||
|
|
||||||
mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);
|
|
||||||
|
|
||||||
unregister_netdevice_queue(dev, head);
|
|
||||||
|
|
||||||
dev_put(real_dev);
|
dev_put(real_dev);
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
|
static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
|
||||||
.kind = "lowpan",
|
.kind = "lowpan",
|
||||||
.priv_size = sizeof(struct lowpan_dev_info),
|
.priv_size = LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev_info)),
|
||||||
.setup = lowpan_setup,
|
.setup = lowpan_setup,
|
||||||
.newlink = lowpan_newlink,
|
.newlink = lowpan_newlink,
|
||||||
.dellink = lowpan_dellink,
|
.dellink = lowpan_dellink,
|
||||||
@ -214,19 +197,21 @@ static int lowpan_device_event(struct notifier_block *unused,
|
|||||||
unsigned long event, void *ptr)
|
unsigned long event, void *ptr)
|
||||||
{
|
{
|
||||||
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
|
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
|
||||||
LIST_HEAD(del_list);
|
|
||||||
struct lowpan_dev_record *entry, *tmp;
|
|
||||||
|
|
||||||
if (dev->type != ARPHRD_IEEE802154)
|
if (dev->type != ARPHRD_IEEE802154)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
if (event == NETDEV_UNREGISTER) {
|
switch (event) {
|
||||||
list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
|
case NETDEV_UNREGISTER:
|
||||||
if (lowpan_dev_info(entry->ldev)->real_dev == dev)
|
/* Check if wpan interface is unregistered that we
|
||||||
lowpan_dellink(entry->ldev, &del_list);
|
* also delete possible lowpan interfaces which belongs
|
||||||
}
|
* to the wpan interface.
|
||||||
|
*/
|
||||||
unregister_netdevice_many(&del_list);
|
if (dev->ieee802154_ptr && dev->ieee802154_ptr->lowpan_dev)
|
||||||
|
lowpan_dellink(dev->ieee802154_ptr->lowpan_dev, NULL);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
out:
|
out:
|
||||||
|
@@ -15,36 +15,14 @@

 #include "6lowpan_i.h"

-static int lowpan_give_skb_to_devices(struct sk_buff *skb,
+static int lowpan_give_skb_to_device(struct sk_buff *skb,
 				      struct net_device *dev)
 {
-	struct lowpan_dev_record *entry;
-	struct sk_buff *skb_cp;
-	int stat = NET_RX_SUCCESS;
-
+	skb->dev = dev->ieee802154_ptr->lowpan_dev;
 	skb->protocol = htons(ETH_P_IPV6);
 	skb->pkt_type = PACKET_HOST;

-	rcu_read_lock();
-	list_for_each_entry_rcu(entry, &lowpan_devices, list)
-		if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) {
-			skb_cp = skb_copy(skb, GFP_ATOMIC);
-			if (!skb_cp) {
-				kfree_skb(skb);
-				rcu_read_unlock();
-				return NET_RX_DROP;
-			}
-
-			skb_cp->dev = entry->ldev;
-			stat = netif_rx(skb_cp);
-			if (stat == NET_RX_DROP)
-				break;
-		}
-	rcu_read_unlock();
-
-	consume_skb(skb);
-
-	return stat;
+	return netif_rx(skb);
 }

 static int
|
|||||||
struct ieee802154_hdr hdr;
|
struct ieee802154_hdr hdr;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
if (dev->type != ARPHRD_IEEE802154 ||
|
||||||
|
!dev->ieee802154_ptr->lowpan_dev)
|
||||||
|
goto drop;
|
||||||
|
|
||||||
skb = skb_share_check(skb, GFP_ATOMIC);
|
skb = skb_share_check(skb, GFP_ATOMIC);
|
||||||
if (!skb)
|
if (!skb)
|
||||||
goto drop;
|
goto drop;
|
||||||
@ -99,9 +81,6 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
|
|||||||
if (skb->pkt_type == PACKET_OTHERHOST)
|
if (skb->pkt_type == PACKET_OTHERHOST)
|
||||||
goto drop_skb;
|
goto drop_skb;
|
||||||
|
|
||||||
if (dev->type != ARPHRD_IEEE802154)
|
|
||||||
goto drop_skb;
|
|
||||||
|
|
||||||
if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
|
if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
|
||||||
goto drop_skb;
|
goto drop_skb;
|
||||||
|
|
||||||
@ -109,7 +88,7 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
|
|||||||
if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
|
if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
|
||||||
/* Pull off the 1-byte of 6lowpan header. */
|
/* Pull off the 1-byte of 6lowpan header. */
|
||||||
skb_pull(skb, 1);
|
skb_pull(skb, 1);
|
||||||
return lowpan_give_skb_to_devices(skb, NULL);
|
return lowpan_give_skb_to_device(skb, dev);
|
||||||
} else {
|
} else {
|
||||||
switch (skb->data[0] & 0xe0) {
|
switch (skb->data[0] & 0xe0) {
|
||||||
case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */
|
case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */
|
||||||
@ -117,7 +96,7 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
|
|||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto drop_skb;
|
goto drop_skb;
|
||||||
|
|
||||||
return lowpan_give_skb_to_devices(skb, NULL);
|
return lowpan_give_skb_to_device(skb, dev);
|
||||||
case LOWPAN_DISPATCH_FRAG1: /* first fragment header */
|
case LOWPAN_DISPATCH_FRAG1: /* first fragment header */
|
||||||
ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAG1);
|
ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAG1);
|
||||||
if (ret == 1) {
|
if (ret == 1) {
|
||||||
@ -125,7 +104,7 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
|
|||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto drop_skb;
|
goto drop_skb;
|
||||||
|
|
||||||
return lowpan_give_skb_to_devices(skb, NULL);
|
return lowpan_give_skb_to_device(skb, dev);
|
||||||
} else if (ret == -1) {
|
} else if (ret == -1) {
|
||||||
return NET_RX_DROP;
|
return NET_RX_DROP;
|
||||||
} else {
|
} else {
|
||||||
@ -138,7 +117,7 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
|
|||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto drop_skb;
|
goto drop_skb;
|
||||||
|
|
||||||
return lowpan_give_skb_to_devices(skb, NULL);
|
return lowpan_give_skb_to_device(skb, dev);
|
||||||
} else if (ret == -1) {
|
} else if (ret == -1) {
|
||||||
return NET_RX_DROP;
|
return NET_RX_DROP;
|
||||||
} else {
|
} else {
|
||||||
|
@@ -112,7 +112,7 @@ lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,

 	frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr);
 	if (IS_ERR(frag))
-		return -PTR_ERR(frag);
+		return PTR_ERR(frag);

 	memcpy(skb_put(frag, frag_hdrlen), frag_hdr, frag_hdrlen);
 	memcpy(skb_put(frag, len), skb_network_header(skb) + offset, len);
@@ -224,7 +224,7 @@ static int lowpan_header(struct sk_buff *skb, struct net_device *dev)
 	} else {
 		da.mode = IEEE802154_ADDR_LONG;
 		da.extended_addr = ieee802154_devaddr_from_raw(daddr);
-		cb->ackreq = wpan_dev->frame_retries >= 0;
+		cb->ackreq = wpan_dev->ackreq;
 	}

 	return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
@@ -230,6 +230,8 @@ static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX+1] = {
 	[NL802154_ATTR_WPAN_PHY_CAPS] = { .type = NLA_NESTED },

 	[NL802154_ATTR_SUPPORTED_COMMANDS] = { .type = NLA_NESTED },
+
+	[NL802154_ATTR_ACKREQ_DEFAULT] = { .type = NLA_U8 },
 };

 /* message building helper */
@@ -458,6 +460,7 @@ static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev,
 		CMD(set_max_csma_backoffs, SET_MAX_CSMA_BACKOFFS);
 		CMD(set_max_frame_retries, SET_MAX_FRAME_RETRIES);
 		CMD(set_lbt_mode, SET_LBT_MODE);
+		CMD(set_ackreq_default, SET_ACKREQ_DEFAULT);

 		if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER)
 			CMD(set_tx_power, SET_TX_POWER);
@@ -656,6 +659,10 @@ nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
 	if (nla_put_u8(msg, NL802154_ATTR_LBT_MODE, wpan_dev->lbt))
 		goto nla_put_failure;

+	/* ackreq default behaviour */
+	if (nla_put_u8(msg, NL802154_ATTR_ACKREQ_DEFAULT, wpan_dev->ackreq))
+		goto nla_put_failure;
+
 	genlmsg_end(msg, hdr);
 	return 0;

||||||
@ -1042,6 +1049,24 @@ static int nl802154_set_lbt_mode(struct sk_buff *skb, struct genl_info *info)
|
|||||||
return rdev_set_lbt_mode(rdev, wpan_dev, mode);
|
return rdev_set_lbt_mode(rdev, wpan_dev, mode);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int
|
||||||
|
nl802154_set_ackreq_default(struct sk_buff *skb, struct genl_info *info)
|
||||||
|
{
|
||||||
|
struct cfg802154_registered_device *rdev = info->user_ptr[0];
|
||||||
|
struct net_device *dev = info->user_ptr[1];
|
||||||
|
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
|
||||||
|
bool ackreq;
|
||||||
|
|
||||||
|
if (netif_running(dev))
|
||||||
|
return -EBUSY;
|
||||||
|
|
||||||
|
if (!info->attrs[NL802154_ATTR_ACKREQ_DEFAULT])
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
ackreq = !!nla_get_u8(info->attrs[NL802154_ATTR_ACKREQ_DEFAULT]);
|
||||||
|
return rdev_set_ackreq_default(rdev, wpan_dev, ackreq);
|
||||||
|
}
|
||||||
|
|
||||||
#define NL802154_FLAG_NEED_WPAN_PHY 0x01
|
#define NL802154_FLAG_NEED_WPAN_PHY 0x01
|
||||||
#define NL802154_FLAG_NEED_NETDEV 0x02
|
#define NL802154_FLAG_NEED_NETDEV 0x02
|
||||||
#define NL802154_FLAG_NEED_RTNL 0x04
|
#define NL802154_FLAG_NEED_RTNL 0x04
|
||||||
@@ -1248,6 +1273,14 @@ static const struct genl_ops nl802154_ops[] = {
 		.internal_flags = NL802154_FLAG_NEED_NETDEV |
 				  NL802154_FLAG_NEED_RTNL,
 	},
+	{
+		.cmd = NL802154_CMD_SET_ACKREQ_DEFAULT,
+		.doit = nl802154_set_ackreq_default,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
 };

 /* initialisation/exit functions */
|
@ -195,4 +195,17 @@ rdev_set_lbt_mode(struct cfg802154_registered_device *rdev,
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline int
|
||||||
|
rdev_set_ackreq_default(struct cfg802154_registered_device *rdev,
|
||||||
|
struct wpan_dev *wpan_dev, bool ackreq)
|
||||||
|
{
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
trace_802154_rdev_set_ackreq_default(&rdev->wpan_phy, wpan_dev,
|
||||||
|
ackreq);
|
||||||
|
ret = rdev->ops->set_ackreq_default(&rdev->wpan_phy, wpan_dev, ackreq);
|
||||||
|
trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
#endif /* __CFG802154_RDEV_OPS */
|
#endif /* __CFG802154_RDEV_OPS */
|
||||||
|
@ -275,6 +275,25 @@ TRACE_EVENT(802154_rdev_set_lbt_mode,
|
|||||||
WPAN_DEV_PR_ARG, BOOL_TO_STR(__entry->mode))
|
WPAN_DEV_PR_ARG, BOOL_TO_STR(__entry->mode))
|
||||||
);
|
);
|
||||||
|
|
||||||
|
TRACE_EVENT(802154_rdev_set_ackreq_default,
|
||||||
|
TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
|
||||||
|
bool ackreq),
|
||||||
|
TP_ARGS(wpan_phy, wpan_dev, ackreq),
|
||||||
|
TP_STRUCT__entry(
|
||||||
|
WPAN_PHY_ENTRY
|
||||||
|
WPAN_DEV_ENTRY
|
||||||
|
__field(bool, ackreq)
|
||||||
|
),
|
||||||
|
TP_fast_assign(
|
||||||
|
WPAN_PHY_ASSIGN;
|
||||||
|
WPAN_DEV_ASSIGN;
|
||||||
|
__entry->ackreq = ackreq;
|
||||||
|
),
|
||||||
|
TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT
|
||||||
|
", ackreq default: %s", WPAN_PHY_PR_ARG,
|
||||||
|
WPAN_DEV_PR_ARG, BOOL_TO_STR(__entry->ackreq))
|
||||||
|
);
|
||||||
|
|
||||||
TRACE_EVENT(802154_rdev_return_int,
|
TRACE_EVENT(802154_rdev_return_int,
|
||||||
TP_PROTO(struct wpan_phy *wpan_phy, int ret),
|
TP_PROTO(struct wpan_phy *wpan_phy, int ret),
|
||||||
TP_ARGS(wpan_phy, ret),
|
TP_ARGS(wpan_phy, ret),
|
||||||
|
@ -209,10 +209,6 @@ ieee802154_set_backoff_exponent(struct wpan_phy *wpan_phy,
|
|||||||
{
|
{
|
||||||
ASSERT_RTNL();
|
ASSERT_RTNL();
|
||||||
|
|
||||||
if (wpan_dev->min_be == min_be &&
|
|
||||||
wpan_dev->max_be == max_be)
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
wpan_dev->min_be = min_be;
|
wpan_dev->min_be = min_be;
|
||||||
wpan_dev->max_be = max_be;
|
wpan_dev->max_be = max_be;
|
||||||
return 0;
|
return 0;
|
||||||
@@ -224,9 +220,6 @@ ieee802154_set_short_addr(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
 {
 	ASSERT_RTNL();

-	if (wpan_dev->short_addr == short_addr)
-		return 0;
-
 	wpan_dev->short_addr = short_addr;
 	return 0;
 }
@@ -238,9 +231,6 @@ ieee802154_set_max_csma_backoffs(struct wpan_phy *wpan_phy,
 {
 	ASSERT_RTNL();

-	if (wpan_dev->csma_retries == max_csma_backoffs)
-		return 0;
-
 	wpan_dev->csma_retries = max_csma_backoffs;
 	return 0;
 }
@@ -252,9 +242,6 @@ ieee802154_set_max_frame_retries(struct wpan_phy *wpan_phy,
 {
 	ASSERT_RTNL();

-	if (wpan_dev->frame_retries == max_frame_retries)
-		return 0;
-
 	wpan_dev->frame_retries = max_frame_retries;
 	return 0;
 }
@@ -265,13 +252,20 @@ ieee802154_set_lbt_mode(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
 {
 	ASSERT_RTNL();

-	if (wpan_dev->lbt == mode)
-		return 0;
-
 	wpan_dev->lbt = mode;
 	return 0;
 }

+static int
+ieee802154_set_ackreq_default(struct wpan_phy *wpan_phy,
+			      struct wpan_dev *wpan_dev, bool ackreq)
+{
+	ASSERT_RTNL();
+
+	wpan_dev->ackreq = ackreq;
+	return 0;
+}
+
 const struct cfg802154_ops mac802154_config_ops = {
 	.add_virtual_intf_deprecated = ieee802154_add_iface_deprecated,
 	.del_virtual_intf_deprecated = ieee802154_del_iface_deprecated,
@@ -289,4 +283,5 @@ const struct cfg802154_ops mac802154_config_ops = {
 	.set_max_csma_backoffs = ieee802154_set_max_csma_backoffs,
 	.set_max_frame_retries = ieee802154_set_max_frame_retries,
 	.set_lbt_mode = ieee802154_set_lbt_mode,
+	.set_ackreq_default = ieee802154_set_ackreq_default,
 };
@@ -125,6 +125,14 @@ static int mac802154_wpan_mac_addr(struct net_device *dev, void *p)
 	if (netif_running(dev))
 		return -EBUSY;

+	/* lowpan need to be down for update
+	 * SLAAC address after ifup
+	 */
+	if (sdata->wpan_dev.lowpan_dev) {
+		if (netif_running(sdata->wpan_dev.lowpan_dev))
+			return -EBUSY;
+	}
+
 	ieee802154_be64_to_le64(&extended_addr, addr->sa_data);
 	if (!ieee802154_is_valid_extended_unicast_addr(extended_addr))
 		return -EINVAL;
@@ -132,6 +140,13 @@ static int mac802154_wpan_mac_addr(struct net_device *dev, void *p)
 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 	sdata->wpan_dev.extended_addr = extended_addr;

+	/* update lowpan interface mac address when
+	 * wpan mac has been changed
+	 */
+	if (sdata->wpan_dev.lowpan_dev)
+		memcpy(sdata->wpan_dev.lowpan_dev->dev_addr, dev->dev_addr,
+		       dev->addr_len);
+
 	return mac802154_wpan_update_llsec(dev);
 }

@@ -483,8 +498,7 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
 		wpan_dev->min_be = 3;
 		wpan_dev->max_be = 5;
 		wpan_dev->csma_retries = 4;
-		/* for compatibility, actual default is 3 */
-		wpan_dev->frame_retries = -1;
+		wpan_dev->frame_retries = 3;

 		wpan_dev->pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST);
 		wpan_dev->short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
@@ -111,7 +111,7 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops)
 	phy->supported.max_minbe = 8;
 	phy->supported.min_maxbe = 3;
 	phy->supported.max_maxbe = 8;
-	phy->supported.min_frame_retries = -1;
+	phy->supported.min_frame_retries = 0;
 	phy->supported.max_frame_retries = 7;
 	phy->supported.max_csma_backoffs = 5;
 	phy->supported.lbt = NL802154_SUPPORTED_BOOL_FALSE;
@@ -177,11 +177,8 @@ int ieee802154_register_hw(struct ieee802154_hw *hw)
 	}

 	if (!(hw->flags & IEEE802154_HW_FRAME_RETRIES)) {
-		/* TODO should be 3, but our default value is -1 which means
-		 * no ARET handling.
-		 */
-		local->phy->supported.min_frame_retries = -1;
-		local->phy->supported.max_frame_retries = -1;
+		local->phy->supported.min_frame_retries = 3;
+		local->phy->supported.max_frame_retries = 3;
 	}

 	if (hw->flags & IEEE802154_HW_PROMISCUOUS)
@@ -164,7 +164,6 @@ static int rfkill_gpio_remove(struct platform_device *pdev)
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id rfkill_acpi_match[] = {
 	{ "BCM2E1A", RFKILL_TYPE_BLUETOOTH },
-	{ "BCM2E39", RFKILL_TYPE_BLUETOOTH },
 	{ "BCM2E3D", RFKILL_TYPE_BLUETOOTH },
 	{ "BCM2E40", RFKILL_TYPE_BLUETOOTH },
 	{ "BCM2E64", RFKILL_TYPE_BLUETOOTH },