Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next

Johan Hedberg says:

====================
pull request: bluetooth-next 2021-02-11

Here's the main bluetooth-next pull request for 5.12:

 - Add support for advertising monitor offloading using Microsoft vendor extensions
 - Add firmware download support for MediaTek MT7921U USB devices
 - Suspend-related fixes for Qualcomm devices
 - Add support for Intel GarfieldPeak controller
 - Various other smaller fixes & cleanups

Please let me know if there are any issues pulling. Thanks.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 0ae20159e8
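A recurring pattern in this pull request: a controller driver opts into the Microsoft vendor extension (used for the advertising monitor offloading mentioned above) by registering its vendor-specific opcode from its setup path. Below is a minimal sketch of that pattern, assuming CONFIG_BT_MSFTEXT and using the 0xFD70 opcode this series assigns to the Qualcomm WCN399x; example_vendor_setup() is a hypothetical callback, and other controllers use their own opcodes (0xFCF0 for RTL8822CE, 0xFC1E for recent Intel parts).

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

/* Sketch only: a hypothetical vendor setup callback announcing MSFT
 * extension support. The opcode value is controller specific.
 */
static int example_vendor_setup(struct hci_dev *hdev)
{
        /* ... firmware download and other vendor-specific init ... */

        /* Tell the HCI core which vendor opcode carries MSFT extension
         * commands; the core's msft.c then drives monitor offloading.
         */
        hci_set_msft_opcode(hdev, 0xFD70);

        return 0;
}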
@@ -38,7 +38,7 @@ Following example uses irq pin number 3 of gpio0 for out of band wake-on-bt:
 compatible = "usb1286,204e";
 reg = <1>;
 interrupt-parent = <&gpio0>;
-interrupt-name = "wakeup";
+interrupt-names = "wakeup";
 interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
 };
 };

@@ -437,38 +437,31 @@ int btintel_read_version_tlv(struct hci_dev *hdev, struct intel_version_tlv *ver
         tlv = (struct intel_tlv *)skb->data;
         switch (tlv->type) {
         case INTEL_TLV_CNVI_TOP:
-                version->cnvi_top =
-                        __le32_to_cpu(get_unaligned_le32(tlv->val));
+                version->cnvi_top = get_unaligned_le32(tlv->val);
                 break;
         case INTEL_TLV_CNVR_TOP:
-                version->cnvr_top =
-                        __le32_to_cpu(get_unaligned_le32(tlv->val));
+                version->cnvr_top = get_unaligned_le32(tlv->val);
                 break;
         case INTEL_TLV_CNVI_BT:
-                version->cnvi_bt =
-                        __le32_to_cpu(get_unaligned_le32(tlv->val));
+                version->cnvi_bt = get_unaligned_le32(tlv->val);
                 break;
         case INTEL_TLV_CNVR_BT:
-                version->cnvr_bt =
-                        __le32_to_cpu(get_unaligned_le32(tlv->val));
+                version->cnvr_bt = get_unaligned_le32(tlv->val);
                 break;
         case INTEL_TLV_DEV_REV_ID:
-                version->dev_rev_id =
-                        __le16_to_cpu(get_unaligned_le16(tlv->val));
+                version->dev_rev_id = get_unaligned_le16(tlv->val);
                 break;
         case INTEL_TLV_IMAGE_TYPE:
                 version->img_type = tlv->val[0];
                 break;
         case INTEL_TLV_TIME_STAMP:
-                version->timestamp =
-                        __le16_to_cpu(get_unaligned_le16(tlv->val));
+                version->timestamp = get_unaligned_le16(tlv->val);
                 break;
         case INTEL_TLV_BUILD_TYPE:
                 version->build_type = tlv->val[0];
                 break;
         case INTEL_TLV_BUILD_NUM:
-                version->build_num =
-                        __le32_to_cpu(get_unaligned_le32(tlv->val));
+                version->build_num = get_unaligned_le32(tlv->val);
                 break;
         case INTEL_TLV_SECURE_BOOT:
                 version->secure_boot = tlv->val[0];

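The Intel TLV change above removes a double endianness conversion: get_unaligned_le32()/get_unaligned_le16() already return host-order values read from a little-endian, possibly unaligned buffer, so wrapping them in __le32_to_cpu()/__le16_to_cpu() would swap the bytes a second time on big-endian machines. A small sketch of the distinction follows; read_tlv_u32() is a hypothetical helper, not part of the patch.

#include <linux/types.h>
#include <asm/unaligned.h>

/* Correct: a single conversion from LE wire format to CPU order. */
static u32 read_tlv_u32(const u8 *val)
{
        return get_unaligned_le32(val);
}

/* Wrong (what the patch removes): get_unaligned_le32() already returned a
 * CPU-order value, so a further __le32_to_cpu() byte-swaps it again on
 * big-endian hosts and confuses sparse's __le32/u32 annotations.
 */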
@@ -94,6 +94,53 @@ out:
 }
 EXPORT_SYMBOL_GPL(qca_read_soc_version);
 
+static int qca_read_fw_build_info(struct hci_dev *hdev)
+{
+        struct sk_buff *skb;
+        struct edl_event_hdr *edl;
+        char cmd, build_label[QCA_FW_BUILD_VER_LEN];
+        int build_lbl_len, err = 0;
+
+        bt_dev_dbg(hdev, "QCA read fw build info");
+
+        cmd = EDL_GET_BUILD_INFO_CMD;
+        skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN,
+                                &cmd, 0, HCI_INIT_TIMEOUT);
+        if (IS_ERR(skb)) {
+                err = PTR_ERR(skb);
+                bt_dev_err(hdev, "Reading QCA fw build info failed (%d)",
+                           err);
+                return err;
+        }
+
+        edl = (struct edl_event_hdr *)(skb->data);
+        if (!edl) {
+                bt_dev_err(hdev, "QCA read fw build info with no header");
+                err = -EILSEQ;
+                goto out;
+        }
+
+        if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
+            edl->rtype != EDL_GET_BUILD_INFO_CMD) {
+                bt_dev_err(hdev, "QCA Wrong packet received %d %d", edl->cresp,
+                           edl->rtype);
+                err = -EIO;
+                goto out;
+        }
+
+        build_lbl_len = edl->data[0];
+        if (build_lbl_len <= QCA_FW_BUILD_VER_LEN - 1) {
+                memcpy(build_label, edl->data + 1, build_lbl_len);
+                *(build_label + build_lbl_len) = '\0';
+        }
+
+        hci_set_fw_info(hdev, "%s", build_label);
+
+out:
+        kfree_skb(skb);
+        return err;
+}
+
 static int qca_send_reset(struct hci_dev *hdev)
 {
         struct sk_buff *skb;

@@ -517,6 +564,19 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
                 return err;
         }
 
+        /* WCN399x supports the Microsoft vendor extension with 0xFD70 as the
+         * VsMsftOpCode.
+         */
+        switch (soc_type) {
+        case QCA_WCN3990:
+        case QCA_WCN3991:
+        case QCA_WCN3998:
+                hci_set_msft_opcode(hdev, 0xFD70);
+                break;
+        default:
+                break;
+        }
+
         /* Perform HCI reset */
         err = qca_send_reset(hdev);
         if (err < 0) {

@ -524,6 +584,13 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
|
||||
return err;
|
||||
}
|
||||
|
||||
if (soc_type == QCA_WCN3991) {
|
||||
/* get fw build info */
|
||||
err = qca_read_fw_build_info(hdev);
|
||||
if (err < 0)
|
||||
return err;
|
||||
}
|
||||
|
||||
bt_dev_info(hdev, "QCA setup on UART is completed");
|
||||
|
||||
return 0;
|
||||
|
@ -11,6 +11,7 @@
|
||||
#define EDL_PATCH_CMD_LEN (1)
|
||||
#define EDL_PATCH_VER_REQ_CMD (0x19)
|
||||
#define EDL_PATCH_TLV_REQ_CMD (0x1E)
|
||||
#define EDL_GET_BUILD_INFO_CMD (0x20)
|
||||
#define EDL_NVM_ACCESS_SET_REQ_CMD (0x01)
|
||||
#define MAX_SIZE_PER_TLV_SEGMENT (243)
|
||||
#define QCA_PRE_SHUTDOWN_CMD (0xFC08)
|
||||
|
@ -142,12 +142,16 @@ static int btqcomsmd_probe(struct platform_device *pdev)
|
||||
|
||||
btq->cmd_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_CMD",
|
||||
btqcomsmd_cmd_callback, btq);
|
||||
if (IS_ERR(btq->cmd_channel))
|
||||
return PTR_ERR(btq->cmd_channel);
|
||||
if (IS_ERR(btq->cmd_channel)) {
|
||||
ret = PTR_ERR(btq->cmd_channel);
|
||||
goto destroy_acl_channel;
|
||||
}
|
||||
|
||||
hdev = hci_alloc_dev();
|
||||
if (!hdev)
|
||||
return -ENOMEM;
|
||||
if (!hdev) {
|
||||
ret = -ENOMEM;
|
||||
goto destroy_cmd_channel;
|
||||
}
|
||||
|
||||
hci_set_drvdata(hdev, btq);
|
||||
btq->hdev = hdev;
|
||||
@ -161,14 +165,21 @@ static int btqcomsmd_probe(struct platform_device *pdev)
|
||||
hdev->set_bdaddr = qca_set_bdaddr_rome;
|
||||
|
||||
ret = hci_register_dev(hdev);
|
||||
if (ret < 0) {
|
||||
hci_free_dev(hdev);
|
||||
return ret;
|
||||
}
|
||||
if (ret < 0)
|
||||
goto hci_free_dev;
|
||||
|
||||
platform_set_drvdata(pdev, btq);
|
||||
|
||||
return 0;
|
||||
|
||||
hci_free_dev:
|
||||
hci_free_dev(hdev);
|
||||
destroy_cmd_channel:
|
||||
rpmsg_destroy_ept(btq->cmd_channel);
|
||||
destroy_acl_channel:
|
||||
rpmsg_destroy_ept(btq->acl_channel);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int btqcomsmd_remove(struct platform_device *pdev)
|
||||
|
@ -38,6 +38,19 @@
|
||||
.hci_ver = (hciv), \
|
||||
.hci_bus = (bus)
|
||||
|
||||
enum btrtl_chip_id {
|
||||
CHIP_ID_8723A,
|
||||
CHIP_ID_8723B,
|
||||
CHIP_ID_8821A,
|
||||
CHIP_ID_8761A,
|
||||
CHIP_ID_8822B = 8,
|
||||
CHIP_ID_8723D,
|
||||
CHIP_ID_8821C,
|
||||
CHIP_ID_8822C = 13,
|
||||
CHIP_ID_8761B,
|
||||
CHIP_ID_8852A = 18,
|
||||
};
|
||||
|
||||
struct id_table {
|
||||
__u16 match_flags;
|
||||
__u16 lmp_subver;
|
||||
@ -58,6 +71,7 @@ struct btrtl_device_info {
|
||||
u8 *cfg_data;
|
||||
int cfg_len;
|
||||
bool drop_fw;
|
||||
int project_id;
|
||||
};
|
||||
|
||||
static const struct id_table ic_id_table[] = {
|
||||
@ -307,9 +321,11 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
|
||||
|
||||
/* Find project_id in table */
|
||||
for (i = 0; i < ARRAY_SIZE(project_id_to_lmp_subver); i++) {
|
||||
if (project_id == project_id_to_lmp_subver[i].id)
|
||||
if (project_id == project_id_to_lmp_subver[i].id) {
|
||||
btrtl_dev->project_id = project_id;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (i >= ARRAY_SIZE(project_id_to_lmp_subver)) {
|
||||
rtl_dev_err(hdev, "unknown project id %d", project_id);
|
||||
@ -658,6 +674,12 @@ out_free:
|
||||
}
|
||||
}
|
||||
|
||||
/* RTL8822CE supports the Microsoft vendor extension and uses 0xFCF0
|
||||
* for VsMsftOpCode.
|
||||
*/
|
||||
if (lmp_subver == RTL_ROM_LMP_8822B)
|
||||
hci_set_msft_opcode(hdev, 0xFCF0);
|
||||
|
||||
return btrtl_dev;
|
||||
|
||||
err_free:
|
||||
@ -708,13 +730,28 @@ int btrtl_setup_realtek(struct hci_dev *hdev)
|
||||
|
||||
ret = btrtl_download_firmware(hdev, btrtl_dev);
|
||||
|
||||
btrtl_free(btrtl_dev);
|
||||
|
||||
/* Enable controller to do both LE scan and BR/EDR inquiry
|
||||
* simultaneously.
|
||||
*/
|
||||
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
|
||||
|
||||
/* Enable central-peripheral role (able to create new connections with
|
||||
* an existing connection in slave role).
|
||||
*/
|
||||
/* Enable WBS supported for the specific Realtek devices. */
|
||||
switch (btrtl_dev->project_id) {
|
||||
case CHIP_ID_8822C:
|
||||
case CHIP_ID_8852A:
|
||||
set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
|
||||
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
|
||||
break;
|
||||
default:
|
||||
rtl_dev_dbg(hdev, "Central-peripheral role not enabled.");
|
||||
rtl_dev_dbg(hdev, "WBS supported not enabled.");
|
||||
break;
|
||||
}
|
||||
|
||||
btrtl_free(btrtl_dev);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(btrtl_setup_realtek);
|
||||
|
@ -368,6 +368,8 @@ static const struct usb_device_id blacklist_table[] = {
|
||||
BTUSB_WIDEBAND_SPEECH },
|
||||
{ USB_DEVICE(0x8087, 0x0032), .driver_info = BTUSB_INTEL_NEWGEN |
|
||||
BTUSB_WIDEBAND_SPEECH},
|
||||
{ USB_DEVICE(0x8087, 0x0033), .driver_info = BTUSB_INTEL_NEWGEN |
|
||||
BTUSB_WIDEBAND_SPEECH},
|
||||
{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
|
||||
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
|
||||
{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
|
||||
@ -506,7 +508,6 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
|
||||
#define BTUSB_HW_RESET_ACTIVE 12
|
||||
#define BTUSB_TX_WAIT_VND_EVT 13
|
||||
#define BTUSB_WAKEUP_DISABLE 14
|
||||
#define BTUSB_USE_ALT1_FOR_WBS 15
|
||||
|
||||
struct btusb_data {
|
||||
struct hci_dev *hdev;
|
||||
@ -1736,15 +1737,12 @@ static void btusb_work(struct work_struct *work)
|
||||
new_alts = data->sco_num;
|
||||
}
|
||||
} else if (data->air_mode == HCI_NOTIFY_ENABLE_SCO_TRANSP) {
|
||||
/* Check if Alt 6 is supported for Transparent audio */
|
||||
if (btusb_find_altsetting(data, 6)) {
|
||||
data->usb_alt6_packet_flow = true;
|
||||
new_alts = 6;
|
||||
} else if (test_bit(BTUSB_USE_ALT1_FOR_WBS, &data->flags)) {
|
||||
new_alts = 1;
|
||||
} else {
|
||||
bt_dev_err(hdev, "Device does not support ALT setting 6");
|
||||
}
|
||||
/* Bluetooth USB spec recommends alt 6 (63 bytes), but
|
||||
* many adapters do not support it. Alt 1 appears to
|
||||
* work for all adapters that do not have alt 6, and
|
||||
* which work with WBS at all.
|
||||
*/
|
||||
new_alts = btusb_find_altsetting(data, 6) ? 6 : 1;
|
||||
}
|
||||
|
||||
if (btusb_switch_alt_setting(hdev, new_alts) < 0)
|
||||
@ -1903,7 +1901,7 @@ static int btusb_setup_csr(struct hci_dev *hdev)
|
||||
le16_to_cpu(rp->lmp_subver) == 0x1012 &&
|
||||
le16_to_cpu(rp->hci_rev) == 0x0810 &&
|
||||
le16_to_cpu(rp->hci_ver) == BLUETOOTH_VER_4_0) {
|
||||
bt_dev_warn(hdev, "CSR: detected a fake CSR dongle using a Barrot 8041a02 chip, this chip is very buggy and may have issues\n");
|
||||
bt_dev_warn(hdev, "CSR: detected a fake CSR dongle using a Barrot 8041a02 chip, this chip is very buggy and may have issues");
|
||||
|
||||
pm_runtime_allow(&data->udev->dev);
|
||||
|
||||
@ -1911,7 +1909,7 @@ static int btusb_setup_csr(struct hci_dev *hdev)
|
||||
if (ret >= 0)
|
||||
msleep(200);
|
||||
else
|
||||
bt_dev_err(hdev, "Failed to suspend the device for Barrot 8041a02 receive-issue workaround\n");
|
||||
bt_dev_err(hdev, "Failed to suspend the device for Barrot 8041a02 receive-issue workaround");
|
||||
|
||||
pm_runtime_forbid(&data->udev->dev);
|
||||
|
||||
@ -2924,7 +2922,10 @@ finish:
|
||||
* extension are using 0xFC1E for VsMsftOpCode.
|
||||
*/
|
||||
switch (ver.hw_variant) {
|
||||
case 0x11: /* JfP */
|
||||
case 0x12: /* ThP */
|
||||
case 0x13: /* HrP */
|
||||
case 0x14: /* CcP */
|
||||
hci_set_msft_opcode(hdev, 0xFC1E);
|
||||
break;
|
||||
}
|
||||
@ -3127,6 +3128,12 @@ static int btusb_shutdown_intel_new(struct hci_dev *hdev)
|
||||
#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"
|
||||
|
||||
#define HCI_WMT_MAX_EVENT_SIZE 64
|
||||
/* It is for mt79xx download rom patch*/
|
||||
#define MTK_FW_ROM_PATCH_HEADER_SIZE 32
|
||||
#define MTK_FW_ROM_PATCH_GD_SIZE 64
|
||||
#define MTK_FW_ROM_PATCH_SEC_MAP_SIZE 64
|
||||
#define MTK_SEC_MAP_COMMON_SIZE 12
|
||||
#define MTK_SEC_MAP_NEED_SEND_SIZE 52
|
||||
|
||||
enum {
|
||||
BTMTK_WMT_PATCH_DWNLD = 0x1,
|
||||
@ -3138,6 +3145,7 @@ enum {
|
||||
enum {
|
||||
BTMTK_WMT_INVALID,
|
||||
BTMTK_WMT_PATCH_UNDONE,
|
||||
BTMTK_WMT_PATCH_PROGRESS,
|
||||
BTMTK_WMT_PATCH_DONE,
|
||||
BTMTK_WMT_ON_UNDONE,
|
||||
BTMTK_WMT_ON_DONE,
|
||||
@ -3153,7 +3161,7 @@ struct btmtk_wmt_hdr {
|
||||
|
||||
struct btmtk_hci_wmt_cmd {
|
||||
struct btmtk_wmt_hdr hdr;
|
||||
u8 data[256];
|
||||
u8 data[];
|
||||
} __packed;
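The btmtk_hci_wmt_cmd change above turns the fixed 256-byte data buffer into a C99 flexible array member; later in this patch the command is allocated on the heap with exactly the size the payload needs (kzalloc(hlen, GFP_KERNEL)) instead of living on the stack. A sketch of that allocation pattern follows, assuming struct_size() from <linux/overflow.h>; alloc_wmt_cmd() is a hypothetical helper, and the patch itself sizes the buffer via the precomputed hlen rather than struct_size().

#include <linux/overflow.h>
#include <linux/slab.h>

static struct btmtk_hci_wmt_cmd *alloc_wmt_cmd(size_t dlen)
{
        struct btmtk_hci_wmt_cmd *wc;

        /* header plus dlen payload bytes, zero-initialised */
        wc = kzalloc(struct_size(wc, data, dlen), GFP_KERNEL);
        return wc; /* caller checks for NULL, fills wc->hdr and wc->data */
}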
|
||||
|
||||
struct btmtk_hci_wmt_evt {
|
||||
@ -3182,6 +3190,40 @@ struct btmtk_hci_wmt_params {
|
||||
u32 *status;
|
||||
};
|
||||
|
||||
struct btmtk_patch_header {
|
||||
u8 datetime[16];
|
||||
u8 platform[4];
|
||||
__le16 hwver;
|
||||
__le16 swver;
|
||||
__le32 magicnum;
|
||||
} __packed;
|
||||
|
||||
struct btmtk_global_desc {
|
||||
__le32 patch_ver;
|
||||
__le32 sub_sys;
|
||||
__le32 feature_opt;
|
||||
__le32 section_num;
|
||||
} __packed;
|
||||
|
||||
struct btmtk_section_map {
|
||||
__le32 sectype;
|
||||
__le32 secoffset;
|
||||
__le32 secsize;
|
||||
union {
|
||||
__le32 u4SecSpec[13];
|
||||
struct {
|
||||
__le32 dlAddr;
|
||||
__le32 dlsize;
|
||||
__le32 seckeyidx;
|
||||
__le32 alignlen;
|
||||
__le32 sectype;
|
||||
__le32 dlmodecrctype;
|
||||
__le32 crc;
|
||||
__le32 reserved[6];
|
||||
} bin_info_spec;
|
||||
};
|
||||
} __packed;
|
||||
|
||||
static void btusb_mtk_wmt_recv(struct urb *urb)
|
||||
{
|
||||
struct hci_dev *hdev = urb->context;
|
||||
@ -3199,7 +3241,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
|
||||
skb = bt_skb_alloc(HCI_WMT_MAX_EVENT_SIZE, GFP_ATOMIC);
|
||||
if (!skb) {
|
||||
hdev->stat.err_rx++;
|
||||
goto err_out;
|
||||
return;
|
||||
}
|
||||
|
||||
hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
|
||||
@ -3217,13 +3259,18 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
|
||||
*/
|
||||
if (test_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags)) {
|
||||
data->evt_skb = skb_clone(skb, GFP_ATOMIC);
|
||||
if (!data->evt_skb)
|
||||
goto err_out;
|
||||
if (!data->evt_skb) {
|
||||
kfree_skb(skb);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
err = hci_recv_frame(hdev, skb);
|
||||
if (err < 0)
|
||||
goto err_free_skb;
|
||||
if (err < 0) {
|
||||
kfree_skb(data->evt_skb);
|
||||
data->evt_skb = NULL;
|
||||
return;
|
||||
}
|
||||
|
||||
if (test_and_clear_bit(BTUSB_TX_WAIT_VND_EVT,
|
||||
&data->flags)) {
|
||||
@ -3232,11 +3279,6 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
|
||||
wake_up_bit(&data->flags,
|
||||
BTUSB_TX_WAIT_VND_EVT);
|
||||
}
|
||||
err_out:
|
||||
return;
|
||||
err_free_skb:
|
||||
kfree_skb(data->evt_skb);
|
||||
data->evt_skb = NULL;
|
||||
return;
|
||||
} else if (urb->status == -ENOENT) {
|
||||
/* Avoid suspend failed when usb_kill_urb */
|
||||
@ -3252,7 +3294,7 @@ err_free_skb:
|
||||
* to generate the event. Otherwise, the WMT event cannot return from
|
||||
* the device successfully.
|
||||
*/
|
||||
udelay(100);
|
||||
udelay(500);
|
||||
|
||||
usb_anchor_urb(urb, &data->ctrl_anchor);
|
||||
err = usb_submit_urb(urb, GFP_ATOMIC);
|
||||
@ -3327,7 +3369,7 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
|
||||
struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
|
||||
u32 hlen, status = BTMTK_WMT_INVALID;
|
||||
struct btmtk_hci_wmt_evt *wmt_evt;
|
||||
struct btmtk_hci_wmt_cmd wc;
|
||||
struct btmtk_hci_wmt_cmd *wc;
|
||||
struct btmtk_wmt_hdr *hdr;
|
||||
int err;
|
||||
|
||||
@ -3341,20 +3383,24 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
|
||||
if (hlen > 255)
|
||||
return -EINVAL;
|
||||
|
||||
hdr = (struct btmtk_wmt_hdr *)&wc;
|
||||
wc = kzalloc(hlen, GFP_KERNEL);
|
||||
if (!wc)
|
||||
return -ENOMEM;
|
||||
|
||||
hdr = &wc->hdr;
|
||||
hdr->dir = 1;
|
||||
hdr->op = wmt_params->op;
|
||||
hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
|
||||
hdr->flag = wmt_params->flag;
|
||||
memcpy(wc.data, wmt_params->data, wmt_params->dlen);
|
||||
memcpy(wc->data, wmt_params->data, wmt_params->dlen);
|
||||
|
||||
set_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
|
||||
|
||||
err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
|
||||
err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc);
|
||||
|
||||
if (err < 0) {
|
||||
clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
|
||||
return err;
|
||||
goto err_free_wc;
|
||||
}
|
||||
|
||||
/* The vendor specific WMT commands are all answered by a vendor
|
||||
@ -3371,13 +3417,14 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
|
||||
if (err == -EINTR) {
|
||||
bt_dev_err(hdev, "Execution of wmt command interrupted");
|
||||
clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
|
||||
return err;
|
||||
goto err_free_wc;
|
||||
}
|
||||
|
||||
if (err) {
|
||||
bt_dev_err(hdev, "Execution of wmt command timed out");
|
||||
clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
|
||||
return -ETIMEDOUT;
|
||||
err = -ETIMEDOUT;
|
||||
goto err_free_wc;
|
||||
}
|
||||
|
||||
/* Parse and handle the return WMT event */
|
||||
@ -3405,6 +3452,14 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
|
||||
else
|
||||
status = BTMTK_WMT_ON_UNDONE;
|
||||
break;
|
||||
case BTMTK_WMT_PATCH_DWNLD:
|
||||
if (wmt_evt->whdr.flag == 2)
|
||||
status = BTMTK_WMT_PATCH_DONE;
|
||||
else if (wmt_evt->whdr.flag == 1)
|
||||
status = BTMTK_WMT_PATCH_PROGRESS;
|
||||
else
|
||||
status = BTMTK_WMT_PATCH_UNDONE;
|
||||
break;
|
||||
}
|
||||
|
||||
if (wmt_params->status)
|
||||
@ -3413,6 +3468,119 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
|
||||
err_free_skb:
|
||||
kfree_skb(data->evt_skb);
|
||||
data->evt_skb = NULL;
|
||||
err_free_wc:
|
||||
kfree(wc);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int btusb_mtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname)
|
||||
{
|
||||
struct btmtk_hci_wmt_params wmt_params;
|
||||
struct btmtk_global_desc *globaldesc = NULL;
|
||||
struct btmtk_section_map *sectionmap;
|
||||
const struct firmware *fw;
|
||||
const u8 *fw_ptr;
|
||||
const u8 *fw_bin_ptr;
|
||||
int err, dlen, i, status;
|
||||
u8 flag, first_block, retry;
|
||||
u32 section_num, dl_size, section_offset;
|
||||
u8 cmd[64];
|
||||
|
||||
err = request_firmware(&fw, fwname, &hdev->dev);
|
||||
if (err < 0) {
|
||||
bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
fw_ptr = fw->data;
|
||||
fw_bin_ptr = fw_ptr;
|
||||
globaldesc = (struct btmtk_global_desc *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE);
|
||||
section_num = globaldesc->section_num;
|
||||
|
||||
for (i = 0; i < section_num; i++) {
|
||||
first_block = 1;
|
||||
fw_ptr = fw_bin_ptr;
|
||||
sectionmap = (struct btmtk_section_map *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE +
|
||||
MTK_FW_ROM_PATCH_GD_SIZE + MTK_FW_ROM_PATCH_SEC_MAP_SIZE * i);
|
||||
|
||||
section_offset = sectionmap->secoffset;
|
||||
dl_size = sectionmap->bin_info_spec.dlsize;
|
||||
|
||||
if (dl_size > 0) {
|
||||
retry = 20;
|
||||
while (retry > 0) {
|
||||
cmd[0] = 0; /* 0 means legacy dl mode. */
|
||||
memcpy(cmd + 1,
|
||||
fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE +
|
||||
MTK_FW_ROM_PATCH_GD_SIZE + MTK_FW_ROM_PATCH_SEC_MAP_SIZE * i +
|
||||
MTK_SEC_MAP_COMMON_SIZE,
|
||||
MTK_SEC_MAP_NEED_SEND_SIZE + 1);
|
||||
|
||||
wmt_params.op = BTMTK_WMT_PATCH_DWNLD;
|
||||
wmt_params.status = &status;
|
||||
wmt_params.flag = 0;
|
||||
wmt_params.dlen = MTK_SEC_MAP_NEED_SEND_SIZE + 1;
|
||||
wmt_params.data = &cmd;
|
||||
|
||||
err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
|
||||
if (err < 0) {
|
||||
bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
|
||||
err);
|
||||
goto err_release_fw;
|
||||
}
|
||||
|
||||
if (status == BTMTK_WMT_PATCH_UNDONE) {
|
||||
break;
|
||||
} else if (status == BTMTK_WMT_PATCH_PROGRESS) {
|
||||
msleep(100);
|
||||
retry--;
|
||||
} else if (status == BTMTK_WMT_PATCH_DONE) {
|
||||
goto next_section;
|
||||
} else {
|
||||
bt_dev_err(hdev, "Failed wmt patch dwnld status (%d)",
|
||||
status);
|
||||
goto err_release_fw;
|
||||
}
|
||||
}
|
||||
|
||||
fw_ptr += section_offset;
|
||||
wmt_params.op = BTMTK_WMT_PATCH_DWNLD;
|
||||
wmt_params.status = NULL;
|
||||
|
||||
while (dl_size > 0) {
|
||||
dlen = min_t(int, 250, dl_size);
|
||||
if (first_block == 1) {
|
||||
flag = 1;
|
||||
first_block = 0;
|
||||
} else if (dl_size - dlen <= 0) {
|
||||
flag = 3;
|
||||
} else {
|
||||
flag = 2;
|
||||
}
|
||||
|
||||
wmt_params.flag = flag;
|
||||
wmt_params.dlen = dlen;
|
||||
wmt_params.data = fw_ptr;
|
||||
|
||||
err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
|
||||
if (err < 0) {
|
||||
bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
|
||||
err);
|
||||
goto err_release_fw;
|
||||
}
|
||||
|
||||
dl_size -= dlen;
|
||||
fw_ptr += dlen;
|
||||
}
|
||||
}
|
||||
next_section:
|
||||
continue;
|
||||
}
|
||||
/* Wait a few moments for firmware activation done */
|
||||
usleep_range(100000, 120000);
|
||||
|
||||
err_release_fw:
|
||||
release_firmware(fw);
|
||||
|
||||
return err;
|
||||
}
|
||||
@ -3465,7 +3633,7 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
|
||||
while (fw_size > 0) {
|
||||
dlen = min_t(int, 250, fw_size);
|
||||
|
||||
/* Tell deivice the position in sequence */
|
||||
/* Tell device the position in sequence */
|
||||
if (fw_size - dlen <= 0)
|
||||
flag = 3;
|
||||
else if (fw_size < fw->size - 30)
|
||||
@ -3555,9 +3723,9 @@ err_free_buf:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int btusb_mtk_id_get(struct btusb_data *data, u32 *id)
|
||||
static int btusb_mtk_id_get(struct btusb_data *data, u32 reg, u32 *id)
|
||||
{
|
||||
return btusb_mtk_reg_read(data, 0x80000008, id);
|
||||
return btusb_mtk_reg_read(data, reg, id);
|
||||
}
|
||||
|
||||
static int btusb_mtk_setup(struct hci_dev *hdev)
|
||||
@ -3571,16 +3739,31 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
|
||||
const char *fwname;
|
||||
int err, status;
|
||||
u32 dev_id;
|
||||
char fw_bin_name[64];
|
||||
u32 fw_version;
|
||||
u8 param;
|
||||
|
||||
calltime = ktime_get();
|
||||
|
||||
err = btusb_mtk_id_get(data, &dev_id);
|
||||
err = btusb_mtk_id_get(data, 0x80000008, &dev_id);
|
||||
if (err < 0) {
|
||||
bt_dev_err(hdev, "Failed to get device id (%d)", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
if (!dev_id) {
|
||||
err = btusb_mtk_id_get(data, 0x70010200, &dev_id);
|
||||
if (err < 0) {
|
||||
bt_dev_err(hdev, "Failed to get device id (%d)", err);
|
||||
return err;
|
||||
}
|
||||
err = btusb_mtk_id_get(data, 0x80021004, &fw_version);
|
||||
if (err < 0) {
|
||||
bt_dev_err(hdev, "Failed to get fw version (%d)", err);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
switch (dev_id) {
|
||||
case 0x7663:
|
||||
fwname = FIRMWARE_MT7663;
|
||||
@ -3588,8 +3771,28 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
|
||||
case 0x7668:
|
||||
fwname = FIRMWARE_MT7668;
|
||||
break;
|
||||
case 0x7961:
|
||||
snprintf(fw_bin_name, sizeof(fw_bin_name),
|
||||
"mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
|
||||
dev_id & 0xffff, (fw_version & 0xff) + 1);
|
||||
err = btusb_mtk_setup_firmware_79xx(hdev, fw_bin_name);
|
||||
|
||||
/* Enable Bluetooth protocol */
|
||||
param = 1;
|
||||
wmt_params.op = BTMTK_WMT_FUNC_CTRL;
|
||||
wmt_params.flag = 0;
|
||||
wmt_params.dlen = sizeof(param);
|
||||
wmt_params.data = ¶m;
|
||||
wmt_params.status = NULL;
|
||||
|
||||
err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
|
||||
if (err < 0) {
|
||||
bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
|
||||
return err;
|
||||
}
|
||||
goto done;
|
||||
default:
|
||||
bt_dev_err(hdev, "Unsupported support hardware variant (%08x)",
|
||||
bt_dev_err(hdev, "Unsupported hardware variant (%08x)",
|
||||
dev_id);
|
||||
return -ENODEV;
|
||||
}
|
||||
@ -3665,6 +3868,7 @@ ignore_func_on:
|
||||
}
|
||||
kfree_skb(skb);
|
||||
|
||||
done:
|
||||
rettime = ktime_get();
|
||||
delta = ktime_sub(rettime, calltime);
|
||||
duration = (unsigned long long)ktime_to_ns(delta) >> 10;
|
||||
@ -3725,7 +3929,7 @@ static int marvell_config_oob_wake(struct hci_dev *hdev)
|
||||
|
||||
skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
|
||||
if (!skb) {
|
||||
bt_dev_err(hdev, "%s: No memory\n", __func__);
|
||||
bt_dev_err(hdev, "%s: No memory", __func__);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
@ -3734,7 +3938,7 @@ static int marvell_config_oob_wake(struct hci_dev *hdev)
|
||||
|
||||
ret = btusb_send_frame(hdev, skb);
|
||||
if (ret) {
|
||||
bt_dev_err(hdev, "%s: configuration failed\n", __func__);
|
||||
bt_dev_err(hdev, "%s: configuration failed", __func__);
|
||||
kfree_skb(skb);
|
||||
return ret;
|
||||
}
|
||||
@ -4069,6 +4273,13 @@ static int btusb_setup_qca(struct hci_dev *hdev)
|
||||
info = &qca_devices_table[i];
|
||||
}
|
||||
if (!info) {
|
||||
/* If the rom_version is not matched in the qca_devices_table
|
||||
* and the high ROM version is not zero, we assume this chip no
|
||||
* need to load the rampatch and nvm.
|
||||
*/
|
||||
if (ver_rom & ~0xffffU)
|
||||
return 0;
|
||||
|
||||
bt_dev_err(hdev, "don't support firmware rome 0x%x", ver_rom);
|
||||
return -ENODEV;
|
||||
}
|
||||
@ -4264,6 +4475,20 @@ static bool btusb_prevent_wake(struct hci_dev *hdev)
|
||||
return !device_may_wakeup(&data->udev->dev);
|
||||
}
|
||||
|
||||
static int btusb_shutdown_qca(struct hci_dev *hdev)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
|
||||
skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
|
||||
if (IS_ERR(skb)) {
|
||||
bt_dev_err(hdev, "HCI reset during shutdown failed");
|
||||
return PTR_ERR(skb);
|
||||
}
|
||||
kfree_skb(skb);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int btusb_probe(struct usb_interface *intf,
|
||||
const struct usb_device_id *id)
|
||||
{
|
||||
@ -4523,6 +4748,7 @@ static int btusb_probe(struct usb_interface *intf,
|
||||
|
||||
if (id->driver_info & BTUSB_QCA_WCN6855) {
|
||||
data->setup_on_usb = btusb_setup_qca;
|
||||
hdev->shutdown = btusb_shutdown_qca;
|
||||
hdev->set_bdaddr = btusb_set_bdaddr_wcn6855;
|
||||
hdev->cmd_timeout = btusb_qca_cmd_timeout;
|
||||
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
|
||||
@ -4548,10 +4774,6 @@ static int btusb_probe(struct usb_interface *intf,
|
||||
* (DEVICE_REMOTE_WAKEUP)
|
||||
*/
|
||||
set_bit(BTUSB_WAKEUP_DISABLE, &data->flags);
|
||||
if (btusb_find_altsetting(data, 1))
|
||||
set_bit(BTUSB_USE_ALT1_FOR_WBS, &data->flags);
|
||||
else
|
||||
bt_dev_err(hdev, "Device does not support ALT setting 1");
|
||||
}
|
||||
|
||||
if (!reset)
|
||||
@ -4627,8 +4849,8 @@ static int btusb_probe(struct usb_interface *intf,
|
||||
data->diag = NULL;
|
||||
}
|
||||
|
||||
if (enable_autosuspend)
|
||||
usb_enable_autosuspend(data->udev);
|
||||
if (!enable_autosuspend)
|
||||
usb_disable_autosuspend(data->udev);
|
||||
|
||||
err = hci_register_dev(hdev);
|
||||
if (err < 0)
|
||||
@ -4688,6 +4910,9 @@ static void btusb_disconnect(struct usb_interface *intf)
|
||||
gpiod_put(data->reset_gpio);
|
||||
|
||||
hci_free_dev(hdev);
|
||||
|
||||
if (!enable_autosuspend)
|
||||
usb_enable_autosuspend(data->udev);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
|
@ -654,6 +654,7 @@ static const struct h4_recv_pkt bcm_recv_pkts[] = {
|
||||
{ H4_RECV_ACL, .recv = hci_recv_frame },
|
||||
{ H4_RECV_SCO, .recv = hci_recv_frame },
|
||||
{ H4_RECV_EVENT, .recv = hci_recv_frame },
|
||||
{ H4_RECV_ISO, .recv = hci_recv_frame },
|
||||
{ BCM_RECV_LM_DIAG, .recv = hci_recv_diag },
|
||||
{ BCM_RECV_NULL, .recv = hci_recv_diag },
|
||||
{ BCM_RECV_TYPE49, .recv = hci_recv_diag },
|
||||
|
@ -906,6 +906,11 @@ static int h5_btrtl_setup(struct h5 *h5)
|
||||
/* Give the device some time before the hci-core sends it a reset */
|
||||
usleep_range(10000, 20000);
|
||||
|
||||
/* Enable controller to do both LE scan and BR/EDR inquiry
|
||||
* simultaneously.
|
||||
*/
|
||||
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &h5->hu->hdev->quirks);
|
||||
|
||||
out_free:
|
||||
btrtl_free(btrtl_dev);
|
||||
|
||||
@ -1022,6 +1027,8 @@ static const struct of_device_id rtl_bluetooth_of_match[] = {
|
||||
.data = (const void *)&rtl_vnd },
|
||||
{ .compatible = "realtek,rtl8723bs-bt",
|
||||
.data = (const void *)&rtl_vnd },
|
||||
{ .compatible = "realtek,rtl8723ds-bt",
|
||||
.data = (const void *)&rtl_vnd },
|
||||
#endif
|
||||
{ },
|
||||
};
|
||||
|
@ -127,10 +127,9 @@ int hci_uart_tx_wakeup(struct hci_uart *hu)
|
||||
if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
|
||||
goto no_schedule;
|
||||
|
||||
if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) {
|
||||
set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
|
||||
if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state))
|
||||
goto no_schedule;
|
||||
}
|
||||
|
||||
BT_DBG("");
|
||||
|
||||
@ -174,10 +173,10 @@ restart:
|
||||
kfree_skb(skb);
|
||||
}
|
||||
|
||||
clear_bit(HCI_UART_SENDING, &hu->tx_state);
|
||||
if (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state))
|
||||
goto restart;
|
||||
|
||||
clear_bit(HCI_UART_SENDING, &hu->tx_state);
|
||||
wake_up_bit(&hu->tx_state, HCI_UART_SENDING);
|
||||
}
|
||||
|
||||
|
@ -50,7 +50,8 @@
|
||||
#define IBS_HOST_TX_IDLE_TIMEOUT_MS 2000
|
||||
#define CMD_TRANS_TIMEOUT_MS 100
|
||||
#define MEMDUMP_TIMEOUT_MS 8000
|
||||
#define IBS_DISABLE_SSR_TIMEOUT_MS (MEMDUMP_TIMEOUT_MS + 1000)
|
||||
#define IBS_DISABLE_SSR_TIMEOUT_MS \
|
||||
(MEMDUMP_TIMEOUT_MS + FW_DOWNLOAD_TIMEOUT_MS)
|
||||
#define FW_DOWNLOAD_TIMEOUT_MS 3000
|
||||
|
||||
/* susclk rate */
|
||||
@ -76,7 +77,8 @@ enum qca_flags {
|
||||
QCA_MEMDUMP_COLLECTION,
|
||||
QCA_HW_ERROR_EVENT,
|
||||
QCA_SSR_TRIGGERED,
|
||||
QCA_BT_OFF
|
||||
QCA_BT_OFF,
|
||||
QCA_ROM_FW
|
||||
};
|
||||
|
||||
enum qca_capabilities {
|
||||
@ -1024,7 +1026,9 @@ static void qca_controller_memdump(struct work_struct *work)
|
||||
dump_size = __le32_to_cpu(dump->dump_size);
|
||||
if (!(dump_size)) {
|
||||
bt_dev_err(hu->hdev, "Rx invalid memdump size");
|
||||
kfree(qca_memdump);
|
||||
kfree_skb(skb);
|
||||
qca->qca_memdump = NULL;
|
||||
mutex_unlock(&qca->hci_memdump_lock);
|
||||
return;
|
||||
}
|
||||
@ -1661,6 +1665,7 @@ static int qca_setup(struct hci_uart *hu)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
clear_bit(QCA_ROM_FW, &qca->flags);
|
||||
/* Patch downloading has to be done without IBS mode */
|
||||
set_bit(QCA_IBS_DISABLED, &qca->flags);
|
||||
|
||||
@ -1718,12 +1723,14 @@ retry:
|
||||
hu->hdev->cmd_timeout = qca_cmd_timeout;
|
||||
} else if (ret == -ENOENT) {
|
||||
/* No patch/nvm-config found, run with original fw/config */
|
||||
set_bit(QCA_ROM_FW, &qca->flags);
|
||||
ret = 0;
|
||||
} else if (ret == -EAGAIN) {
|
||||
/*
|
||||
* Userspace firmware loader will return -EAGAIN in case no
|
||||
* patch/nvm-config is found, so run with original fw/config.
|
||||
*/
|
||||
set_bit(QCA_ROM_FW, &qca->flags);
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
@ -2100,17 +2107,29 @@ static int __maybe_unused qca_suspend(struct device *dev)
|
||||
|
||||
set_bit(QCA_SUSPENDING, &qca->flags);
|
||||
|
||||
if (test_bit(QCA_BT_OFF, &qca->flags))
|
||||
/* if BT SoC is running with default firmware then it does not
|
||||
* support in-band sleep
|
||||
*/
|
||||
if (test_bit(QCA_ROM_FW, &qca->flags))
|
||||
return 0;
|
||||
|
||||
if (test_bit(QCA_IBS_DISABLED, &qca->flags)) {
|
||||
/* During SSR after memory dump collection, controller will be
|
||||
* powered off and then powered on. If controller is powered off
|
||||
* during SSR then we should wait until SSR is completed.
|
||||
*/
|
||||
if (test_bit(QCA_BT_OFF, &qca->flags) &&
|
||||
!test_bit(QCA_SSR_TRIGGERED, &qca->flags))
|
||||
return 0;
|
||||
|
||||
if (test_bit(QCA_IBS_DISABLED, &qca->flags) ||
|
||||
test_bit(QCA_SSR_TRIGGERED, &qca->flags)) {
|
||||
wait_timeout = test_bit(QCA_SSR_TRIGGERED, &qca->flags) ?
|
||||
IBS_DISABLE_SSR_TIMEOUT_MS :
|
||||
FW_DOWNLOAD_TIMEOUT_MS;
|
||||
|
||||
/* QCA_IBS_DISABLED flag is set to true, During FW download
|
||||
* and during memory dump collection. It is reset to false,
|
||||
* After FW download complete and after memory dump collections.
|
||||
* After FW download complete.
|
||||
*/
|
||||
wait_on_bit_timeout(&qca->flags, QCA_IBS_DISABLED,
|
||||
TASK_UNINTERRUPTIBLE, msecs_to_jiffies(wait_timeout));
|
||||
@ -2122,10 +2141,6 @@ static int __maybe_unused qca_suspend(struct device *dev)
|
||||
}
|
||||
}
|
||||
|
||||
/* After memory dump collection, Controller is powered off.*/
|
||||
if (test_bit(QCA_BT_OFF, &qca->flags))
|
||||
return 0;
|
||||
|
||||
cancel_work_sync(&qca->ws_awake_device);
|
||||
cancel_work_sync(&qca->ws_awake_rx);
|
||||
|
||||
|
@ -83,9 +83,9 @@ static void hci_uart_write_work(struct work_struct *work)
|
||||
hci_uart_tx_complete(hu, hci_skb_pkt_type(skb));
|
||||
kfree_skb(skb);
|
||||
}
|
||||
} while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state));
|
||||
|
||||
clear_bit(HCI_UART_SENDING, &hu->tx_state);
|
||||
} while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state));
|
||||
}
|
||||
|
||||
/* ------- Interface to HCI layer ------ */
|
||||
|
@ -238,6 +238,14 @@ enum {
|
||||
* during the hdev->setup vendor callback.
|
||||
*/
|
||||
HCI_QUIRK_BROKEN_ERR_DATA_REPORTING,
|
||||
|
||||
/*
|
||||
* When this quirk is set, then the hci_suspend_notifier is not
|
||||
* registered. This is intended for devices which drop completely
|
||||
* from the bus on system-suspend and which will show up as a new
|
||||
* HCI after resume.
|
||||
*/
|
||||
HCI_QUIRK_NO_SUSPEND_NOTIFIER,
|
||||
};
|
||||
|
||||
/* HCI device flags */
|
||||
|
@ -105,6 +105,8 @@ enum suspend_tasks {
|
||||
SUSPEND_POWERING_DOWN,
|
||||
|
||||
SUSPEND_PREPARE_NOTIFIER,
|
||||
|
||||
SUSPEND_SET_ADV_FILTER,
|
||||
__SUSPEND_NUM_TASKS
|
||||
};
|
||||
|
||||
@ -250,15 +252,31 @@ struct adv_pattern {
|
||||
__u8 value[HCI_MAX_AD_LENGTH];
|
||||
};
|
||||
|
||||
struct adv_rssi_thresholds {
|
||||
__s8 low_threshold;
|
||||
__s8 high_threshold;
|
||||
__u16 low_threshold_timeout;
|
||||
__u16 high_threshold_timeout;
|
||||
__u8 sampling_period;
|
||||
};
|
||||
|
||||
struct adv_monitor {
|
||||
struct list_head patterns;
|
||||
bool active;
|
||||
struct adv_rssi_thresholds rssi;
|
||||
__u16 handle;
|
||||
|
||||
enum {
|
||||
ADV_MONITOR_STATE_NOT_REGISTERED,
|
||||
ADV_MONITOR_STATE_REGISTERED,
|
||||
ADV_MONITOR_STATE_OFFLOADED
|
||||
} state;
|
||||
};
|
||||
|
||||
#define HCI_MIN_ADV_MONITOR_HANDLE 1
|
||||
#define HCI_MAX_ADV_MONITOR_NUM_HANDLES 32
|
||||
#define HCI_MAX_ADV_MONITOR_NUM_PATTERNS 16
|
||||
#define HCI_ADV_MONITOR_EXT_NONE 1
|
||||
#define HCI_ADV_MONITOR_EXT_MSFT 2
|
||||
|
||||
#define HCI_MAX_SHORT_NAME_LENGTH 10
|
||||
|
||||
@ -1316,10 +1334,15 @@ int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);
|
||||
void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired);
|
||||
|
||||
void hci_adv_monitors_clear(struct hci_dev *hdev);
|
||||
void hci_free_adv_monitor(struct adv_monitor *monitor);
|
||||
int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor);
|
||||
int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle);
|
||||
void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor);
|
||||
int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status);
|
||||
int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status);
|
||||
bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
|
||||
int *err);
|
||||
bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err);
|
||||
bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err);
|
||||
bool hci_is_adv_monitoring(struct hci_dev *hdev);
|
||||
int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev);
|
||||
|
||||
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
|
||||
|
||||
@ -1342,6 +1365,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
|
||||
#define lmp_le_capable(dev) ((dev)->features[0][4] & LMP_LE)
|
||||
#define lmp_sniffsubr_capable(dev) ((dev)->features[0][5] & LMP_SNIFF_SUBR)
|
||||
#define lmp_pause_enc_capable(dev) ((dev)->features[0][5] & LMP_PAUSE_ENC)
|
||||
#define lmp_esco_2m_capable(dev) ((dev)->features[0][5] & LMP_EDR_ESCO_2M)
|
||||
#define lmp_ext_inq_capable(dev) ((dev)->features[0][6] & LMP_EXT_INQ)
|
||||
#define lmp_le_br_capable(dev) (!!((dev)->features[0][6] & LMP_SIMUL_LE_BR))
|
||||
#define lmp_ssp_capable(dev) ((dev)->features[0][6] & LMP_SIMPLE_PAIR)
|
||||
@ -1794,7 +1818,10 @@ void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev,
|
||||
u8 instance);
|
||||
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
|
||||
u8 instance);
|
||||
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle);
|
||||
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip);
|
||||
int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status);
|
||||
int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status);
|
||||
|
||||
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
|
||||
u16 to_multiplier);
|
||||
|
@ -207,6 +207,7 @@ struct l2cap_hdr {
|
||||
__le16 len;
|
||||
__le16 cid;
|
||||
} __packed;
|
||||
#define L2CAP_LEN_SIZE 2
|
||||
#define L2CAP_HDR_SIZE 4
|
||||
#define L2CAP_ENH_HDR_SIZE 6
|
||||
#define L2CAP_EXT_HDR_SIZE 8
|
||||
|
@ -821,6 +821,22 @@ struct mgmt_rp_add_ext_adv_data {
|
||||
__u8 instance;
|
||||
} __packed;
|
||||
|
||||
struct mgmt_adv_rssi_thresholds {
|
||||
__s8 high_threshold;
|
||||
__le16 high_threshold_timeout;
|
||||
__s8 low_threshold;
|
||||
__le16 low_threshold_timeout;
|
||||
__u8 sampling_period;
|
||||
} __packed;
|
||||
|
||||
#define MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI 0x0056
|
||||
struct mgmt_cp_add_adv_patterns_monitor_rssi {
|
||||
struct mgmt_adv_rssi_thresholds rssi;
|
||||
__u8 pattern_count;
|
||||
struct mgmt_adv_pattern patterns[];
|
||||
} __packed;
|
||||
#define MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE 8
|
||||
|
||||
#define MGMT_EV_CMD_COMPLETE 0x0001
|
||||
struct mgmt_ev_cmd_complete {
|
||||
__le16 opcode;
|
||||
|
@ -381,9 +381,9 @@ static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
|
||||
hdev = hci_dev_get(req->id);
|
||||
if (!hdev || hdev->amp_type == AMP_TYPE_BREDR || tmp) {
|
||||
struct a2mp_amp_assoc_rsp rsp;
|
||||
rsp.id = req->id;
|
||||
|
||||
memset(&rsp, 0, sizeof(rsp));
|
||||
rsp.id = req->id;
|
||||
|
||||
if (tmp) {
|
||||
rsp.status = A2MP_STATUS_COLLISION_OCCURED;
|
||||
@ -512,6 +512,7 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
|
||||
assoc = kmemdup(req->amp_assoc, assoc_len, GFP_KERNEL);
|
||||
if (!assoc) {
|
||||
amp_ctrl_put(ctrl);
|
||||
hci_dev_put(hdev);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
|
@ -508,7 +508,7 @@ int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
|
||||
amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
|
||||
if (amount < 0)
|
||||
amount = 0;
|
||||
err = put_user(amount, (int __user *) arg);
|
||||
err = put_user(amount, (int __user *)arg);
|
||||
break;
|
||||
|
||||
case TIOCINQ:
|
||||
@ -519,7 +519,7 @@ int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
|
||||
skb = skb_peek(&sk->sk_receive_queue);
|
||||
amount = skb ? skb->len : 0;
|
||||
release_sock(sk);
|
||||
err = put_user(amount, (int __user *) arg);
|
||||
err = put_user(amount, (int __user *)arg);
|
||||
break;
|
||||
|
||||
default:
|
||||
@ -637,7 +637,7 @@ static int bt_seq_show(struct seq_file *seq, void *v)
|
||||
struct bt_sock_list *l = PDE_DATA(file_inode(seq->file));
|
||||
|
||||
if (v == SEQ_START_TOKEN) {
|
||||
seq_puts(seq ,"sk RefCnt Rmem Wmem User Inode Parent");
|
||||
seq_puts(seq, "sk RefCnt Rmem Wmem User Inode Parent");
|
||||
|
||||
if (l->custom_seq_show) {
|
||||
seq_putc(seq, ' ');
|
||||
@ -657,7 +657,7 @@ static int bt_seq_show(struct seq_file *seq, void *v)
|
||||
sk_wmem_alloc_get(sk),
|
||||
from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
|
||||
sock_i_ino(sk),
|
||||
bt->parent? sock_i_ino(bt->parent): 0LU);
|
||||
bt->parent ? sock_i_ino(bt->parent) : 0LU);
|
||||
|
||||
if (l->custom_seq_show) {
|
||||
seq_putc(seq, ' ');
|
||||
@ -678,7 +678,7 @@ static const struct seq_operations bt_seq_ops = {
|
||||
|
||||
int bt_procfs_init(struct net *net, const char *name,
|
||||
struct bt_sock_list *sk_list,
|
||||
int (* seq_show)(struct seq_file *, void *))
|
||||
int (*seq_show)(struct seq_file *, void *))
|
||||
{
|
||||
sk_list->custom_seq_show = seq_show;
|
||||
|
||||
@ -694,7 +694,7 @@ void bt_procfs_cleanup(struct net *net, const char *name)
|
||||
#else
|
||||
int bt_procfs_init(struct net *net, const char *name,
|
||||
struct bt_sock_list *sk_list,
|
||||
int (* seq_show)(struct seq_file *, void *))
|
||||
int (*seq_show)(struct seq_file *, void *))
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
@ -297,6 +297,9 @@ void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
|
||||
struct hci_request req;
|
||||
int err;
|
||||
|
||||
if (!mgr)
|
||||
return;
|
||||
|
||||
cp.phy_handle = hcon->handle;
|
||||
cp.len_so_far = cpu_to_le16(0);
|
||||
cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
|
||||
|
@ -203,6 +203,23 @@ static void hci_acl_create_connection(struct hci_conn *conn)
|
||||
|
||||
BT_DBG("hcon %p", conn);
|
||||
|
||||
/* Many controllers disallow HCI Create Connection while it is doing
|
||||
* HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
|
||||
* Connection. This may cause the MGMT discovering state to become false
|
||||
* without user space's request but it is okay since the MGMT Discovery
|
||||
* APIs do not promise that discovery should be done forever. Instead,
|
||||
* the user space monitors the status of MGMT discovering and it may
|
||||
* request for discovery again when this flag becomes false.
|
||||
*/
|
||||
if (test_bit(HCI_INQUIRY, &hdev->flags)) {
|
||||
/* Put this connection to "pending" state so that it will be
|
||||
* executed after the inquiry cancel command complete event.
|
||||
*/
|
||||
conn->state = BT_CONNECT2;
|
||||
hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
|
||||
return;
|
||||
}
|
||||
|
||||
conn->state = BT_CONNECT;
|
||||
conn->out = true;
|
||||
conn->role = HCI_ROLE_MASTER;
|
||||
@ -276,6 +293,20 @@ static void hci_add_sco(struct hci_conn *conn, __u16 handle)
|
||||
hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
|
||||
}
|
||||
|
||||
static bool find_next_esco_param(struct hci_conn *conn,
|
||||
const struct sco_param *esco_param, int size)
|
||||
{
|
||||
for (; conn->attempt <= size; conn->attempt++) {
|
||||
if (lmp_esco_2m_capable(conn->link) ||
|
||||
(esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
|
||||
break;
|
||||
BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
|
||||
conn, conn->attempt);
|
||||
}
|
||||
|
||||
return conn->attempt <= size;
|
||||
}
|
||||
|
||||
bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
|
||||
{
|
||||
struct hci_dev *hdev = conn->hdev;
|
||||
@ -297,13 +328,15 @@ bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
|
||||
|
||||
switch (conn->setting & SCO_AIRMODE_MASK) {
|
||||
case SCO_AIRMODE_TRANSP:
|
||||
if (conn->attempt > ARRAY_SIZE(esco_param_msbc))
|
||||
if (!find_next_esco_param(conn, esco_param_msbc,
|
||||
ARRAY_SIZE(esco_param_msbc)))
|
||||
return false;
|
||||
param = &esco_param_msbc[conn->attempt - 1];
|
||||
break;
|
||||
case SCO_AIRMODE_CVSD:
|
||||
if (lmp_esco_capable(conn->link)) {
|
||||
if (conn->attempt > ARRAY_SIZE(esco_param_cvsd))
|
||||
if (!find_next_esco_param(conn, esco_param_cvsd,
|
||||
ARRAY_SIZE(esco_param_cvsd)))
|
||||
return false;
|
||||
param = &esco_param_cvsd[conn->attempt - 1];
|
||||
} else {
|
||||
|
@ -1362,8 +1362,10 @@ int hci_inquiry(void __user *arg)
|
||||
* cleared). If it is interrupted by a signal, return -EINTR.
|
||||
*/
|
||||
if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
|
||||
TASK_INTERRUPTIBLE))
|
||||
return -EINTR;
|
||||
TASK_INTERRUPTIBLE)) {
|
||||
err = -EINTR;
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
|
||||
/* for unlimited number of responses we will use buffer with
|
||||
@ -3051,12 +3053,15 @@ void hci_adv_monitors_clear(struct hci_dev *hdev)
|
||||
int handle;
|
||||
|
||||
idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
|
||||
hci_free_adv_monitor(monitor);
|
||||
hci_free_adv_monitor(hdev, monitor);
|
||||
|
||||
idr_destroy(&hdev->adv_monitors_idr);
|
||||
}
|
||||
|
||||
void hci_free_adv_monitor(struct adv_monitor *monitor)
|
||||
/* Frees the monitor structure and do some bookkeepings.
|
||||
* This function requires the caller holds hdev->lock.
|
||||
*/
|
||||
void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
|
||||
{
|
||||
struct adv_pattern *pattern;
|
||||
struct adv_pattern *tmp;
|
||||
@ -3064,68 +3069,167 @@ void hci_free_adv_monitor(struct adv_monitor *monitor)
|
||||
if (!monitor)
|
||||
return;
|
||||
|
||||
list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list)
|
||||
list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
|
||||
list_del(&pattern->list);
|
||||
kfree(pattern);
|
||||
}
|
||||
|
||||
if (monitor->handle)
|
||||
idr_remove(&hdev->adv_monitors_idr, monitor->handle);
|
||||
|
||||
if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
|
||||
hdev->adv_monitors_cnt--;
|
||||
mgmt_adv_monitor_removed(hdev, monitor->handle);
|
||||
}
|
||||
|
||||
kfree(monitor);
|
||||
}
|
||||
|
||||
/* This function requires the caller holds hdev->lock */
|
||||
int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
|
||||
int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
|
||||
{
|
||||
return mgmt_add_adv_patterns_monitor_complete(hdev, status);
|
||||
}
|
||||
|
||||
int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
|
||||
{
|
||||
return mgmt_remove_adv_monitor_complete(hdev, status);
|
||||
}
|
||||
|
||||
/* Assigns handle to a monitor, and if offloading is supported and power is on,
|
||||
* also attempts to forward the request to the controller.
|
||||
* Returns true if request is forwarded (result is pending), false otherwise.
|
||||
* This function requires the caller holds hdev->lock.
|
||||
*/
|
||||
bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
|
||||
int *err)
|
||||
{
|
||||
int min, max, handle;
|
||||
|
||||
if (!monitor)
|
||||
return -EINVAL;
|
||||
*err = 0;
|
||||
|
||||
if (!monitor) {
|
||||
*err = -EINVAL;
|
||||
return false;
|
||||
}
|
||||
|
||||
min = HCI_MIN_ADV_MONITOR_HANDLE;
|
||||
max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
|
||||
handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
|
||||
GFP_KERNEL);
|
||||
if (handle < 0)
|
||||
return handle;
|
||||
|
||||
hdev->adv_monitors_cnt++;
|
||||
monitor->handle = handle;
|
||||
|
||||
hci_update_background_scan(hdev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int free_adv_monitor(int id, void *ptr, void *data)
|
||||
{
|
||||
struct hci_dev *hdev = data;
|
||||
struct adv_monitor *monitor = ptr;
|
||||
|
||||
idr_remove(&hdev->adv_monitors_idr, monitor->handle);
|
||||
hci_free_adv_monitor(monitor);
|
||||
hdev->adv_monitors_cnt--;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* This function requires the caller holds hdev->lock */
|
||||
int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle)
|
||||
{
|
||||
struct adv_monitor *monitor;
|
||||
|
||||
if (handle) {
|
||||
monitor = idr_find(&hdev->adv_monitors_idr, handle);
|
||||
if (!monitor)
|
||||
return -ENOENT;
|
||||
|
||||
idr_remove(&hdev->adv_monitors_idr, monitor->handle);
|
||||
hci_free_adv_monitor(monitor);
|
||||
hdev->adv_monitors_cnt--;
|
||||
} else {
|
||||
/* Remove all monitors if handle is 0. */
|
||||
idr_for_each(&hdev->adv_monitors_idr, &free_adv_monitor, hdev);
|
||||
if (handle < 0) {
|
||||
*err = handle;
|
||||
return false;
|
||||
}
|
||||
|
||||
monitor->handle = handle;
|
||||
|
||||
if (!hdev_is_powered(hdev))
|
||||
return false;
|
||||
|
||||
switch (hci_get_adv_monitor_offload_ext(hdev)) {
|
||||
case HCI_ADV_MONITOR_EXT_NONE:
|
||||
hci_update_background_scan(hdev);
|
||||
bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err);
|
||||
/* Message was not forwarded to controller - not an error */
|
||||
return false;
|
||||
case HCI_ADV_MONITOR_EXT_MSFT:
|
||||
*err = msft_add_monitor_pattern(hdev, monitor);
|
||||
bt_dev_dbg(hdev, "%s add monitor msft status %d", hdev->name,
|
||||
*err);
|
||||
break;
|
||||
}
|
||||
|
||||
return (*err == 0);
|
||||
}
|
||||
|
||||
/* Attempts to tell the controller and free the monitor. If somehow the
|
||||
* controller doesn't have a corresponding handle, remove anyway.
|
||||
* Returns true if request is forwarded (result is pending), false otherwise.
|
||||
* This function requires the caller holds hdev->lock.
|
||||
*/
|
||||
static bool hci_remove_adv_monitor(struct hci_dev *hdev,
|
||||
struct adv_monitor *monitor,
|
||||
u16 handle, int *err)
|
||||
{
|
||||
*err = 0;
|
||||
|
||||
switch (hci_get_adv_monitor_offload_ext(hdev)) {
|
||||
case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
|
||||
goto free_monitor;
|
||||
case HCI_ADV_MONITOR_EXT_MSFT:
|
||||
*err = msft_remove_monitor(hdev, monitor, handle);
|
||||
break;
|
||||
}
|
||||
|
||||
/* In case no matching handle registered, just free the monitor */
|
||||
if (*err == -ENOENT)
|
||||
goto free_monitor;
|
||||
|
||||
return (*err == 0);
|
||||
|
||||
free_monitor:
|
||||
if (*err == -ENOENT)
|
||||
bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
|
||||
monitor->handle);
|
||||
hci_free_adv_monitor(hdev, monitor);
|
||||
|
||||
*err = 0;
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Returns true if request is forwarded (result is pending), false otherwise.
|
||||
* This function requires the caller holds hdev->lock.
|
||||
*/
|
||||
bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
|
||||
{
|
||||
struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
|
||||
bool pending;
|
||||
|
||||
if (!monitor) {
|
||||
*err = -EINVAL;
|
||||
return false;
|
||||
}
|
||||
|
||||
pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
|
||||
if (!*err && !pending)
|
||||
hci_update_background_scan(hdev);
|
||||
|
||||
return 0;
|
||||
bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending",
|
||||
hdev->name, handle, *err, pending ? "" : "not ");
|
||||
|
||||
return pending;
|
||||
}
|
||||
|
||||
/* Returns true if request is forwarded (result is pending), false otherwise.
|
||||
* This function requires the caller holds hdev->lock.
|
||||
*/
|
||||
bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
|
||||
{
|
||||
struct adv_monitor *monitor;
|
||||
int idr_next_id = 0;
|
||||
bool pending = false;
|
||||
bool update = false;
|
||||
|
||||
*err = 0;
|
||||
|
||||
while (!*err && !pending) {
|
||||
monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
|
||||
if (!monitor)
|
||||
break;
|
||||
|
||||
pending = hci_remove_adv_monitor(hdev, monitor, 0, err);
|
||||
|
||||
if (!*err && !pending)
|
||||
update = true;
|
||||
}
|
||||
|
||||
if (update)
|
||||
hci_update_background_scan(hdev);
|
||||
|
||||
bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending",
|
||||
hdev->name, *err, pending ? "" : "not ");
|
||||
|
||||
return pending;
|
||||
}
|
||||
|
||||
/* This function requires the caller holds hdev->lock */
|
||||
@ -3134,6 +3238,14 @@ bool hci_is_adv_monitoring(struct hci_dev *hdev)
|
||||
return !idr_is_empty(&hdev->adv_monitors_idr);
|
||||
}
|
||||
|
||||
int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
|
||||
{
|
||||
if (msft_monitor_supported(hdev))
|
||||
return HCI_ADV_MONITOR_EXT_MSFT;
|
||||
|
||||
return HCI_ADV_MONITOR_EXT_NONE;
|
||||
}
|
||||
|
||||
struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
|
||||
bdaddr_t *bdaddr, u8 type)
|
||||
{
|
||||
@ -3566,7 +3678,8 @@ static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
|
||||
}
|
||||
|
||||
/* Suspend notifier should only act on events when powered. */
|
||||
if (!hdev_is_powered(hdev))
|
||||
if (!hdev_is_powered(hdev) ||
|
||||
hci_dev_test_flag(hdev, HCI_UNREGISTER))
|
||||
goto done;
|
||||
|
||||
if (action == PM_SUSPEND_PREPARE) {
|
||||
@ -3827,10 +3940,12 @@ int hci_register_dev(struct hci_dev *hdev)
|
||||
hci_sock_dev_event(hdev, HCI_DEV_REG);
|
||||
hci_dev_hold(hdev);
|
||||
|
||||
if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
|
||||
hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
|
||||
error = register_pm_notifier(&hdev->suspend_notifier);
|
||||
if (error)
|
||||
goto err_wqueue;
|
||||
}
|
||||
|
||||
queue_work(hdev->req_workqueue, &hdev->power_on);
|
||||
|
||||
@ -3865,9 +3980,11 @@ void hci_unregister_dev(struct hci_dev *hdev)
|
||||
|
||||
cancel_work_sync(&hdev->power_on);
|
||||
|
||||
if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
|
||||
hci_suspend_clear_tasks(hdev);
|
||||
unregister_pm_notifier(&hdev->suspend_notifier);
|
||||
cancel_work_sync(&hdev->suspend_prepare);
|
||||
}
|
||||
|
||||
hci_dev_do_close(hdev);
|
||||
|
||||
|
@ -237,7 +237,7 @@ static int conn_info_min_age_get(void *data, u64 *val)
return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
DEFINE_DEBUGFS_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
@ -265,7 +265,7 @@ static int conn_info_max_age_get(void *data, u64 *val)
return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
DEFINE_DEBUGFS_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
conn_info_max_age_set, "%llu\n");

static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
@ -419,7 +419,7 @@ static int voice_setting_get(void *data, u64 *val)
return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
DEFINE_DEBUGFS_ATTRIBUTE(voice_setting_fops, voice_setting_get,
NULL, "0x%4.4llx\n");

static ssize_t ssp_debug_mode_read(struct file *file, char __user *user_buf,
@ -476,7 +476,7 @@ static int min_encrypt_key_size_get(void *data, u64 *val)
return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(min_encrypt_key_size_fops,
DEFINE_DEBUGFS_ATTRIBUTE(min_encrypt_key_size_fops,
min_encrypt_key_size_get,
min_encrypt_key_size_set, "%llu\n");

@ -491,7 +491,7 @@ static int auto_accept_delay_get(void *data, u64 *val)
return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
DEFINE_DEBUGFS_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
auto_accept_delay_set, "%llu\n");

static ssize_t force_bredr_smp_read(struct file *file,
@ -558,7 +558,7 @@ static int idle_timeout_get(void *data, u64 *val)
return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
DEFINE_DEBUGFS_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
idle_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
@ -586,7 +586,7 @@ static int sniff_min_interval_get(void *data, u64 *val)
return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
DEFINE_DEBUGFS_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
@ -614,7 +614,7 @@ static int sniff_max_interval_get(void *data, u64 *val)
return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
DEFINE_DEBUGFS_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
sniff_max_interval_set, "%llu\n");

void hci_debugfs_create_bredr(struct hci_dev *hdev)
@ -706,7 +706,7 @@ static int rpa_timeout_get(void *data, u64 *val)
return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
DEFINE_DEBUGFS_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
rpa_timeout_set, "%llu\n");

static int random_address_show(struct seq_file *f, void *p)
@ -869,7 +869,7 @@ static int conn_min_interval_get(void *data, u64 *val)
return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
DEFINE_DEBUGFS_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
@ -897,7 +897,7 @@ static int conn_max_interval_get(void *data, u64 *val)
return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
DEFINE_DEBUGFS_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
@ -925,7 +925,7 @@ static int conn_latency_get(void *data, u64 *val)
return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
DEFINE_DEBUGFS_ATTRIBUTE(conn_latency_fops, conn_latency_get,
conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
@ -953,7 +953,7 @@ static int supervision_timeout_get(void *data, u64 *val)
return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
DEFINE_DEBUGFS_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
@ -981,7 +981,7 @@ static int adv_channel_map_get(void *data, u64 *val)
return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
DEFINE_DEBUGFS_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
adv_channel_map_set, "%llu\n");

static int adv_min_interval_set(void *data, u64 val)
@ -1009,7 +1009,7 @@ static int adv_min_interval_get(void *data, u64 *val)
return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
DEFINE_DEBUGFS_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
adv_min_interval_set, "%llu\n");

static int adv_max_interval_set(void *data, u64 val)
@ -1037,7 +1037,7 @@ static int adv_max_interval_get(void *data, u64 *val)
return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
DEFINE_DEBUGFS_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
adv_max_interval_set, "%llu\n");

static int min_key_size_set(void *data, u64 val)
@ -1065,7 +1065,7 @@ static int min_key_size_get(void *data, u64 *val)
return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(min_key_size_fops, min_key_size_get,
DEFINE_DEBUGFS_ATTRIBUTE(min_key_size_fops, min_key_size_get,
min_key_size_set, "%llu\n");

static int max_key_size_set(void *data, u64 val)
@ -1093,7 +1093,7 @@ static int max_key_size_get(void *data, u64 *val)
return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(max_key_size_fops, max_key_size_get,
DEFINE_DEBUGFS_ATTRIBUTE(max_key_size_fops, max_key_size_get,
max_key_size_set, "%llu\n");

static int auth_payload_timeout_set(void *data, u64 val)
@ -1121,7 +1121,7 @@ static int auth_payload_timeout_get(void *data, u64 *val)
return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auth_payload_timeout_fops,
DEFINE_DEBUGFS_ATTRIBUTE(auth_payload_timeout_fops,
auth_payload_timeout_get,
auth_payload_timeout_set, "%llu\n");

@ -29,6 +29,7 @@
|
||||
|
||||
#include "smp.h"
|
||||
#include "hci_request.h"
|
||||
#include "msft.h"
|
||||
|
||||
#define HCI_REQ_DONE 0
|
||||
#define HCI_REQ_PEND 1
|
||||
@ -404,13 +405,18 @@ static void cancel_interleave_scan(struct hci_dev *hdev)
|
||||
*/
|
||||
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
|
||||
{
|
||||
/* If there is at least one ADV monitors and one pending LE connection
|
||||
* or one device to be scanned for, we should alternate between
|
||||
* allowlist scan and one without any filters to save power.
|
||||
/* Do interleaved scan only if all of the following are true:
|
||||
* - There is at least one ADV monitor
|
||||
* - At least one pending LE connection or one device to be scanned for
|
||||
* - Monitor offloading is not supported
|
||||
* If so, we should alternate between allowlist scan and one without
|
||||
* any filters to save power.
|
||||
*/
|
||||
bool use_interleaving = hci_is_adv_monitoring(hdev) &&
|
||||
!(list_empty(&hdev->pend_le_conns) &&
|
||||
list_empty(&hdev->pend_le_reports));
|
||||
list_empty(&hdev->pend_le_reports)) &&
|
||||
hci_get_adv_monitor_offload_ext(hdev) ==
|
||||
HCI_ADV_MONITOR_EXT_NONE;
|
||||
bool is_interleaving = is_interleave_scanning(hdev);
|
||||
|
||||
if (use_interleaving && !is_interleaving) {
|
||||
@ -899,14 +905,11 @@ static u8 update_white_list(struct hci_request *req)
|
||||
|
||||
/* Use the allowlist unless the following conditions are all true:
|
||||
* - We are not currently suspending
|
||||
* - There are 1 or more ADV monitors registered
|
||||
* - There are 1 or more ADV monitors registered and it's not offloaded
|
||||
* - Interleaved scanning is not currently using the allowlist
|
||||
*
|
||||
* Once the controller offloading of advertisement monitor is in place,
|
||||
* the above condition should include the support of MSFT extension
|
||||
* support.
|
||||
*/
|
||||
if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
|
||||
hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
|
||||
hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
|
||||
return 0x00;
|
||||
|
||||
@ -1087,6 +1090,8 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
|
||||
if (hdev->suspended) {
|
||||
window = hdev->le_scan_window_suspend;
|
||||
interval = hdev->le_scan_int_suspend;
|
||||
|
||||
set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
|
||||
} else if (hci_is_le_conn_scanning(hdev)) {
|
||||
window = hdev->le_scan_window_connect;
|
||||
interval = hdev->le_scan_int_connect;
|
||||
@ -1170,19 +1175,6 @@ static void hci_req_set_event_filter(struct hci_request *req)
|
||||
hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
|
||||
}
|
||||
|
||||
static void hci_req_config_le_suspend_scan(struct hci_request *req)
|
||||
{
|
||||
/* Before changing params disable scan if enabled */
|
||||
if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
|
||||
hci_req_add_le_scan_disable(req, false);
|
||||
|
||||
/* Configure params and enable scanning */
|
||||
hci_req_add_le_passive_scan(req);
|
||||
|
||||
/* Block suspend notifier on response */
|
||||
set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
|
||||
}
|
||||
|
||||
static void cancel_adv_timeout(struct hci_dev *hdev)
|
||||
{
|
||||
if (hdev->adv_instance_timeout) {
|
||||
@ -1245,10 +1237,35 @@ static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
|
||||
{
|
||||
bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
|
||||
status);
|
||||
if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
|
||||
test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
|
||||
if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
|
||||
test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
|
||||
clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
|
||||
clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
|
||||
wake_up(&hdev->suspend_wait_q);
|
||||
}
|
||||
|
||||
if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
|
||||
clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
|
||||
wake_up(&hdev->suspend_wait_q);
|
||||
}
|
||||
}
|
||||
|
||||
static void hci_req_add_set_adv_filter_enable(struct hci_request *req,
|
||||
bool enable)
|
||||
{
|
||||
struct hci_dev *hdev = req->hdev;
|
||||
|
||||
switch (hci_get_adv_monitor_offload_ext(hdev)) {
|
||||
case HCI_ADV_MONITOR_EXT_MSFT:
|
||||
msft_req_add_set_filter_enable(req, enable);
|
||||
break;
|
||||
default:
|
||||
return;
|
||||
}
|
||||
|
||||
/* No need to block when enabling since it's on resume path */
|
||||
if (hdev->suspended && !enable)
|
||||
set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
|
||||
}
|
||||
|
||||
/* Call with hci_dev_lock */
|
||||
@ -1308,6 +1325,9 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
|
||||
hci_req_add_le_scan_disable(&req, false);
|
||||
}
|
||||
|
||||
/* Disable advertisement filters */
|
||||
hci_req_add_set_adv_filter_enable(&req, false);
|
||||
|
||||
/* Mark task needing completion */
|
||||
set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
|
||||
|
||||
@ -1336,7 +1356,7 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
|
||||
/* Enable event filter for paired devices */
|
||||
hci_req_set_event_filter(&req);
|
||||
/* Enable passive scan at lower duty cycle */
|
||||
hci_req_config_le_suspend_scan(&req);
|
||||
__hci_update_background_scan(&req);
|
||||
/* Pause scan changes again. */
|
||||
hdev->scanning_paused = true;
|
||||
hci_req_run(&req, suspend_req_complete);
|
||||
@ -1346,7 +1366,9 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
|
||||
|
||||
hci_req_clear_event_filter(&req);
|
||||
/* Reset passive/background scanning to normal */
|
||||
hci_req_config_le_suspend_scan(&req);
|
||||
__hci_update_background_scan(&req);
|
||||
/* Enable all of the advertisement filters */
|
||||
hci_req_add_set_adv_filter_enable(&req, true);
|
||||
|
||||
/* Unpause directed advertising */
|
||||
hdev->advertising_paused = false;
|
||||
|
@ -4519,6 +4519,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
|
||||
}
|
||||
goto done;
|
||||
|
||||
case L2CAP_CONF_UNKNOWN:
|
||||
case L2CAP_CONF_UNACCEPT:
|
||||
if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
|
||||
char req[64];
|
||||
@ -8276,10 +8277,73 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
|
||||
mutex_unlock(&conn->chan_lock);
|
||||
}
|
||||
|
||||
/* Append fragment into frame respecting the maximum len of rx_skb */
|
||||
static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
|
||||
u16 len)
|
||||
{
|
||||
if (!conn->rx_skb) {
|
||||
/* Allocate skb for the complete frame (with header) */
|
||||
conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
|
||||
if (!conn->rx_skb)
|
||||
return -ENOMEM;
|
||||
/* Init rx_len */
|
||||
conn->rx_len = len;
|
||||
}
|
||||
|
||||
/* Copy as much as the rx_skb can hold */
|
||||
len = min_t(u16, len, skb->len);
|
||||
skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
|
||||
skb_pull(skb, len);
|
||||
conn->rx_len -= len;
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
|
||||
{
|
||||
struct sk_buff *rx_skb;
|
||||
int len;
|
||||
|
||||
/* Append just enough to complete the header */
|
||||
len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);
|
||||
|
||||
/* If header could not be read just continue */
|
||||
if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
|
||||
return len;
|
||||
|
||||
rx_skb = conn->rx_skb;
|
||||
len = get_unaligned_le16(rx_skb->data);
|
||||
|
||||
/* Check if rx_skb has enough space to received all fragments */
|
||||
if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
|
||||
/* Update expected len */
|
||||
conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
|
||||
return L2CAP_LEN_SIZE;
|
||||
}
|
||||
|
||||
/* Reset conn->rx_skb since it will need to be reallocated in order to
|
||||
* fit all fragments.
|
||||
*/
|
||||
conn->rx_skb = NULL;
|
||||
|
||||
/* Reallocates rx_skb using the exact expected length */
|
||||
len = l2cap_recv_frag(conn, rx_skb,
|
||||
len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
|
||||
kfree_skb(rx_skb);
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
static void l2cap_recv_reset(struct l2cap_conn *conn)
|
||||
{
|
||||
kfree_skb(conn->rx_skb);
|
||||
conn->rx_skb = NULL;
|
||||
conn->rx_len = 0;
|
||||
}
|
||||
|
||||
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
|
||||
{
|
||||
struct l2cap_conn *conn = hcon->l2cap_data;
|
||||
struct l2cap_hdr *hdr;
|
||||
int len;
|
||||
|
||||
/* For AMP controller do not create l2cap conn */
|
||||
@ -8298,23 +8362,23 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
|
||||
case ACL_START:
|
||||
case ACL_START_NO_FLUSH:
|
||||
case ACL_COMPLETE:
|
||||
if (conn->rx_len) {
|
||||
if (conn->rx_skb) {
|
||||
BT_ERR("Unexpected start frame (len %d)", skb->len);
|
||||
kfree_skb(conn->rx_skb);
|
||||
conn->rx_skb = NULL;
|
||||
conn->rx_len = 0;
|
||||
l2cap_recv_reset(conn);
|
||||
l2cap_conn_unreliable(conn, ECOMM);
|
||||
}
|
||||
|
||||
/* Start fragment always begin with Basic L2CAP header */
|
||||
if (skb->len < L2CAP_HDR_SIZE) {
|
||||
BT_ERR("Frame is too short (len %d)", skb->len);
|
||||
l2cap_conn_unreliable(conn, ECOMM);
|
||||
/* Start fragment may not contain the L2CAP length so just
|
||||
* copy the initial byte when that happens and use conn->mtu as
|
||||
* expected length.
|
||||
*/
|
||||
if (skb->len < L2CAP_LEN_SIZE) {
|
||||
if (l2cap_recv_frag(conn, skb, conn->mtu) < 0)
|
||||
goto drop;
|
||||
return;
|
||||
}
|
||||
|
||||
hdr = (struct l2cap_hdr *) skb->data;
|
||||
len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
|
||||
len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;
|
||||
|
||||
if (len == skb->len) {
|
||||
/* Complete frame received */
|
||||
@ -8331,38 +8395,43 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
|
||||
goto drop;
|
||||
}
|
||||
|
||||
/* Allocate skb for the complete frame (with header) */
|
||||
conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
|
||||
if (!conn->rx_skb)
|
||||
/* Append fragment into frame (with header) */
|
||||
if (l2cap_recv_frag(conn, skb, len) < 0)
|
||||
goto drop;
|
||||
|
||||
skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
|
||||
skb->len);
|
||||
conn->rx_len = len - skb->len;
|
||||
break;
|
||||
|
||||
case ACL_CONT:
|
||||
BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
|
||||
|
||||
if (!conn->rx_len) {
|
||||
if (!conn->rx_skb) {
|
||||
BT_ERR("Unexpected continuation frame (len %d)", skb->len);
|
||||
l2cap_conn_unreliable(conn, ECOMM);
|
||||
goto drop;
|
||||
}
|
||||
|
||||
if (skb->len > conn->rx_len) {
|
||||
BT_ERR("Fragment is too long (len %d, expected %d)",
|
||||
skb->len, conn->rx_len);
|
||||
kfree_skb(conn->rx_skb);
|
||||
conn->rx_skb = NULL;
|
||||
conn->rx_len = 0;
|
||||
/* Complete the L2CAP length if it has not been read */
|
||||
if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
|
||||
if (l2cap_recv_len(conn, skb) < 0) {
|
||||
l2cap_conn_unreliable(conn, ECOMM);
|
||||
goto drop;
|
||||
}
|
||||
|
||||
skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
|
||||
skb->len);
|
||||
conn->rx_len -= skb->len;
|
||||
/* Header still could not be read just continue */
|
||||
if (conn->rx_skb->len < L2CAP_LEN_SIZE)
|
||||
return;
|
||||
}
|
||||
|
||||
if (skb->len > conn->rx_len) {
|
||||
BT_ERR("Fragment is too long (len %d, expected %d)",
|
||||
skb->len, conn->rx_len);
|
||||
l2cap_recv_reset(conn);
|
||||
l2cap_conn_unreliable(conn, ECOMM);
|
||||
goto drop;
|
||||
}
|
||||
|
||||
/* Append fragment into frame (with header) */
|
||||
l2cap_recv_frag(conn, skb, skb->len);
|
||||
|
||||
if (!conn->rx_len) {
|
||||
/* Complete frame received. l2cap_recv_frame
|
||||
|
@ -124,6 +124,7 @@ static const u16 mgmt_commands[] = {
|
||||
MGMT_OP_REMOVE_ADV_MONITOR,
|
||||
MGMT_OP_ADD_EXT_ADV_PARAMS,
|
||||
MGMT_OP_ADD_EXT_ADV_DATA,
|
||||
MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
|
||||
};
|
||||
|
||||
static const u16 mgmt_events[] = {
|
||||
@ -4166,14 +4167,24 @@ static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
|
||||
mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
|
||||
}
|
||||
|
||||
static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
|
||||
u16 handle)
|
||||
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
|
||||
{
|
||||
struct mgmt_ev_adv_monitor_added ev;
|
||||
struct mgmt_ev_adv_monitor_removed ev;
|
||||
struct mgmt_pending_cmd *cmd;
|
||||
struct sock *sk_skip = NULL;
|
||||
struct mgmt_cp_remove_adv_monitor *cp;
|
||||
|
||||
cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
|
||||
if (cmd) {
|
||||
cp = cmd->param;
|
||||
|
||||
if (cp->monitor_handle)
|
||||
sk_skip = cmd->sk;
|
||||
}
|
||||
|
||||
ev.monitor_handle = cpu_to_le16(handle);
|
||||
|
||||
mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
|
||||
mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
|
||||
}
|
||||
|
||||
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
|
||||
@ -4184,6 +4195,7 @@ static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
|
||||
int handle, err;
|
||||
size_t rp_size = 0;
|
||||
__u32 supported = 0;
|
||||
__u32 enabled = 0;
|
||||
__u16 num_handles = 0;
|
||||
__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
|
||||
|
||||
@ -4191,12 +4203,11 @@ static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
|
||||
|
||||
hci_dev_lock(hdev);
|
||||
|
||||
if (msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR)
|
||||
if (msft_monitor_supported(hdev))
|
||||
supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
|
||||
|
||||
idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) {
|
||||
idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
|
||||
handles[num_handles++] = monitor->handle;
|
||||
}
|
||||
|
||||
hci_dev_unlock(hdev);
|
||||
|
||||
@ -4205,11 +4216,11 @@ static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
|
||||
if (!rp)
|
||||
return -ENOMEM;
|
||||
|
||||
/* Once controller-based monitoring is in place, the enabled_features
|
||||
* should reflect the use.
|
||||
*/
|
||||
/* All supported features are currently enabled */
|
||||
enabled = supported;
|
||||
|
||||
rp->supported_features = cpu_to_le32(supported);
|
||||
rp->enabled_features = 0;
|
||||
rp->enabled_features = cpu_to_le32(enabled);
|
||||
rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
|
||||
rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
|
||||
rp->num_handles = cpu_to_le16(num_handles);
|
||||
@ -4225,105 +4236,267 @@ static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
|
||||
return err;
|
||||
}
|
||||
|
||||
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
|
||||
void *data, u16 len)
|
||||
int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
|
||||
{
|
||||
struct mgmt_cp_add_adv_patterns_monitor *cp = data;
|
||||
struct mgmt_rp_add_adv_patterns_monitor rp;
|
||||
struct adv_monitor *m = NULL;
|
||||
struct mgmt_pending_cmd *cmd;
|
||||
struct adv_monitor *monitor;
|
||||
int err = 0;
|
||||
|
||||
hci_dev_lock(hdev);
|
||||
|
||||
cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
|
||||
if (!cmd) {
|
||||
cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
|
||||
if (!cmd)
|
||||
goto done;
|
||||
}
|
||||
|
||||
monitor = cmd->user_data;
|
||||
rp.monitor_handle = cpu_to_le16(monitor->handle);
|
||||
|
||||
if (!status) {
|
||||
mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
|
||||
hdev->adv_monitors_cnt++;
|
||||
if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
|
||||
monitor->state = ADV_MONITOR_STATE_REGISTERED;
|
||||
hci_update_background_scan(hdev);
|
||||
}
|
||||
|
||||
err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
|
||||
mgmt_status(status), &rp, sizeof(rp));
|
||||
mgmt_pending_remove(cmd);
|
||||
|
||||
done:
|
||||
hci_dev_unlock(hdev);
|
||||
bt_dev_dbg(hdev, "add monitor %d complete, status %d",
|
||||
rp.monitor_handle, status);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
|
||||
struct adv_monitor *m, u8 status,
|
||||
void *data, u16 len, u16 op)
|
||||
{
|
||||
struct mgmt_rp_add_adv_patterns_monitor rp;
|
||||
struct mgmt_pending_cmd *cmd;
|
||||
int err;
|
||||
bool pending;
|
||||
|
||||
hci_dev_lock(hdev);
|
||||
|
||||
if (status)
|
||||
goto unlock;
|
||||
|
||||
if (pending_find(MGMT_OP_SET_LE, hdev) ||
|
||||
pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
|
||||
pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
|
||||
pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
|
||||
status = MGMT_STATUS_BUSY;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
cmd = mgmt_pending_add(sk, op, hdev, data, len);
|
||||
if (!cmd) {
|
||||
status = MGMT_STATUS_NO_RESOURCES;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
cmd->user_data = m;
|
||||
pending = hci_add_adv_monitor(hdev, m, &err);
|
||||
if (err) {
|
||||
if (err == -ENOSPC || err == -ENOMEM)
|
||||
status = MGMT_STATUS_NO_RESOURCES;
|
||||
else if (err == -EINVAL)
|
||||
status = MGMT_STATUS_INVALID_PARAMS;
|
||||
else
|
||||
status = MGMT_STATUS_FAILED;
|
||||
|
||||
mgmt_pending_remove(cmd);
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
if (!pending) {
|
||||
mgmt_pending_remove(cmd);
|
||||
rp.monitor_handle = cpu_to_le16(m->handle);
|
||||
mgmt_adv_monitor_added(sk, hdev, m->handle);
|
||||
m->state = ADV_MONITOR_STATE_REGISTERED;
|
||||
hdev->adv_monitors_cnt++;
|
||||
|
||||
hci_dev_unlock(hdev);
|
||||
return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
|
||||
&rp, sizeof(rp));
|
||||
}
|
||||
|
||||
hci_dev_unlock(hdev);
|
||||
|
||||
return 0;
|
||||
|
||||
unlock:
|
||||
hci_free_adv_monitor(hdev, m);
|
||||
hci_dev_unlock(hdev);
|
||||
return mgmt_cmd_status(sk, hdev->id, op, status);
|
||||
}
|
||||
|
||||
static void parse_adv_monitor_rssi(struct adv_monitor *m,
|
||||
struct mgmt_adv_rssi_thresholds *rssi)
|
||||
{
|
||||
if (rssi) {
|
||||
m->rssi.low_threshold = rssi->low_threshold;
|
||||
m->rssi.low_threshold_timeout =
|
||||
__le16_to_cpu(rssi->low_threshold_timeout);
|
||||
m->rssi.high_threshold = rssi->high_threshold;
|
||||
m->rssi.high_threshold_timeout =
|
||||
__le16_to_cpu(rssi->high_threshold_timeout);
|
||||
m->rssi.sampling_period = rssi->sampling_period;
|
||||
} else {
|
||||
/* Default values. These numbers are the least constricting
|
||||
* parameters for MSFT API to work, so it behaves as if there
|
||||
* are no rssi parameter to consider. May need to be changed
|
||||
* if other API are to be supported.
|
||||
*/
|
||||
m->rssi.low_threshold = -127;
|
||||
m->rssi.low_threshold_timeout = 60;
|
||||
m->rssi.high_threshold = -127;
|
||||
m->rssi.high_threshold_timeout = 0;
|
||||
m->rssi.sampling_period = 0;
|
||||
}
|
||||
}
|
||||
|
||||
static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
|
||||
struct mgmt_adv_pattern *patterns)
|
||||
{
|
||||
u8 offset = 0, length = 0;
|
||||
struct adv_pattern *p = NULL;
|
||||
unsigned int mp_cnt = 0, prev_adv_monitors_cnt;
|
||||
__u8 cp_ofst = 0, cp_len = 0;
|
||||
int err, i;
|
||||
int i;
|
||||
|
||||
BT_DBG("request for %s", hdev->name);
|
||||
|
||||
if (len <= sizeof(*cp) || cp->pattern_count == 0) {
|
||||
err = mgmt_cmd_status(sk, hdev->id,
|
||||
MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
|
||||
MGMT_STATUS_INVALID_PARAMS);
|
||||
goto failed;
|
||||
}
|
||||
|
||||
m = kmalloc(sizeof(*m), GFP_KERNEL);
|
||||
if (!m) {
|
||||
err = -ENOMEM;
|
||||
goto failed;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&m->patterns);
|
||||
m->active = false;
|
||||
|
||||
for (i = 0; i < cp->pattern_count; i++) {
|
||||
if (++mp_cnt > HCI_MAX_ADV_MONITOR_NUM_PATTERNS) {
|
||||
err = mgmt_cmd_status(sk, hdev->id,
|
||||
MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
|
||||
MGMT_STATUS_INVALID_PARAMS);
|
||||
goto failed;
|
||||
}
|
||||
|
||||
cp_ofst = cp->patterns[i].offset;
|
||||
cp_len = cp->patterns[i].length;
|
||||
if (cp_ofst >= HCI_MAX_AD_LENGTH ||
|
||||
cp_len > HCI_MAX_AD_LENGTH ||
|
||||
(cp_ofst + cp_len) > HCI_MAX_AD_LENGTH) {
|
||||
err = mgmt_cmd_status(sk, hdev->id,
|
||||
MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
|
||||
MGMT_STATUS_INVALID_PARAMS);
|
||||
goto failed;
|
||||
}
|
||||
for (i = 0; i < pattern_count; i++) {
|
||||
offset = patterns[i].offset;
|
||||
length = patterns[i].length;
|
||||
if (offset >= HCI_MAX_AD_LENGTH ||
|
||||
length > HCI_MAX_AD_LENGTH ||
|
||||
(offset + length) > HCI_MAX_AD_LENGTH)
|
||||
return MGMT_STATUS_INVALID_PARAMS;
|
||||
|
||||
p = kmalloc(sizeof(*p), GFP_KERNEL);
|
||||
if (!p) {
|
||||
err = -ENOMEM;
|
||||
goto failed;
|
||||
}
|
||||
if (!p)
|
||||
return MGMT_STATUS_NO_RESOURCES;
|
||||
|
||||
p->ad_type = cp->patterns[i].ad_type;
|
||||
p->offset = cp->patterns[i].offset;
|
||||
p->length = cp->patterns[i].length;
|
||||
memcpy(p->value, cp->patterns[i].value, p->length);
|
||||
p->ad_type = patterns[i].ad_type;
|
||||
p->offset = patterns[i].offset;
|
||||
p->length = patterns[i].length;
|
||||
memcpy(p->value, patterns[i].value, p->length);
|
||||
|
||||
INIT_LIST_HEAD(&p->list);
|
||||
list_add(&p->list, &m->patterns);
|
||||
}
|
||||
|
||||
if (mp_cnt != cp->pattern_count) {
|
||||
err = mgmt_cmd_status(sk, hdev->id,
|
||||
MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
|
||||
MGMT_STATUS_INVALID_PARAMS);
|
||||
goto failed;
|
||||
return MGMT_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
|
||||
void *data, u16 len)
|
||||
{
|
||||
struct mgmt_cp_add_adv_patterns_monitor *cp = data;
|
||||
struct adv_monitor *m = NULL;
|
||||
u8 status = MGMT_STATUS_SUCCESS;
|
||||
size_t expected_size = sizeof(*cp);
|
||||
|
||||
BT_DBG("request for %s", hdev->name);
|
||||
|
||||
if (len <= sizeof(*cp)) {
|
||||
status = MGMT_STATUS_INVALID_PARAMS;
|
||||
goto done;
|
||||
}
|
||||
|
||||
expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
|
||||
if (len != expected_size) {
|
||||
status = MGMT_STATUS_INVALID_PARAMS;
|
||||
goto done;
|
||||
}
|
||||
|
||||
m = kzalloc(sizeof(*m), GFP_KERNEL);
|
||||
if (!m) {
|
||||
status = MGMT_STATUS_NO_RESOURCES;
|
||||
goto done;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&m->patterns);
|
||||
|
||||
parse_adv_monitor_rssi(m, NULL);
|
||||
status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
|
||||
|
||||
done:
|
||||
return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
|
||||
MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
|
||||
}
|
||||
|
||||
static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
|
||||
void *data, u16 len)
|
||||
{
|
||||
struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
|
||||
struct adv_monitor *m = NULL;
|
||||
u8 status = MGMT_STATUS_SUCCESS;
|
||||
size_t expected_size = sizeof(*cp);
|
||||
|
||||
BT_DBG("request for %s", hdev->name);
|
||||
|
||||
if (len <= sizeof(*cp)) {
|
||||
status = MGMT_STATUS_INVALID_PARAMS;
|
||||
goto done;
|
||||
}
|
||||
|
||||
expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
|
||||
if (len != expected_size) {
|
||||
status = MGMT_STATUS_INVALID_PARAMS;
|
||||
goto done;
|
||||
}
|
||||
|
||||
m = kzalloc(sizeof(*m), GFP_KERNEL);
|
||||
if (!m) {
|
||||
status = MGMT_STATUS_NO_RESOURCES;
|
||||
goto done;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&m->patterns);
|
||||
|
||||
parse_adv_monitor_rssi(m, &cp->rssi);
|
||||
status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
|
||||
|
||||
done:
|
||||
return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
|
||||
MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
|
||||
}
|
||||
|
||||
int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
|
||||
{
|
||||
struct mgmt_rp_remove_adv_monitor rp;
|
||||
struct mgmt_cp_remove_adv_monitor *cp;
|
||||
struct mgmt_pending_cmd *cmd;
|
||||
int err = 0;
|
||||
|
||||
hci_dev_lock(hdev);
|
||||
|
||||
prev_adv_monitors_cnt = hdev->adv_monitors_cnt;
|
||||
cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
|
||||
if (!cmd)
|
||||
goto done;
|
||||
|
||||
err = hci_add_adv_monitor(hdev, m);
|
||||
if (err) {
|
||||
if (err == -ENOSPC) {
|
||||
mgmt_cmd_status(sk, hdev->id,
|
||||
MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
|
||||
MGMT_STATUS_NO_RESOURCES);
|
||||
}
|
||||
goto unlock;
|
||||
}
|
||||
cp = cmd->param;
|
||||
rp.monitor_handle = cp->monitor_handle;
|
||||
|
||||
if (hdev->adv_monitors_cnt > prev_adv_monitors_cnt)
|
||||
mgmt_adv_monitor_added(sk, hdev, m->handle);
|
||||
if (!status)
|
||||
hci_update_background_scan(hdev);
|
||||
|
||||
err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
|
||||
mgmt_status(status), &rp, sizeof(rp));
|
||||
mgmt_pending_remove(cmd);
|
||||
|
||||
done:
|
||||
hci_dev_unlock(hdev);
|
||||
bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
|
||||
rp.monitor_handle, status);
|
||||
|
||||
rp.monitor_handle = cpu_to_le16(m->handle);
|
||||
|
||||
return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
|
||||
MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
|
||||
|
||||
unlock:
|
||||
hci_dev_unlock(hdev);
|
||||
|
||||
failed:
|
||||
hci_free_adv_monitor(m);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -4332,37 +4505,64 @@ static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
|
||||
{
|
||||
struct mgmt_cp_remove_adv_monitor *cp = data;
|
||||
struct mgmt_rp_remove_adv_monitor rp;
|
||||
unsigned int prev_adv_monitors_cnt;
|
||||
u16 handle;
|
||||
int err;
|
||||
struct mgmt_pending_cmd *cmd;
|
||||
u16 handle = __le16_to_cpu(cp->monitor_handle);
|
||||
int err, status;
|
||||
bool pending;
|
||||
|
||||
BT_DBG("request for %s", hdev->name);
|
||||
rp.monitor_handle = cp->monitor_handle;
|
||||
|
||||
hci_dev_lock(hdev);
|
||||
|
||||
handle = __le16_to_cpu(cp->monitor_handle);
|
||||
prev_adv_monitors_cnt = hdev->adv_monitors_cnt;
|
||||
|
||||
err = hci_remove_adv_monitor(hdev, handle);
|
||||
if (err == -ENOENT) {
|
||||
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
|
||||
MGMT_STATUS_INVALID_INDEX);
|
||||
if (pending_find(MGMT_OP_SET_LE, hdev) ||
|
||||
pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
|
||||
pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
|
||||
pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
|
||||
status = MGMT_STATUS_BUSY;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
if (hdev->adv_monitors_cnt < prev_adv_monitors_cnt)
|
||||
mgmt_adv_monitor_removed(sk, hdev, handle);
|
||||
cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
|
||||
if (!cmd) {
|
||||
status = MGMT_STATUS_NO_RESOURCES;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
if (handle)
|
||||
pending = hci_remove_single_adv_monitor(hdev, handle, &err);
|
||||
else
|
||||
pending = hci_remove_all_adv_monitor(hdev, &err);
|
||||
|
||||
if (err) {
|
||||
mgmt_pending_remove(cmd);
|
||||
|
||||
if (err == -ENOENT)
|
||||
status = MGMT_STATUS_INVALID_INDEX;
|
||||
else
|
||||
status = MGMT_STATUS_FAILED;
|
||||
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
/* monitor can be removed without forwarding request to controller */
|
||||
if (!pending) {
|
||||
mgmt_pending_remove(cmd);
|
||||
hci_dev_unlock(hdev);
|
||||
|
||||
rp.monitor_handle = cp->monitor_handle;
|
||||
return mgmt_cmd_complete(sk, hdev->id,
|
||||
MGMT_OP_REMOVE_ADV_MONITOR,
|
||||
MGMT_STATUS_SUCCESS,
|
||||
&rp, sizeof(rp));
|
||||
}
|
||||
|
||||
return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
|
||||
MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
|
||||
hci_dev_unlock(hdev);
|
||||
return 0;
|
||||
|
||||
unlock:
|
||||
hci_dev_unlock(hdev);
|
||||
return err;
|
||||
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
|
||||
status);
|
||||
}
|
||||
|
||||
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
|
||||
@ -4798,6 +4998,14 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
|
||||
goto failed;
|
||||
}
|
||||
|
||||
if (hdev->discovery_paused) {
|
||||
err = mgmt_cmd_complete(sk, hdev->id,
|
||||
MGMT_OP_START_SERVICE_DISCOVERY,
|
||||
MGMT_STATUS_BUSY, &cp->type,
|
||||
sizeof(cp->type));
|
||||
goto failed;
|
||||
}
|
||||
|
||||
uuid_count = __le16_to_cpu(cp->uuid_count);
|
||||
if (uuid_count > max_uuid_count) {
|
||||
bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
|
||||
@ -8234,6 +8442,9 @@ static const struct hci_mgmt_handler mgmt_handlers[] = {
|
||||
HCI_MGMT_VAR_LEN },
|
||||
{ add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
|
||||
HCI_MGMT_VAR_LEN },
|
||||
{ add_adv_patterns_monitor_rssi,
|
||||
MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
|
||||
HCI_MGMT_VAR_LEN },
|
||||
};
|
||||
|
||||
void mgmt_index_added(struct hci_dev *hdev)
|
||||
|
@ -5,9 +5,16 @@
|
||||
|
||||
#include <net/bluetooth/bluetooth.h>
|
||||
#include <net/bluetooth/hci_core.h>
|
||||
#include <net/bluetooth/mgmt.h>
|
||||
|
||||
#include "hci_request.h"
|
||||
#include "mgmt_util.h"
|
||||
#include "msft.h"
|
||||
|
||||
#define MSFT_RSSI_THRESHOLD_VALUE_MIN -127
|
||||
#define MSFT_RSSI_THRESHOLD_VALUE_MAX 20
|
||||
#define MSFT_RSSI_LOW_TIMEOUT_MAX 0x3C
|
||||
|
||||
#define MSFT_OP_READ_SUPPORTED_FEATURES 0x00
|
||||
struct msft_cp_read_supported_features {
|
||||
__u8 sub_opcode;
|
||||
@ -21,12 +28,83 @@ struct msft_rp_read_supported_features {
|
||||
__u8 evt_prefix[];
|
||||
} __packed;
|
||||
|
||||
#define MSFT_OP_LE_MONITOR_ADVERTISEMENT 0x03
|
||||
#define MSFT_MONITOR_ADVERTISEMENT_TYPE_PATTERN 0x01
|
||||
struct msft_le_monitor_advertisement_pattern {
|
||||
__u8 length;
|
||||
__u8 data_type;
|
||||
__u8 start_byte;
|
||||
__u8 pattern[0];
|
||||
};
|
||||
|
||||
struct msft_le_monitor_advertisement_pattern_data {
|
||||
__u8 count;
|
||||
__u8 data[0];
|
||||
};
|
||||
|
||||
struct msft_cp_le_monitor_advertisement {
|
||||
__u8 sub_opcode;
|
||||
__s8 rssi_high;
|
||||
__s8 rssi_low;
|
||||
__u8 rssi_low_interval;
|
||||
__u8 rssi_sampling_period;
|
||||
__u8 cond_type;
|
||||
__u8 data[0];
|
||||
} __packed;
|
||||
|
||||
struct msft_rp_le_monitor_advertisement {
|
||||
__u8 status;
|
||||
__u8 sub_opcode;
|
||||
__u8 handle;
|
||||
} __packed;
|
||||
|
||||
#define MSFT_OP_LE_CANCEL_MONITOR_ADVERTISEMENT 0x04
|
||||
struct msft_cp_le_cancel_monitor_advertisement {
|
||||
__u8 sub_opcode;
|
||||
__u8 handle;
|
||||
} __packed;
|
||||
|
||||
struct msft_rp_le_cancel_monitor_advertisement {
|
||||
__u8 status;
|
||||
__u8 sub_opcode;
|
||||
} __packed;
|
||||
|
||||
#define MSFT_OP_LE_SET_ADVERTISEMENT_FILTER_ENABLE 0x05
|
||||
struct msft_cp_le_set_advertisement_filter_enable {
|
||||
__u8 sub_opcode;
|
||||
__u8 enable;
|
||||
} __packed;
|
||||
|
||||
struct msft_rp_le_set_advertisement_filter_enable {
|
||||
__u8 status;
|
||||
__u8 sub_opcode;
|
||||
} __packed;
|
||||
|
||||
struct msft_monitor_advertisement_handle_data {
|
||||
__u8 msft_handle;
|
||||
__u16 mgmt_handle;
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
struct msft_data {
|
||||
__u64 features;
|
||||
__u8 evt_prefix_len;
|
||||
__u8 *evt_prefix;
|
||||
struct list_head handle_map;
|
||||
__u16 pending_add_handle;
|
||||
__u16 pending_remove_handle;
|
||||
__u8 reregistering;
|
||||
__u8 filter_enabled;
|
||||
};
|
||||
|
||||
static int __msft_add_monitor_pattern(struct hci_dev *hdev,
|
||||
struct adv_monitor *monitor);
|
||||
|
||||
bool msft_monitor_supported(struct hci_dev *hdev)
|
||||
{
|
||||
return !!(msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR);
|
||||
}
|
||||
|
||||
static bool read_supported_features(struct hci_dev *hdev,
|
||||
struct msft_data *msft)
|
||||
{
|
||||
@ -72,6 +150,35 @@ failed:
|
||||
return false;
|
||||
}
|
||||
|
||||
/* This function requires the caller holds hdev->lock */
|
||||
static void reregister_monitor_on_restart(struct hci_dev *hdev, int handle)
|
||||
{
|
||||
struct adv_monitor *monitor;
|
||||
struct msft_data *msft = hdev->msft_data;
|
||||
int err;
|
||||
|
||||
while (1) {
|
||||
monitor = idr_get_next(&hdev->adv_monitors_idr, &handle);
|
||||
if (!monitor) {
|
||||
/* All monitors have been reregistered */
|
||||
msft->reregistering = false;
|
||||
hci_update_background_scan(hdev);
|
||||
return;
|
||||
}
|
||||
|
||||
msft->pending_add_handle = (u16)handle;
|
||||
err = __msft_add_monitor_pattern(hdev, monitor);
|
||||
|
||||
/* If success, we return and wait for monitor added callback */
|
||||
if (!err)
|
||||
return;
|
||||
|
||||
/* Otherwise remove the monitor and keep registering */
|
||||
hci_free_adv_monitor(hdev, monitor);
|
||||
handle++;
|
||||
}
|
||||
}
|
||||
|
||||
void msft_do_open(struct hci_dev *hdev)
|
||||
{
|
||||
struct msft_data *msft;
|
||||
@ -90,12 +197,21 @@ void msft_do_open(struct hci_dev *hdev)
|
||||
return;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&msft->handle_map);
|
||||
hdev->msft_data = msft;
|
||||
|
||||
if (msft_monitor_supported(hdev)) {
|
||||
msft->reregistering = true;
|
||||
msft_set_filter_enable(hdev, true);
|
||||
reregister_monitor_on_restart(hdev, 0);
|
||||
}
|
||||
}
|
||||
|
||||
void msft_do_close(struct hci_dev *hdev)
|
||||
{
|
||||
struct msft_data *msft = hdev->msft_data;
|
||||
struct msft_monitor_advertisement_handle_data *handle_data, *tmp;
|
||||
struct adv_monitor *monitor;
|
||||
|
||||
if (!msft)
|
||||
return;
|
||||
@ -104,6 +220,17 @@ void msft_do_close(struct hci_dev *hdev)
|
||||
|
||||
hdev->msft_data = NULL;
|
||||
|
||||
list_for_each_entry_safe(handle_data, tmp, &msft->handle_map, list) {
|
||||
monitor = idr_find(&hdev->adv_monitors_idr,
|
||||
handle_data->mgmt_handle);
|
||||
|
||||
if (monitor && monitor->state == ADV_MONITOR_STATE_OFFLOADED)
|
||||
monitor->state = ADV_MONITOR_STATE_REGISTERED;
|
||||
|
||||
list_del(&handle_data->list);
|
||||
kfree(handle_data);
|
||||
}
|
||||
|
||||
kfree(msft->evt_prefix);
|
||||
kfree(msft);
|
||||
}
|
||||
@ -147,3 +274,334 @@ __u64 msft_get_features(struct hci_dev *hdev)
|
||||
|
||||
return msft ? msft->features : 0;
|
||||
}
|
||||
|
||||
/* is_mgmt = true matches the handle exposed to userspace via mgmt.
|
||||
* is_mgmt = false matches the handle used by the msft controller.
|
||||
* This function requires the caller holds hdev->lock
|
||||
*/
|
||||
static struct msft_monitor_advertisement_handle_data *msft_find_handle_data
|
||||
(struct hci_dev *hdev, u16 handle, bool is_mgmt)
|
||||
{
|
||||
struct msft_monitor_advertisement_handle_data *entry;
|
||||
struct msft_data *msft = hdev->msft_data;
|
||||
|
||||
list_for_each_entry(entry, &msft->handle_map, list) {
|
||||
if (is_mgmt && entry->mgmt_handle == handle)
|
||||
return entry;
|
||||
if (!is_mgmt && entry->msft_handle == handle)
|
||||
return entry;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void msft_le_monitor_advertisement_cb(struct hci_dev *hdev,
|
||||
u8 status, u16 opcode,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
struct msft_rp_le_monitor_advertisement *rp;
|
||||
struct adv_monitor *monitor;
|
||||
struct msft_monitor_advertisement_handle_data *handle_data;
|
||||
struct msft_data *msft = hdev->msft_data;
|
||||
|
||||
hci_dev_lock(hdev);
|
||||
|
||||
monitor = idr_find(&hdev->adv_monitors_idr, msft->pending_add_handle);
|
||||
if (!monitor) {
|
||||
bt_dev_err(hdev, "msft add advmon: monitor %d is not found!",
|
||||
msft->pending_add_handle);
|
||||
status = HCI_ERROR_UNSPECIFIED;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
if (status)
|
||||
goto unlock;
|
||||
|
||||
rp = (struct msft_rp_le_monitor_advertisement *)skb->data;
|
||||
if (skb->len < sizeof(*rp)) {
|
||||
status = HCI_ERROR_UNSPECIFIED;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
handle_data = kmalloc(sizeof(*handle_data), GFP_KERNEL);
|
||||
if (!handle_data) {
|
||||
status = HCI_ERROR_UNSPECIFIED;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
handle_data->mgmt_handle = monitor->handle;
|
||||
handle_data->msft_handle = rp->handle;
|
||||
INIT_LIST_HEAD(&handle_data->list);
|
||||
list_add(&handle_data->list, &msft->handle_map);
|
||||
|
||||
monitor->state = ADV_MONITOR_STATE_OFFLOADED;
|
||||
|
||||
unlock:
|
||||
if (status && monitor)
|
||||
hci_free_adv_monitor(hdev, monitor);
|
||||
|
||||
/* If in restart/reregister sequence, keep registering. */
|
||||
if (msft->reregistering)
|
||||
reregister_monitor_on_restart(hdev,
|
||||
msft->pending_add_handle + 1);
|
||||
|
||||
hci_dev_unlock(hdev);
|
||||
|
||||
if (!msft->reregistering)
|
||||
hci_add_adv_patterns_monitor_complete(hdev, status);
|
||||
}
|
||||
|
||||
static void msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev,
|
||||
u8 status, u16 opcode,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
struct msft_cp_le_cancel_monitor_advertisement *cp;
|
||||
struct msft_rp_le_cancel_monitor_advertisement *rp;
|
||||
struct adv_monitor *monitor;
|
||||
struct msft_monitor_advertisement_handle_data *handle_data;
|
||||
struct msft_data *msft = hdev->msft_data;
|
||||
int err;
|
||||
bool pending;
|
||||
|
||||
if (status)
|
||||
goto done;
|
||||
|
||||
rp = (struct msft_rp_le_cancel_monitor_advertisement *)skb->data;
|
||||
if (skb->len < sizeof(*rp)) {
|
||||
status = HCI_ERROR_UNSPECIFIED;
|
||||
goto done;
|
||||
}
|
||||
|
||||
hci_dev_lock(hdev);
|
||||
|
||||
cp = hci_sent_cmd_data(hdev, hdev->msft_opcode);
|
||||
handle_data = msft_find_handle_data(hdev, cp->handle, false);
|
||||
|
||||
if (handle_data) {
|
||||
monitor = idr_find(&hdev->adv_monitors_idr,
|
||||
handle_data->mgmt_handle);
|
||||
if (monitor)
|
||||
hci_free_adv_monitor(hdev, monitor);
|
||||
|
||||
list_del(&handle_data->list);
|
||||
kfree(handle_data);
|
||||
}
|
||||
|
||||
/* If remove all monitors is required, we need to continue the process
|
||||
* here because the earlier it was paused when waiting for the
|
||||
* response from controller.
|
||||
*/
|
||||
if (msft->pending_remove_handle == 0) {
|
||||
pending = hci_remove_all_adv_monitor(hdev, &err);
|
||||
if (pending) {
|
||||
hci_dev_unlock(hdev);
|
||||
return;
|
||||
}
|
||||
|
||||
if (err)
|
||||
status = HCI_ERROR_UNSPECIFIED;
|
||||
}
|
||||
|
||||
hci_dev_unlock(hdev);
|
||||
|
||||
done:
|
||||
hci_remove_adv_monitor_complete(hdev, status);
|
||||
}
|
||||
|
||||
static void msft_le_set_advertisement_filter_enable_cb(struct hci_dev *hdev,
|
||||
u8 status, u16 opcode,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
struct msft_cp_le_set_advertisement_filter_enable *cp;
|
||||
struct msft_rp_le_set_advertisement_filter_enable *rp;
|
||||
struct msft_data *msft = hdev->msft_data;
|
||||
|
||||
rp = (struct msft_rp_le_set_advertisement_filter_enable *)skb->data;
|
||||
if (skb->len < sizeof(*rp))
|
||||
return;
|
||||
|
||||
/* Error 0x0C would be returned if the filter enabled status is
|
||||
* already set to whatever we were trying to set.
|
||||
* Although the default state should be disabled, some controller set
|
||||
* the initial value to enabled. Because there is no way to know the
|
||||
* actual initial value before sending this command, here we also treat
|
||||
* error 0x0C as success.
|
||||
*/
|
||||
if (status != 0x00 && status != 0x0C)
|
||||
return;
|
||||
|
||||
hci_dev_lock(hdev);
|
||||
|
||||
cp = hci_sent_cmd_data(hdev, hdev->msft_opcode);
|
||||
msft->filter_enabled = cp->enable;
|
||||
|
||||
if (status == 0x0C)
|
||||
bt_dev_warn(hdev, "MSFT filter_enable is already %s",
|
||||
cp->enable ? "on" : "off");
|
||||
|
||||
hci_dev_unlock(hdev);
|
||||
}
|
||||
|
||||
static bool msft_monitor_rssi_valid(struct adv_monitor *monitor)
|
||||
{
|
||||
struct adv_rssi_thresholds *r = &monitor->rssi;
|
||||
|
||||
if (r->high_threshold < MSFT_RSSI_THRESHOLD_VALUE_MIN ||
|
||||
r->high_threshold > MSFT_RSSI_THRESHOLD_VALUE_MAX ||
|
||||
r->low_threshold < MSFT_RSSI_THRESHOLD_VALUE_MIN ||
|
||||
r->low_threshold > MSFT_RSSI_THRESHOLD_VALUE_MAX)
|
||||
return false;
|
||||
|
||||
/* High_threshold_timeout is not supported,
|
||||
* once high_threshold is reached, events are immediately reported.
|
||||
*/
|
||||
if (r->high_threshold_timeout != 0)
|
||||
return false;
|
||||
|
||||
if (r->low_threshold_timeout > MSFT_RSSI_LOW_TIMEOUT_MAX)
|
||||
return false;
|
||||
|
||||
/* Sampling period from 0x00 to 0xFF are all allowed */
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool msft_monitor_pattern_valid(struct adv_monitor *monitor)
|
||||
{
|
||||
return msft_monitor_rssi_valid(monitor);
|
||||
/* No additional check needed for pattern-based monitor */
|
||||
}
|
||||
|
||||
/* This function requires the caller holds hdev->lock */
|
||||
static int __msft_add_monitor_pattern(struct hci_dev *hdev,
|
||||
struct adv_monitor *monitor)
|
||||
{
|
||||
struct msft_cp_le_monitor_advertisement *cp;
|
||||
struct msft_le_monitor_advertisement_pattern_data *pattern_data;
|
||||
struct msft_le_monitor_advertisement_pattern *pattern;
|
||||
struct adv_pattern *entry;
|
||||
struct hci_request req;
|
||||
struct msft_data *msft = hdev->msft_data;
|
||||
size_t total_size = sizeof(*cp) + sizeof(*pattern_data);
|
||||
ptrdiff_t offset = 0;
|
||||
u8 pattern_count = 0;
|
||||
int err = 0;
|
||||
|
||||
if (!msft_monitor_pattern_valid(monitor))
|
||||
return -EINVAL;
|
||||
|
||||
list_for_each_entry(entry, &monitor->patterns, list) {
|
||||
pattern_count++;
|
||||
total_size += sizeof(*pattern) + entry->length;
|
||||
}
|
||||
|
||||
cp = kmalloc(total_size, GFP_KERNEL);
|
||||
if (!cp)
|
||||
return -ENOMEM;
|
||||
|
||||
cp->sub_opcode = MSFT_OP_LE_MONITOR_ADVERTISEMENT;
|
||||
cp->rssi_high = monitor->rssi.high_threshold;
|
||||
cp->rssi_low = monitor->rssi.low_threshold;
|
||||
cp->rssi_low_interval = (u8)monitor->rssi.low_threshold_timeout;
|
||||
cp->rssi_sampling_period = monitor->rssi.sampling_period;
|
||||
|
||||
cp->cond_type = MSFT_MONITOR_ADVERTISEMENT_TYPE_PATTERN;
|
||||
|
||||
pattern_data = (void *)cp->data;
|
||||
pattern_data->count = pattern_count;
|
||||
|
||||
list_for_each_entry(entry, &monitor->patterns, list) {
|
||||
pattern = (void *)(pattern_data->data + offset);
|
||||
/* the length also includes data_type and offset */
|
||||
pattern->length = entry->length + 2;
|
||||
pattern->data_type = entry->ad_type;
|
||||
pattern->start_byte = entry->offset;
|
||||
memcpy(pattern->pattern, entry->value, entry->length);
|
||||
offset += sizeof(*pattern) + entry->length;
|
||||
}
|
||||
|
||||
hci_req_init(&req, hdev);
|
||||
hci_req_add(&req, hdev->msft_opcode, total_size, cp);
|
||||
err = hci_req_run_skb(&req, msft_le_monitor_advertisement_cb);
|
||||
kfree(cp);
|
||||
|
||||
if (!err)
|
||||
msft->pending_add_handle = monitor->handle;
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/* This function requires the caller holds hdev->lock */
|
||||
int msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor)
|
||||
{
|
||||
struct msft_data *msft = hdev->msft_data;
|
||||
|
||||
if (!msft)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (msft->reregistering)
|
||||
return -EBUSY;
|
||||
|
||||
return __msft_add_monitor_pattern(hdev, monitor);
|
||||
}
|
||||
|
||||
/* This function requires the caller holds hdev->lock */
|
||||
int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
|
||||
u16 handle)
|
||||
{
|
||||
struct msft_cp_le_cancel_monitor_advertisement cp;
|
||||
struct msft_monitor_advertisement_handle_data *handle_data;
|
||||
struct hci_request req;
|
||||
struct msft_data *msft = hdev->msft_data;
|
||||
int err = 0;
|
||||
|
||||
if (!msft)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (msft->reregistering)
|
||||
return -EBUSY;
|
||||
|
||||
handle_data = msft_find_handle_data(hdev, monitor->handle, true);
|
||||
|
||||
/* If no matched handle, just remove without telling controller */
|
||||
if (!handle_data)
|
||||
return -ENOENT;
|
||||
|
||||
cp.sub_opcode = MSFT_OP_LE_CANCEL_MONITOR_ADVERTISEMENT;
|
||||
cp.handle = handle_data->msft_handle;
|
||||
|
||||
hci_req_init(&req, hdev);
|
||||
hci_req_add(&req, hdev->msft_opcode, sizeof(cp), &cp);
|
||||
err = hci_req_run_skb(&req, msft_le_cancel_monitor_advertisement_cb);
|
||||
|
||||
if (!err)
|
||||
msft->pending_remove_handle = handle;
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
void msft_req_add_set_filter_enable(struct hci_request *req, bool enable)
|
||||
{
|
||||
struct hci_dev *hdev = req->hdev;
|
||||
struct msft_cp_le_set_advertisement_filter_enable cp;
|
||||
|
||||
cp.sub_opcode = MSFT_OP_LE_SET_ADVERTISEMENT_FILTER_ENABLE;
|
||||
cp.enable = enable;
|
||||
|
||||
hci_req_add(req, hdev->msft_opcode, sizeof(cp), &cp);
|
||||
}
|
||||
|
||||
int msft_set_filter_enable(struct hci_dev *hdev, bool enable)
|
||||
{
|
||||
struct hci_request req;
|
||||
struct msft_data *msft = hdev->msft_data;
|
||||
int err;
|
||||
|
||||
if (!msft)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
hci_req_init(&req, hdev);
|
||||
msft_req_add_set_filter_enable(&req, enable);
|
||||
err = hci_req_run_skb(&req, msft_le_set_advertisement_filter_enable_cb);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@ -12,16 +12,46 @@

#if IS_ENABLED(CONFIG_BT_MSFTEXT)

bool msft_monitor_supported(struct hci_dev *hdev);
void msft_do_open(struct hci_dev *hdev);
void msft_do_close(struct hci_dev *hdev);
void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb);
__u64 msft_get_features(struct hci_dev *hdev);
int msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor);
int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
u16 handle);
void msft_req_add_set_filter_enable(struct hci_request *req, bool enable);
int msft_set_filter_enable(struct hci_dev *hdev, bool enable);

#else

static inline bool msft_monitor_supported(struct hci_dev *hdev)
{
return false;
}

static inline void msft_do_open(struct hci_dev *hdev) {}
static inline void msft_do_close(struct hci_dev *hdev) {}
static inline void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb) {}
static inline __u64 msft_get_features(struct hci_dev *hdev) { return 0; }
static inline int msft_add_monitor_pattern(struct hci_dev *hdev,
struct adv_monitor *monitor)
{
return -EOPNOTSUPP;
}

static inline int msft_remove_monitor(struct hci_dev *hdev,
struct adv_monitor *monitor,
u16 handle)
{
return -EOPNOTSUPP;
}

static inline void msft_req_add_set_filter_enable(struct hci_request *req,
bool enable) {}
static inline int msft_set_filter_enable(struct hci_dev *hdev, bool enable)
{
return -EOPNOTSUPP;
}

#endif

@ -25,7 +25,6 @@
#include <linux/crypto.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/b128ops.h>
#include <crypto/hash.h>
#include <crypto/kpp.h>

@ -425,7 +424,7 @@ static int smp_c1(const u8 k[16],
SMP_DBG("p1 %16phN", p1);

/* res = r XOR p1 */
u128_xor((u128 *) res, (u128 *) r, (u128 *) p1);
crypto_xor_cpy(res, r, p1, sizeof(p1));

/* res = e(k, res) */
err = smp_e(k, res);
@ -442,7 +441,7 @@ static int smp_c1(const u8 k[16],
SMP_DBG("p2 %16phN", p2);

/* res = res XOR p2 */
u128_xor((u128 *) res, (u128 *) res, (u128 *) p2);
crypto_xor(res, p2, sizeof(p2));

/* res = e(k, res) */
err = smp_e(k, res);