2019-05-29 01:10:04 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2009-07-14 06:34:54 +08:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2009, Microsoft Corporation.
|
|
|
|
*
|
|
|
|
* Authors:
|
2009-11-24 01:00:22 +08:00
|
|
|
* Haiyang Zhang <haiyangz@microsoft.com>
|
2009-07-14 06:34:54 +08:00
|
|
|
* Hank Janssen <hjanssen@microsoft.com>
|
|
|
|
*/
|
2011-03-30 04:58:48 +08:00
|
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
|
|
|
2009-07-15 06:08:20 +08:00
|
|
|
#include <linux/kernel.h>
|
2011-02-12 01:59:43 +08:00
|
|
|
#include <linux/sched.h>
|
|
|
|
#include <linux/wait.h>
|
2009-07-16 02:06:01 +08:00
|
|
|
#include <linux/mm.h>
|
2009-07-17 02:50:41 +08:00
|
|
|
#include <linux/delay.h>
|
2009-09-03 01:33:05 +08:00
|
|
|
#include <linux/io.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the followings.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable easily on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 16:04:11 +08:00
|
|
|
#include <linux/slab.h>
|
2011-09-02 03:19:41 +08:00
|
|
|
#include <linux/netdevice.h>
|
2011-12-16 05:45:16 +08:00
|
|
|
#include <linux/if_ether.h>
|
2015-06-02 17:01:38 +08:00
|
|
|
#include <linux/vmalloc.h>
|
2017-07-20 02:53:16 +08:00
|
|
|
#include <linux/rtnetlink.h>
|
2017-07-25 01:57:27 +08:00
|
|
|
#include <linux/prefetch.h>
|
2017-07-20 02:53:16 +08:00
|
|
|
|
2014-05-01 01:14:31 +08:00
|
|
|
#include <asm/sync_bitops.h>
|
2021-02-01 22:48:14 +08:00
|
|
|
#include <asm/mshyperv.h>
|
2011-05-13 10:34:15 +08:00
|
|
|
|
2011-05-13 10:34:37 +08:00
|
|
|
#include "hyperv_net.h"
|
2018-03-17 06:44:28 +08:00
|
|
|
#include "netvsc_trace.h"
|
2009-07-14 06:34:54 +08:00
|
|
|
|
2016-04-15 07:31:54 +08:00
|
|
|
/*
|
|
|
|
* Switch the data path from the synthetic interface to the VF
|
|
|
|
* interface.
|
|
|
|
*/
|
2021-03-30 07:21:35 +08:00
|
|
|
/* Ask the host (VSP) to route data traffic through the VF NIC (vf = true)
 * or the synthetic NIC (vf = false), then wait for the host's completion.
 *
 * Returns 0 on success, or a negative errno when the switch message could
 * not be delivered (retried up to RETRY_MAX times on -EAGAIN).
 *
 * NOTE(review): rtnl_dereference() below implies the caller holds RTNL —
 * confirm against call sites.
 */
int netvsc_switch_datapath(struct net_device *ndev, bool vf)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
	struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
	int ret, retry = 0;

	/* Block sending traffic to VF if it's about to be gone */
	if (!vf)
		net_device_ctx->data_path_is_vf = vf;

	/* Build the NVSP "switch data path" request in the shared init packet */
	memset(init_pkt, 0, sizeof(struct nvsp_message));
	init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
	if (vf)
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_VF;
	else
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_SYNTHETIC;

again:
	trace_nvsp_send(ndev, init_pkt);

	/* Completion-requested send: the host's reply completes
	 * channel_init_wait (see wait_for_completion below).
	 */
	ret = vmbus_sendpacket(dev->channel, init_pkt,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_pkt, VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	/* If failed to switch to/from VF, let data_path_is_vf stay false,
	 * so we use synthetic path to send data.
	 */
	if (ret) {
		if (ret != -EAGAIN) {
			netdev_err(ndev,
				   "Unable to send sw datapath msg, err: %d\n",
				   ret);
			return ret;
		}

		/* -EAGAIN: ring buffer full; back off briefly and retry */
		if (retry++ < RETRY_MAX) {
			usleep_range(RETRY_US_LO, RETRY_US_HI);
			goto again;
		} else {
			netdev_err(
				ndev,
				"Retry failed to send sw datapath msg, err: %d\n",
				ret);
			return ret;
		}
	}

	/* Wait for the host to acknowledge the switch before flipping
	 * the cached data-path flag.
	 */
	wait_for_completion(&nv_dev->channel_init_wait);
	net_device_ctx->data_path_is_vf = vf;

	return 0;
}
|
|
|
|
|
2018-06-30 05:07:16 +08:00
|
|
|
/* Worker to setup sub channels on initial setup
 * Initial hotplug event occurs in softirq context
 * and can't wait for channels.
 */
static void netvsc_subchan_work(struct work_struct *w)
{
	struct netvsc_device *nvdev =
		container_of(w, struct netvsc_device, subchan_work);
	struct rndis_device *rdev;
	int i, ret;

	/* Avoid deadlock with device removal already under RTNL */
	if (!rtnl_trylock()) {
		/* RTNL busy: requeue ourselves and try again later */
		schedule_work(w);
		return;
	}

	/* rdev may be NULL if the RNDIS extension was already torn down */
	rdev = nvdev->extension;
	if (rdev) {
		ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
		if (ret == 0) {
			netif_device_attach(rdev->ndev);
		} else {
			/* fallback to only primary channel */
			for (i = 1; i < nvdev->num_chn; i++)
				netif_napi_del(&nvdev->chan_table[i].napi);

			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	rtnl_unlock();
}
|
|
|
|
|
2016-05-13 19:55:25 +08:00
|
|
|
static struct netvsc_device *alloc_net_device(void)
|
2009-07-14 06:34:54 +08:00
|
|
|
{
|
2010-12-11 04:03:54 +08:00
|
|
|
struct netvsc_device *net_device;
|
2009-07-14 06:34:54 +08:00
|
|
|
|
2010-12-11 04:03:54 +08:00
|
|
|
net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
|
|
|
|
if (!net_device)
|
2009-07-14 06:34:54 +08:00
|
|
|
return NULL;
|
|
|
|
|
2012-06-04 14:42:38 +08:00
|
|
|
init_waitqueue_head(&net_device->wait_drain);
|
2011-08-28 02:31:12 +08:00
|
|
|
net_device->destroy = false;
|
2020-02-22 00:32:18 +08:00
|
|
|
net_device->tx_disable = true;
|
2017-12-13 08:48:39 +08:00
|
|
|
|
2015-03-27 00:03:37 +08:00
|
|
|
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
|
|
|
|
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
|
2017-08-10 08:46:11 +08:00
|
|
|
|
2016-08-24 03:17:51 +08:00
|
|
|
init_completion(&net_device->channel_init_wait);
|
2017-08-04 08:13:54 +08:00
|
|
|
init_waitqueue_head(&net_device->subchan_open);
|
2018-06-30 05:07:16 +08:00
|
|
|
INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);
|
2015-03-27 00:03:37 +08:00
|
|
|
|
2010-12-11 04:03:54 +08:00
|
|
|
return net_device;
|
2009-07-14 06:34:54 +08:00
|
|
|
}
|
|
|
|
|
2017-03-23 05:51:00 +08:00
|
|
|
static void free_netvsc_device(struct rcu_head *head)
|
2014-08-16 03:18:19 +08:00
|
|
|
{
|
2017-03-23 05:51:00 +08:00
|
|
|
struct netvsc_device *nvdev
|
|
|
|
= container_of(head, struct netvsc_device, rcu);
|
2016-08-20 05:47:09 +08:00
|
|
|
int i;
|
|
|
|
|
2018-03-21 06:03:03 +08:00
|
|
|
kfree(nvdev->extension);
|
|
|
|
vfree(nvdev->recv_buf);
|
|
|
|
vfree(nvdev->send_buf);
|
|
|
|
kfree(nvdev->send_section_map);
|
|
|
|
|
2020-01-24 05:52:34 +08:00
|
|
|
for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
|
|
|
|
xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
|
2021-01-27 00:29:07 +08:00
|
|
|
kfree(nvdev->chan_table[i].recv_buf);
|
2017-07-28 23:59:45 +08:00
|
|
|
vfree(nvdev->chan_table[i].mrc.slots);
|
2020-01-24 05:52:34 +08:00
|
|
|
}
|
2016-08-20 05:47:09 +08:00
|
|
|
|
2014-08-16 03:18:19 +08:00
|
|
|
kfree(nvdev);
|
|
|
|
}
|
|
|
|
|
2017-03-23 05:51:00 +08:00
|
|
|
/* Schedule RCU-deferred destruction of a netvsc_device.
 * Readers may still hold references under rcu_read_lock(), so the actual
 * free (free_netvsc_device) runs only after a grace period elapses.
 */
static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
{
	call_rcu(&nvdev->rcu, free_netvsc_device);
}
|
2009-07-14 06:34:54 +08:00
|
|
|
|
2018-04-06 03:09:19 +08:00
|
|
|
/* Revoke the receive buffer previously granted to the host, if any.
 * Safe to call when no buffer was set up (recv_section_cnt == 0 → no-op).
 * On success, recv_section_cnt is cleared so a later call does nothing.
 */
static void netvsc_revoke_recv_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;
	int ret;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (ie sent
	 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.
		revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

		trace_nvsp_send(ndev, revoke_packet);

		/* Fire-and-forget: no completion requested for the revoke */
		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       VMBUS_RQST_ID_NO_RESPONSE,
				       VM_PKT_DATA_INBAND, 0);
		/* If the failure is because the channel is rescinded;
		 * ignore the failure since we cannot send on a rescinded
		 * channel. This would allow us to properly cleanup
		 * even when the channel is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke receive buffer to netvsp\n");
			return;
		}
		/* Mark the buffer as revoked so teardown/retry is safe */
		net_device->recv_section_cnt = 0;
	}
}
|
|
|
|
|
|
|
|
/* Revoke the send buffer previously granted to the host, if any.
 * Safe to call when no buffer was set up (send_section_cnt == 0 → no-op).
 * On success, send_section_cnt is cleared so a later call does nothing.
 */
static void netvsc_revoke_send_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;
	int ret;

	/* Deal with the send buffer we may have setup.
	 * If we got a send section size, it means we received a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->send_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id =
			NETVSC_SEND_BUFFER_ID;

		trace_nvsp_send(ndev, revoke_packet);

		/* Fire-and-forget: no completion requested for the revoke */
		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       VMBUS_RQST_ID_NO_RESPONSE,
				       VM_PKT_DATA_INBAND, 0);

		/* If the failure is because the channel is rescinded;
		 * ignore the failure since we cannot send on a rescinded
		 * channel. This would allow us to properly cleanup
		 * even when the channel is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;

		/* If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke send buffer to netvsp\n");
			return;
		}
		/* Mark the buffer as revoked so teardown/retry is safe */
		net_device->send_section_cnt = 0;
	}
}
|
|
|
|
|
2018-04-06 03:09:19 +08:00
|
|
|
/* Tear down the GPADL (guest physical address descriptor list) that maps
 * the receive buffer into the host. No-op when no handle is outstanding.
 * On success the handle is cleared so repeated calls are safe.
 *
 * NOTE(review): the receive buffer should be revoked
 * (netvsc_revoke_recv_buf) before tearing down its gpadl — confirm callers
 * preserve that ordering.
 */
static void netvsc_teardown_recv_gpadl(struct hv_device *device,
				       struct netvsc_device *net_device,
				       struct net_device *ndev)
{
	int ret;

	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->recv_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown receive buffer's gpadl\n");
			return;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}
}
|
|
|
|
|
|
|
|
/* Tear down the GPADL that maps the send buffer into the host.
 * No-op when no handle is outstanding. On success the handle is cleared
 * so repeated calls are safe.
 *
 * NOTE(review): the send buffer should be revoked
 * (netvsc_revoke_send_buf) before tearing down its gpadl — confirm callers
 * preserve that ordering.
 */
static void netvsc_teardown_send_gpadl(struct hv_device *device,
				       struct netvsc_device *net_device,
				       struct net_device *ndev)
{
	int ret;

	if (net_device->send_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->send_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown send buffer's gpadl\n");
			return;
		}
		net_device->send_buf_gpadl_handle = 0;
	}
}
|
|
|
|
|
2017-07-28 23:59:45 +08:00
|
|
|
/* Allocate the receive-completion ring for queue @q_idx.
 *
 * The ring holds net_device->recv_completion_cnt entries. Allocation is
 * first attempted on the NUMA node of the channel's target CPU, then
 * falls back to any node. Returns 0 on success, -ENOMEM on failure.
 */
int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
{
	struct netvsc_channel *chan = &net_device->chan_table[q_idx];
	int numa_node = cpu_to_node(chan->channel->target_cpu);
	size_t bytes = net_device->recv_completion_cnt *
		       sizeof(struct recv_comp_data);

	/* Prefer memory local to the channel's CPU; fall back to any node */
	chan->mrc.slots = vzalloc_node(bytes, numa_node);
	if (!chan->mrc.slots)
		chan->mrc.slots = vzalloc(bytes);

	if (!chan->mrc.slots)
		return -ENOMEM;

	return 0;
}
|
|
|
|
|
2017-06-09 07:21:22 +08:00
|
|
|
/* Set up the shared receive and send buffers with the host (NetVSP).
 *
 * For each buffer the sequence is: allocate guest memory, establish a
 * GPADL mapping it into the host, notify the host of the handle, wait
 * for the host's completion, then validate the host's response. Any
 * failure jumps to the common cleanup path which revokes and tears down
 * whatever was set up so far.
 *
 * Returns 0 on success or a negative errno.
 */
static int netvsc_init_buf(struct hv_device *device,
			   struct netvsc_device *net_device,
			   const struct netvsc_device_info *device_info)
{
	struct nvsp_1_message_send_receive_buffer_complete *resp;
	struct net_device *ndev = hv_get_drvdata(device);
	struct nvsp_message *init_packet;
	unsigned int buf_size;
	size_t map_words;
	int i, ret = 0;

	/* Get receive buffer area. */
	buf_size = device_info->recv_sections * device_info->recv_section_size;
	buf_size = roundup(buf_size, PAGE_SIZE);

	/* Legacy hosts only allow smaller receive buffer */
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		buf_size = min_t(unsigned int, buf_size,
				 NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);

	net_device->recv_buf = vzalloc(buf_size);
	if (!net_device->recv_buf) {
		netdev_err(ndev,
			   "unable to allocate receive buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	net_device->recv_buf_size = buf_size;

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel. Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.
		gpadl_handle = net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.
		send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	trace_nvsp_send(ndev, init_packet);

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	/* Host's reply completes channel_init_wait and fills init_packet */
	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
	if (resp->status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev,
			   "Unable to complete receive buffer initialization with NetVsp - status %d\n",
			   resp->status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
		   resp->num_sections, resp->sections[0].sub_alloc_size,
		   resp->sections[0].num_sub_allocs);

	/* There should only be one section for the entire receive buffer */
	if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	net_device->recv_section_size = resp->sections[0].sub_alloc_size;
	net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;

	/* Ensure buffer will not overflow */
	if (net_device->recv_section_size < NETVSC_MTU_MIN || (u64)net_device->recv_section_size *
	    (u64)net_device->recv_section_cnt > (u64)buf_size) {
		netdev_err(ndev, "invalid recv_section_size %u\n",
			   net_device->recv_section_size);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Per-channel staging buffer, one receive section in size */
	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		struct netvsc_channel *nvchan = &net_device->chan_table[i];

		nvchan->recv_buf = kzalloc(net_device->recv_section_size, GFP_KERNEL);
		if (nvchan->recv_buf == NULL) {
			ret = -ENOMEM;
			goto cleanup;
		}
	}

	/* Setup receive completion ring.
	 * Add 1 to the recv_section_cnt because at least one entry in a
	 * ring buffer has to be empty.
	 */
	net_device->recv_completion_cnt = net_device->recv_section_cnt + 1;
	ret = netvsc_alloc_recv_comp_ring(net_device, 0);
	if (ret)
		goto cleanup;

	/* Now setup the send buffer. */
	buf_size = device_info->send_sections * device_info->send_section_size;
	buf_size = round_up(buf_size, PAGE_SIZE);

	net_device->send_buf = vzalloc(buf_size);
	if (!net_device->send_buf) {
		netdev_err(ndev, "unable to allocate send buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Establish the gpadl handle for this buffer on this
	 * channel. Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

	trace_nvsp_send(ndev, init_packet);

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	/* Host's reply completes channel_init_wait and fills init_packet */
	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete send buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size = init_packet->msg.
				v1_msg.send_send_buf_complete.section_size;
	if (net_device->send_section_size < NETVSC_MTU_MIN) {
		netdev_err(ndev, "invalid send_section_size %u\n",
			   net_device->send_section_size);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Section count is simply the size divided by the section size. */
	net_device->send_section_cnt = buf_size / net_device->send_section_size;

	netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
		   net_device->send_section_size, net_device->send_section_cnt);

	/* Setup state for managing the send buffer. */
	map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);

	/* One bit per send section: tracks which sections are in use */
	net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL);
	if (net_device->send_section_map == NULL) {
		ret = -ENOMEM;
		goto cleanup;
	}

	goto exit;

cleanup:
	/* Undo everything set up so far; each helper is a no-op for
	 * resources that were never established.
	 */
	netvsc_revoke_recv_buf(device, net_device, ndev);
	netvsc_revoke_send_buf(device, net_device, ndev);
	netvsc_teardown_recv_gpadl(device, net_device, ndev);
	netvsc_teardown_send_gpadl(device, net_device, ndev);

exit:
	return ret;
}
|
|
|
|
|
2011-12-16 05:45:16 +08:00
|
|
|
/* Negotiate NVSP protocol version
 *
 * Offer exactly @nvsp_ver (min == max) to the host and wait for its reply.
 * For NVSPv2 and later, also send the NDIS configuration advertising MTU
 * and capability bits (802.1q, SR-IOV, teaming, RSC) appropriate for the
 * negotiated version.
 *
 * Returns 0 if the host accepted @nvsp_ver, -EINVAL if it rejected it,
 * or the vmbus_sendpacket() error.
 */
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	struct net_device *ndev = hv_get_drvdata(device);
	int ret;

	/* Offer exactly one version: min == max == nvsp_ver */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
	trace_nvsp_send(ndev, init_packet);

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		return ret;

	/* Host's reply completes channel_init_wait and fills init_packet */
	wait_for_completion(&net_device->channel_init_wait);

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	/* NVSPv2 or later: Send NDIS config */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
		/* SR-IOV is not offered on isolated (e.g. confidential) VMs */
		if (hv_is_isolation_supported())
			netdev_info(ndev, "SR-IOV not advertised by guests on the host supporting isolation\n");
		else
			init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

		/* Teaming bit is needed to receive link speed updates */
		init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
	}

	/* Receive segment coalescing, available from NVSP 6.1 */
	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_61)
		init_packet->msg.v2_msg.send_ndis_config.capability.rsc = 1;

	trace_nvsp_send(ndev, init_packet);

	/* No completion requested: NDIS config is fire-and-forget */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       VMBUS_RQST_ID_NO_RESPONSE,
			       VM_PKT_DATA_INBAND, 0);

	return ret;
}
|
|
|
|
|
2017-06-09 07:21:22 +08:00
|
|
|
static int netvsc_connect_vsp(struct hv_device *device,
|
2017-08-10 08:46:11 +08:00
|
|
|
struct netvsc_device *net_device,
|
|
|
|
const struct netvsc_device_info *device_info)
|
2011-12-16 05:45:16 +08:00
|
|
|
{
|
2018-03-17 06:44:28 +08:00
|
|
|
struct net_device *ndev = hv_get_drvdata(device);
|
2017-09-22 23:50:23 +08:00
|
|
|
static const u32 ver_list[] = {
|
2016-08-24 03:17:49 +08:00
|
|
|
NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
|
2018-04-18 06:31:47 +08:00
|
|
|
NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5,
|
|
|
|
NVSP_PROTOCOL_VERSION_6, NVSP_PROTOCOL_VERSION_61
|
2017-06-09 07:21:22 +08:00
|
|
|
};
|
|
|
|
struct nvsp_message *init_packet;
|
|
|
|
int ndis_version, i, ret;
|
2011-12-16 05:45:16 +08:00
|
|
|
|
|
|
|
init_packet = &net_device->channel_init_pkt;
|
|
|
|
|
|
|
|
/* Negotiate the latest NVSP protocol supported */
|
2016-08-24 03:17:49 +08:00
|
|
|
for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
|
2014-02-20 07:49:45 +08:00
|
|
|
if (negotiate_nvsp_ver(device, net_device, init_packet,
|
|
|
|
ver_list[i]) == 0) {
|
|
|
|
net_device->nvsp_version = ver_list[i];
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (i < 0) {
|
2011-08-26 00:49:14 +08:00
|
|
|
ret = -EPROTO;
|
2011-02-12 01:59:43 +08:00
|
|
|
goto cleanup;
|
2009-07-14 06:34:54 +08:00
|
|
|
}
|
2011-12-16 05:45:16 +08:00
|
|
|
|
2021-02-01 22:48:14 +08:00
|
|
|
if (hv_is_isolation_supported() && net_device->nvsp_version < NVSP_PROTOCOL_VERSION_61) {
|
|
|
|
netdev_err(ndev, "Invalid NVSP version 0x%x (expected >= 0x%x) from the host supporting isolation\n",
|
|
|
|
net_device->nvsp_version, NVSP_PROTOCOL_VERSION_61);
|
|
|
|
ret = -EPROTO;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-12-16 05:45:16 +08:00
|
|
|
pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);
|
|
|
|
|
2009-07-28 04:47:24 +08:00
|
|
|
/* Send the ndis version */
|
2010-12-11 04:03:54 +08:00
|
|
|
memset(init_packet, 0, sizeof(struct nvsp_message));
|
2009-07-14 06:34:54 +08:00
|
|
|
|
2014-02-20 07:49:45 +08:00
|
|
|
if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
|
2014-04-10 06:00:46 +08:00
|
|
|
ndis_version = 0x00060001;
|
2014-02-20 07:49:45 +08:00
|
|
|
else
|
|
|
|
ndis_version = 0x0006001e;
|
2009-07-14 06:34:54 +08:00
|
|
|
|
2010-12-11 04:03:59 +08:00
|
|
|
init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
|
|
|
|
init_packet->msg.v1_msg.
|
|
|
|
send_ndis_ver.ndis_major_ver =
|
2010-12-11 04:03:54 +08:00
|
|
|
(ndis_version & 0xFFFF0000) >> 16;
|
2010-12-11 04:03:59 +08:00
|
|
|
init_packet->msg.v1_msg.
|
|
|
|
send_ndis_ver.ndis_minor_ver =
|
2010-12-11 04:03:54 +08:00
|
|
|
ndis_version & 0xFFFF;
|
2009-07-14 06:34:54 +08:00
|
|
|
|
2018-03-17 06:44:28 +08:00
|
|
|
trace_nvsp_send(ndev, init_packet);
|
|
|
|
|
2009-07-28 04:47:24 +08:00
|
|
|
/* Send the init request */
|
2010-12-11 04:03:54 +08:00
|
|
|
ret = vmbus_sendpacket(device->channel, init_packet,
|
2011-02-12 01:59:43 +08:00
|
|
|
sizeof(struct nvsp_message),
|
2020-11-09 18:04:02 +08:00
|
|
|
VMBUS_RQST_ID_NO_RESPONSE,
|
2011-02-12 01:59:43 +08:00
|
|
|
VM_PKT_DATA_INBAND, 0);
|
2011-08-26 00:49:14 +08:00
|
|
|
if (ret != 0)
|
2011-02-12 01:59:43 +08:00
|
|
|
goto cleanup;
|
2009-07-28 04:47:24 +08:00
|
|
|
|
2014-03-10 07:10:59 +08:00
|
|
|
|
2017-08-10 08:46:11 +08:00
|
|
|
ret = netvsc_init_buf(device, net_device, device_info);
|
2009-07-14 06:34:54 +08:00
|
|
|
|
2011-02-12 01:59:43 +08:00
|
|
|
cleanup:
|
2009-07-14 06:34:54 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2010-03-05 06:11:00 +08:00
|
|
|
/*
|
2010-12-11 04:03:55 +08:00
|
|
|
* netvsc_device_remove - Callback when the root bus device is removed
|
2009-09-03 01:33:05 +08:00
|
|
|
*/
|
2016-08-24 03:17:50 +08:00
|
|
|
void netvsc_device_remove(struct hv_device *device)
|
2009-07-14 06:34:54 +08:00
|
|
|
{
|
2016-05-13 19:55:22 +08:00
|
|
|
struct net_device *ndev = hv_get_drvdata(device);
|
|
|
|
struct net_device_context *net_device_ctx = netdev_priv(ndev);
|
2017-07-20 02:53:13 +08:00
|
|
|
struct netvsc_device *net_device
|
|
|
|
= rtnl_dereference(net_device_ctx->nvdev);
|
2017-02-28 02:26:49 +08:00
|
|
|
int i;
|
2009-07-14 06:34:54 +08:00
|
|
|
|
2018-04-06 03:09:20 +08:00
|
|
|
/*
|
|
|
|
* Revoke receive buffer. If host is pre-Win2016 then tear down
|
|
|
|
* receive buffer GPADL. Do the same for send buffer.
|
|
|
|
*/
|
2018-04-06 03:09:21 +08:00
|
|
|
netvsc_revoke_recv_buf(device, net_device, ndev);
|
2018-04-06 03:09:20 +08:00
|
|
|
if (vmbus_proto_version < VERSION_WIN10)
|
2018-04-06 03:09:21 +08:00
|
|
|
netvsc_teardown_recv_gpadl(device, net_device, ndev);
|
2018-04-06 03:09:20 +08:00
|
|
|
|
2018-04-06 03:09:21 +08:00
|
|
|
netvsc_revoke_send_buf(device, net_device, ndev);
|
2018-04-06 03:09:20 +08:00
|
|
|
if (vmbus_proto_version < VERSION_WIN10)
|
2018-04-06 03:09:21 +08:00
|
|
|
netvsc_teardown_send_gpadl(device, net_device, ndev);
|
2011-08-28 02:31:16 +08:00
|
|
|
|
2017-03-23 05:51:00 +08:00
|
|
|
RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
|
2011-08-28 02:31:14 +08:00
|
|
|
|
2020-04-06 08:15:07 +08:00
|
|
|
/* Disable NAPI and disassociate its context from the device. */
|
|
|
|
for (i = 0; i < net_device->num_chn; i++) {
|
|
|
|
/* See also vmbus_reset_channel_cb(). */
|
|
|
|
napi_disable(&net_device->chan_table[i].napi);
|
2018-03-21 06:03:02 +08:00
|
|
|
netif_napi_del(&net_device->chan_table[i].napi);
|
2020-04-06 08:15:07 +08:00
|
|
|
}
|
2018-03-21 06:03:02 +08:00
|
|
|
|
2011-09-14 01:59:54 +08:00
|
|
|
/*
|
|
|
|
* At this point, no one should be accessing net_device
|
|
|
|
* except in here
|
|
|
|
*/
|
2016-11-29 01:25:44 +08:00
|
|
|
netdev_dbg(ndev, "net device safe to remove\n");
|
2009-07-14 06:34:54 +08:00
|
|
|
|
2009-07-28 04:47:24 +08:00
|
|
|
/* Now, we can close the channel safely */
|
2010-12-11 04:03:54 +08:00
|
|
|
vmbus_close(device->channel);
|
2009-07-14 06:34:54 +08:00
|
|
|
|
2018-04-06 03:09:20 +08:00
|
|
|
/*
|
|
|
|
* If host is Win2016 or higher then we do the GPADL tear down
|
|
|
|
* here after VMBus is closed.
|
|
|
|
*/
|
2018-04-06 03:09:19 +08:00
|
|
|
if (vmbus_proto_version >= VERSION_WIN10) {
|
2018-04-06 03:09:21 +08:00
|
|
|
netvsc_teardown_recv_gpadl(device, net_device, ndev);
|
|
|
|
netvsc_teardown_send_gpadl(device, net_device, ndev);
|
2018-04-06 03:09:19 +08:00
|
|
|
}
|
2017-02-28 02:26:49 +08:00
|
|
|
|
2009-07-28 04:47:24 +08:00
|
|
|
/* Release all resources */
|
2017-03-23 05:51:00 +08:00
|
|
|
free_netvsc_device_rcu(net_device);
|
2009-07-14 06:34:54 +08:00
|
|
|
}
|
|
|
|
|
2012-03-27 21:20:45 +08:00
|
|
|
#define RING_AVAIL_PERCENT_HIWATER 20
|
|
|
|
#define RING_AVAIL_PERCENT_LOWATER 10
|
|
|
|
|
2014-05-01 01:14:31 +08:00
|
|
|
static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
|
|
|
|
u32 index)
|
|
|
|
{
|
|
|
|
sync_change_bit(index, net_device->send_section_map);
|
|
|
|
}
|
|
|
|
|
2018-04-27 05:34:25 +08:00
|
|
|
static void netvsc_send_tx_complete(struct net_device *ndev,
|
|
|
|
struct netvsc_device *net_device,
|
|
|
|
struct vmbus_channel *channel,
|
2017-04-08 02:41:19 +08:00
|
|
|
const struct vmpacket_descriptor *desc,
|
|
|
|
int budget)
|
2016-08-24 03:17:53 +08:00
|
|
|
{
|
2017-09-30 02:39:46 +08:00
|
|
|
struct net_device_context *ndev_ctx = netdev_priv(ndev);
|
2020-11-09 18:04:02 +08:00
|
|
|
struct sk_buff *skb;
|
2016-08-24 03:17:53 +08:00
|
|
|
u16 q_idx = 0;
|
|
|
|
int queue_sends;
|
2020-11-09 18:04:02 +08:00
|
|
|
u64 cmd_rqst;
|
|
|
|
|
|
|
|
cmd_rqst = vmbus_request_addr(&channel->requestor, (u64)desc->trans_id);
|
|
|
|
if (cmd_rqst == VMBUS_RQST_ERROR) {
|
|
|
|
netdev_err(ndev, "Incorrect transaction id\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
skb = (struct sk_buff *)(unsigned long)cmd_rqst;
|
2016-08-24 03:17:53 +08:00
|
|
|
|
|
|
|
/* Notify the layer above us */
|
|
|
|
if (likely(skb)) {
|
2017-01-25 05:06:12 +08:00
|
|
|
const struct hv_netvsc_packet *packet
|
2016-08-24 03:17:53 +08:00
|
|
|
= (struct hv_netvsc_packet *)skb->cb;
|
2017-01-25 05:06:12 +08:00
|
|
|
u32 send_index = packet->send_buf_index;
|
|
|
|
struct netvsc_stats *tx_stats;
|
2016-08-24 03:17:53 +08:00
|
|
|
|
|
|
|
if (send_index != NETVSC_INVALID_INDEX)
|
|
|
|
netvsc_free_send_slot(net_device, send_index);
|
2017-01-25 05:06:12 +08:00
|
|
|
q_idx = packet->q_idx;
|
2016-08-24 03:17:53 +08:00
|
|
|
|
2017-01-25 05:06:13 +08:00
|
|
|
tx_stats = &net_device->chan_table[q_idx].tx_stats;
|
2017-01-25 05:06:12 +08:00
|
|
|
|
|
|
|
u64_stats_update_begin(&tx_stats->syncp);
|
|
|
|
tx_stats->packets += packet->total_packets;
|
|
|
|
tx_stats->bytes += packet->total_bytes;
|
|
|
|
u64_stats_update_end(&tx_stats->syncp);
|
|
|
|
|
2017-04-08 02:41:19 +08:00
|
|
|
napi_consume_skb(skb, budget);
|
2016-08-24 03:17:53 +08:00
|
|
|
}
|
|
|
|
|
2017-01-25 05:06:07 +08:00
|
|
|
queue_sends =
|
|
|
|
atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
|
2016-08-24 03:17:53 +08:00
|
|
|
|
2018-03-21 06:03:05 +08:00
|
|
|
if (unlikely(net_device->destroy)) {
|
|
|
|
if (queue_sends == 0)
|
|
|
|
wake_up(&net_device->wait_drain);
|
|
|
|
} else {
|
|
|
|
struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
|
|
|
|
|
2019-03-29 03:40:36 +08:00
|
|
|
if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
|
2018-03-28 08:48:39 +08:00
|
|
|
(hv_get_avail_to_write_percent(&channel->outbound) >
|
|
|
|
RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
|
2018-03-21 06:03:05 +08:00
|
|
|
netif_tx_wake_queue(txq);
|
|
|
|
ndev_ctx->eth_stats.wake_queue++;
|
|
|
|
}
|
2017-09-30 02:39:46 +08:00
|
|
|
}
|
2016-08-24 03:17:53 +08:00
|
|
|
}
|
|
|
|
|
2018-04-27 05:34:25 +08:00
|
|
|
static void netvsc_send_completion(struct net_device *ndev,
|
|
|
|
struct netvsc_device *net_device,
|
2015-12-02 08:43:05 +08:00
|
|
|
struct vmbus_channel *incoming_channel,
|
2017-04-08 02:41:19 +08:00
|
|
|
const struct vmpacket_descriptor *desc,
|
|
|
|
int budget)
|
2009-07-14 06:34:54 +08:00
|
|
|
{
|
2021-01-09 08:53:42 +08:00
|
|
|
const struct nvsp_message *nvsp_packet;
|
2020-09-16 17:47:27 +08:00
|
|
|
u32 msglen = hv_pkt_datalen(desc);
|
2021-01-09 08:53:42 +08:00
|
|
|
struct nvsp_message *pkt_rqst;
|
|
|
|
u64 cmd_rqst;
|
|
|
|
|
|
|
|
/* First check if this is a VMBUS completion without data payload */
|
|
|
|
if (!msglen) {
|
|
|
|
cmd_rqst = vmbus_request_addr(&incoming_channel->requestor,
|
|
|
|
(u64)desc->trans_id);
|
|
|
|
if (cmd_rqst == VMBUS_RQST_ERROR) {
|
|
|
|
netdev_err(ndev, "Invalid transaction id\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
pkt_rqst = (struct nvsp_message *)(uintptr_t)cmd_rqst;
|
|
|
|
switch (pkt_rqst->hdr.msg_type) {
|
|
|
|
case NVSP_MSG4_TYPE_SWITCH_DATA_PATH:
|
|
|
|
complete(&net_device->channel_init_wait);
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
netdev_err(ndev, "Unexpected VMBUS completion!!\n");
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
2020-09-16 17:47:27 +08:00
|
|
|
|
|
|
|
/* Ensure packet is big enough to read header fields */
|
|
|
|
if (msglen < sizeof(struct nvsp_message_header)) {
|
|
|
|
netdev_err(ndev, "nvsp_message length too small: %u\n", msglen);
|
|
|
|
return;
|
|
|
|
}
|
2009-07-14 06:34:54 +08:00
|
|
|
|
2021-01-09 08:53:42 +08:00
|
|
|
nvsp_packet = hv_pkt_data(desc);
|
2016-08-24 03:17:53 +08:00
|
|
|
switch (nvsp_packet->hdr.msg_type) {
|
|
|
|
case NVSP_MSG_TYPE_INIT_COMPLETE:
|
2020-09-16 17:47:27 +08:00
|
|
|
if (msglen < sizeof(struct nvsp_message_header) +
|
|
|
|
sizeof(struct nvsp_message_init_complete)) {
|
|
|
|
netdev_err(ndev, "nvsp_msg length too small: %u\n",
|
|
|
|
msglen);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
fallthrough;
|
|
|
|
|
2016-08-24 03:17:53 +08:00
|
|
|
case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
|
2020-09-16 17:47:27 +08:00
|
|
|
if (msglen < sizeof(struct nvsp_message_header) +
|
|
|
|
sizeof(struct nvsp_1_message_send_receive_buffer_complete)) {
|
|
|
|
netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
|
|
|
|
msglen);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
fallthrough;
|
|
|
|
|
2016-08-24 03:17:53 +08:00
|
|
|
case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
|
2020-09-16 17:47:27 +08:00
|
|
|
if (msglen < sizeof(struct nvsp_message_header) +
|
|
|
|
sizeof(struct nvsp_1_message_send_send_buffer_complete)) {
|
|
|
|
netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
|
|
|
|
msglen);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
fallthrough;
|
|
|
|
|
2016-08-24 03:17:53 +08:00
|
|
|
case NVSP_MSG5_TYPE_SUBCHANNEL:
|
2020-09-16 17:47:27 +08:00
|
|
|
if (msglen < sizeof(struct nvsp_message_header) +
|
|
|
|
sizeof(struct nvsp_5_subchannel_complete)) {
|
|
|
|
netdev_err(ndev, "nvsp_msg5 length too small: %u\n",
|
|
|
|
msglen);
|
|
|
|
return;
|
|
|
|
}
|
2009-07-28 04:47:24 +08:00
|
|
|
/* Copy the response back */
|
2010-12-11 04:03:59 +08:00
|
|
|
memcpy(&net_device->channel_init_pkt, nvsp_packet,
|
2009-09-03 01:33:05 +08:00
|
|
|
sizeof(struct nvsp_message));
|
2011-05-10 22:55:41 +08:00
|
|
|
complete(&net_device->channel_init_wait);
|
2016-08-24 03:17:53 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
|
2018-04-27 05:34:25 +08:00
|
|
|
netvsc_send_tx_complete(ndev, net_device, incoming_channel,
|
|
|
|
desc, budget);
|
2016-08-24 03:17:53 +08:00
|
|
|
break;
|
2009-07-14 06:34:54 +08:00
|
|
|
|
2016-08-24 03:17:53 +08:00
|
|
|
default:
|
|
|
|
netdev_err(ndev,
|
|
|
|
"Unknown send completion type %d received!!\n",
|
|
|
|
nvsp_packet->hdr.msg_type);
|
2009-07-14 06:34:54 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-05-01 01:14:31 +08:00
|
|
|
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
|
|
|
|
{
|
2017-01-25 05:06:14 +08:00
|
|
|
unsigned long *map_addr = net_device->send_section_map;
|
|
|
|
unsigned int i;
|
|
|
|
|
2017-04-25 09:33:38 +08:00
|
|
|
for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
|
2017-01-25 05:06:14 +08:00
|
|
|
if (sync_test_and_set_bit(i, map_addr) == 0)
|
|
|
|
return i;
|
2014-05-01 01:14:31 +08:00
|
|
|
}
|
2017-01-25 05:06:14 +08:00
|
|
|
|
|
|
|
return NETVSC_INVALID_INDEX;
|
2014-05-01 01:14:31 +08:00
|
|
|
}
|
|
|
|
|
2017-12-13 08:48:35 +08:00
|
|
|
static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
|
|
|
|
unsigned int section_index,
|
|
|
|
u32 pend_size,
|
|
|
|
struct hv_netvsc_packet *packet,
|
|
|
|
struct rndis_message *rndis_msg,
|
|
|
|
struct hv_page_buffer *pb,
|
2017-12-13 08:48:40 +08:00
|
|
|
bool xmit_more)
|
2014-05-01 01:14:31 +08:00
|
|
|
{
|
|
|
|
char *start = net_device->send_buf;
|
2015-03-27 00:03:37 +08:00
|
|
|
char *dest = start + (section_index * net_device->send_section_size)
|
|
|
|
+ pend_size;
|
2014-05-01 01:14:31 +08:00
|
|
|
int i;
|
2015-03-27 00:03:37 +08:00
|
|
|
u32 padding = 0;
|
2015-04-14 07:34:35 +08:00
|
|
|
u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
|
|
|
|
packet->page_buf_cnt;
|
2017-12-02 03:01:46 +08:00
|
|
|
u32 remain;
|
2015-03-27 00:03:37 +08:00
|
|
|
|
|
|
|
/* Add padding */
|
2017-12-02 03:01:46 +08:00
|
|
|
remain = packet->total_data_buflen & (net_device->pkt_align - 1);
|
2017-12-13 08:48:40 +08:00
|
|
|
if (xmit_more && remain) {
|
2015-03-27 00:03:37 +08:00
|
|
|
padding = net_device->pkt_align - remain;
|
2015-12-02 08:43:06 +08:00
|
|
|
rndis_msg->msg_len += padding;
|
2015-03-27 00:03:37 +08:00
|
|
|
packet->total_data_buflen += padding;
|
|
|
|
}
|
2014-05-01 01:14:31 +08:00
|
|
|
|
2015-04-14 07:34:35 +08:00
|
|
|
for (i = 0; i < page_count; i++) {
|
2020-09-16 11:48:13 +08:00
|
|
|
char *src = phys_to_virt(pb[i].pfn << HV_HYP_PAGE_SHIFT);
|
2017-07-28 23:59:44 +08:00
|
|
|
u32 offset = pb[i].offset;
|
|
|
|
u32 len = pb[i].len;
|
2014-05-01 01:14:31 +08:00
|
|
|
|
|
|
|
memcpy(dest, (src + offset), len);
|
|
|
|
dest += len;
|
|
|
|
}
|
2015-03-27 00:03:37 +08:00
|
|
|
|
2017-12-13 08:48:35 +08:00
|
|
|
if (padding)
|
2015-03-27 00:03:37 +08:00
|
|
|
memset(dest, 0, padding);
|
2014-05-01 01:14:31 +08:00
|
|
|
}
|
|
|
|
|
2016-09-10 03:45:24 +08:00
|
|
|
static inline int netvsc_send_pkt(
|
2016-05-13 19:55:23 +08:00
|
|
|
struct hv_device *device,
|
2015-03-27 00:03:37 +08:00
|
|
|
struct hv_netvsc_packet *packet,
|
2015-12-02 08:43:13 +08:00
|
|
|
struct netvsc_device *net_device,
|
2017-07-28 23:59:44 +08:00
|
|
|
struct hv_page_buffer *pb,
|
2015-12-02 08:43:14 +08:00
|
|
|
struct sk_buff *skb)
|
2009-07-14 06:34:54 +08:00
|
|
|
{
|
2015-03-27 00:03:37 +08:00
|
|
|
struct nvsp_message nvmsg;
|
2018-03-17 06:44:28 +08:00
|
|
|
struct nvsp_1_message_send_rndis_packet *rpkt =
|
2017-08-01 01:30:54 +08:00
|
|
|
&nvmsg.msg.v1_msg.send_rndis_pkt;
|
|
|
|
struct netvsc_channel * const nvchan =
|
|
|
|
&net_device->chan_table[packet->q_idx];
|
2017-01-25 05:06:07 +08:00
|
|
|
struct vmbus_channel *out_channel = nvchan->channel;
|
2016-05-13 19:55:23 +08:00
|
|
|
struct net_device *ndev = hv_get_drvdata(device);
|
2017-09-30 02:39:46 +08:00
|
|
|
struct net_device_context *ndev_ctx = netdev_priv(ndev);
|
2017-01-25 05:06:07 +08:00
|
|
|
struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
|
2015-03-27 00:03:37 +08:00
|
|
|
u64 req_id;
|
|
|
|
int ret;
|
2018-03-28 08:48:39 +08:00
|
|
|
u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);
|
2014-05-01 01:14:31 +08:00
|
|
|
|
2021-01-15 04:26:28 +08:00
|
|
|
memset(&nvmsg, 0, sizeof(struct nvsp_message));
|
2015-03-27 00:03:37 +08:00
|
|
|
nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
|
2017-08-01 01:30:54 +08:00
|
|
|
if (skb)
|
|
|
|
rpkt->channel_type = 0; /* 0 is RMC_DATA */
|
|
|
|
else
|
|
|
|
rpkt->channel_type = 1; /* 1 is RMC_CONTROL */
|
2009-07-14 06:34:54 +08:00
|
|
|
|
2017-08-01 01:30:54 +08:00
|
|
|
rpkt->send_buf_section_index = packet->send_buf_index;
|
2015-03-27 00:03:37 +08:00
|
|
|
if (packet->send_buf_index == NETVSC_INVALID_INDEX)
|
2017-08-01 01:30:54 +08:00
|
|
|
rpkt->send_buf_section_size = 0;
|
2015-03-27 00:03:37 +08:00
|
|
|
else
|
2017-08-01 01:30:54 +08:00
|
|
|
rpkt->send_buf_section_size = packet->total_data_buflen;
|
2009-09-03 01:33:05 +08:00
|
|
|
|
2015-12-02 08:43:14 +08:00
|
|
|
req_id = (ulong)skb;
|
2013-04-05 19:44:40 +08:00
|
|
|
|
2014-12-02 05:28:39 +08:00
|
|
|
if (out_channel->rescind)
|
|
|
|
return -ENODEV;
|
|
|
|
|
2018-03-17 06:44:28 +08:00
|
|
|
trace_nvsp_send_pkt(ndev, out_channel, rpkt);
|
|
|
|
|
2010-12-11 04:03:58 +08:00
|
|
|
if (packet->page_buf_cnt) {
|
2017-07-28 23:59:44 +08:00
|
|
|
if (packet->cp_partial)
|
|
|
|
pb += packet->rmsg_pgcnt;
|
|
|
|
|
2017-08-16 23:56:25 +08:00
|
|
|
ret = vmbus_sendpacket_pagebuffer(out_channel,
|
|
|
|
pb, packet->page_buf_cnt,
|
|
|
|
&nvmsg, sizeof(nvmsg),
|
|
|
|
req_id);
|
2009-09-03 01:33:05 +08:00
|
|
|
} else {
|
2017-08-16 23:56:26 +08:00
|
|
|
ret = vmbus_sendpacket(out_channel,
|
|
|
|
&nvmsg, sizeof(nvmsg),
|
|
|
|
req_id, VM_PKT_DATA_INBAND,
|
|
|
|
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
|
2009-07-14 06:34:54 +08:00
|
|
|
}
|
|
|
|
|
2011-12-03 03:56:25 +08:00
|
|
|
if (ret == 0) {
|
2017-01-25 05:06:07 +08:00
|
|
|
atomic_inc_return(&nvchan->queue_sends);
|
2014-04-22 01:20:28 +08:00
|
|
|
|
2017-09-30 02:39:46 +08:00
|
|
|
if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
|
2017-01-25 05:06:07 +08:00
|
|
|
netif_tx_stop_queue(txq);
|
2017-09-30 02:39:46 +08:00
|
|
|
ndev_ctx->eth_stats.stop_queue++;
|
|
|
|
}
|
2011-12-03 03:56:25 +08:00
|
|
|
} else if (ret == -EAGAIN) {
|
2017-01-25 05:06:07 +08:00
|
|
|
netif_tx_stop_queue(txq);
|
2017-09-30 02:39:46 +08:00
|
|
|
ndev_ctx->eth_stats.stop_queue++;
|
2011-12-03 03:56:25 +08:00
|
|
|
} else {
|
2017-07-28 23:59:43 +08:00
|
|
|
netdev_err(ndev,
|
|
|
|
"Unable to send packet pages %u len %u, ret %d\n",
|
|
|
|
packet->page_buf_cnt, packet->total_data_buflen,
|
|
|
|
ret);
|
2011-12-03 03:56:25 +08:00
|
|
|
}
|
2009-07-14 06:34:54 +08:00
|
|
|
|
2019-05-01 03:29:07 +08:00
|
|
|
if (netif_tx_queue_stopped(txq) &&
|
|
|
|
atomic_read(&nvchan->queue_sends) < 1 &&
|
|
|
|
!net_device->tx_disable) {
|
|
|
|
netif_tx_wake_queue(txq);
|
|
|
|
ndev_ctx->eth_stats.wake_queue++;
|
|
|
|
if (ret == -EAGAIN)
|
|
|
|
ret = -ENOSPC;
|
|
|
|
}
|
|
|
|
|
2015-03-27 00:03:37 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2016-01-26 01:49:31 +08:00
|
|
|
/* Move packet out of multi send data (msd), and clear msd */
|
|
|
|
static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
|
|
|
|
struct sk_buff **msd_skb,
|
|
|
|
struct multi_send_data *msdp)
|
|
|
|
{
|
|
|
|
*msd_skb = msdp->skb;
|
|
|
|
*msd_send = msdp->pkt;
|
|
|
|
msdp->skb = NULL;
|
|
|
|
msdp->pkt = NULL;
|
|
|
|
msdp->count = 0;
|
|
|
|
}
|
|
|
|
|
2017-07-20 02:53:17 +08:00
|
|
|
/* RCU already held by caller */
|
2021-03-13 07:45:27 +08:00
|
|
|
/* Batching/bouncing logic is designed to attempt to optimize
|
|
|
|
* performance.
|
|
|
|
*
|
|
|
|
* For small, non-LSO packets we copy the packet to a send buffer
|
|
|
|
* which is pre-registered with the Hyper-V side. This enables the
|
|
|
|
* hypervisor to avoid remapping the aperture to access the packet
|
|
|
|
* descriptor and data.
|
|
|
|
*
|
|
|
|
* If we already started using a buffer and the netdev is transmitting
|
|
|
|
* a burst of packets, keep on copying into the buffer until it is
|
|
|
|
* full or we are done collecting a burst. If there is an existing
|
|
|
|
* buffer with space for the RNDIS descriptor but not the packet, copy
|
|
|
|
* the RNDIS descriptor to the buffer, keeping the packet in place.
|
|
|
|
*
|
|
|
|
* If we do batching and send more than one packet using a single
|
|
|
|
* NetVSC message, free the SKBs of the packets copied, except for the
|
|
|
|
* last packet. This is done to streamline the handling of the case
|
|
|
|
* where the last packet only had the RNDIS descriptor copied to the
|
|
|
|
* send buffer, with the data pointers included in the NetVSC message.
|
|
|
|
*/
|
2017-12-13 08:48:40 +08:00
|
|
|
int netvsc_send(struct net_device *ndev,
|
2015-12-02 08:43:06 +08:00
|
|
|
struct hv_netvsc_packet *packet,
|
2015-12-02 08:43:13 +08:00
|
|
|
struct rndis_message *rndis_msg,
|
2017-07-28 23:59:44 +08:00
|
|
|
struct hv_page_buffer *pb,
|
2020-01-24 05:52:34 +08:00
|
|
|
struct sk_buff *skb,
|
|
|
|
bool xdp_tx)
|
2015-03-27 00:03:37 +08:00
|
|
|
{
|
2017-12-13 08:48:40 +08:00
|
|
|
struct net_device_context *ndev_ctx = netdev_priv(ndev);
|
2017-07-20 02:53:19 +08:00
|
|
|
struct netvsc_device *net_device
|
2017-07-28 23:59:42 +08:00
|
|
|
= rcu_dereference_bh(ndev_ctx->nvdev);
|
2017-07-20 02:53:17 +08:00
|
|
|
struct hv_device *device = ndev_ctx->device_ctx;
|
2016-08-24 03:17:55 +08:00
|
|
|
int ret = 0;
|
2017-01-25 05:06:07 +08:00
|
|
|
struct netvsc_channel *nvchan;
|
2015-03-27 00:03:37 +08:00
|
|
|
u32 pktlen = packet->total_data_buflen, msd_len = 0;
|
|
|
|
unsigned int section_index = NETVSC_INVALID_INDEX;
|
|
|
|
struct multi_send_data *msdp;
|
|
|
|
struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
|
2016-01-26 01:49:31 +08:00
|
|
|
struct sk_buff *msd_skb = NULL;
|
2017-12-13 08:48:40 +08:00
|
|
|
bool try_batch, xmit_more;
|
2015-03-27 00:03:37 +08:00
|
|
|
|
2017-06-09 07:21:23 +08:00
|
|
|
/* If device is rescinded, return error and packet will get dropped. */
|
2017-07-20 02:53:17 +08:00
|
|
|
if (unlikely(!net_device || net_device->destroy))
|
2015-03-27 00:03:37 +08:00
|
|
|
return -ENODEV;
|
|
|
|
|
2017-01-25 05:06:07 +08:00
|
|
|
nvchan = &net_device->chan_table[packet->q_idx];
|
2015-03-27 00:03:37 +08:00
|
|
|
packet->send_buf_index = NETVSC_INVALID_INDEX;
|
2015-04-14 07:34:35 +08:00
|
|
|
packet->cp_partial = false;
|
2015-03-27 00:03:37 +08:00
|
|
|
|
2020-01-24 05:52:34 +08:00
|
|
|
/* Send a control message or XDP packet directly without accessing
|
|
|
|
* msd (Multi-Send Data) field which may be changed during data packet
|
|
|
|
* processing.
|
2015-12-11 04:19:35 +08:00
|
|
|
*/
|
2020-01-24 05:52:34 +08:00
|
|
|
if (!skb || xdp_tx)
|
2018-03-03 05:49:01 +08:00
|
|
|
return netvsc_send_pkt(device, packet, net_device, pb, skb);
|
2015-12-11 04:19:35 +08:00
|
|
|
|
2015-03-27 00:03:37 +08:00
|
|
|
/* batch packets in send buffer if possible */
|
2017-01-25 05:06:07 +08:00
|
|
|
msdp = &nvchan->msd;
|
2015-03-27 00:03:37 +08:00
|
|
|
if (msdp->pkt)
|
|
|
|
msd_len = msdp->pkt->total_data_buflen;
|
|
|
|
|
2017-03-23 05:51:04 +08:00
|
|
|
try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
|
2015-04-14 07:34:35 +08:00
|
|
|
if (try_batch && msd_len + pktlen + net_device->pkt_align <
|
2015-03-27 00:03:37 +08:00
|
|
|
net_device->send_section_size) {
|
|
|
|
section_index = msdp->pkt->send_buf_index;
|
|
|
|
|
2015-04-14 07:34:35 +08:00
|
|
|
} else if (try_batch && msd_len + packet->rmsg_size <
|
|
|
|
net_device->send_section_size) {
|
|
|
|
section_index = msdp->pkt->send_buf_index;
|
|
|
|
packet->cp_partial = true;
|
|
|
|
|
2017-03-23 05:51:04 +08:00
|
|
|
} else if (pktlen + net_device->pkt_align <
|
2015-03-27 00:03:37 +08:00
|
|
|
net_device->send_section_size) {
|
|
|
|
section_index = netvsc_get_next_send_section(net_device);
|
2017-08-10 08:46:12 +08:00
|
|
|
if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
|
|
|
|
++ndev_ctx->eth_stats.tx_send_full;
|
|
|
|
} else {
|
2016-01-26 01:49:31 +08:00
|
|
|
move_pkt_msd(&msd_send, &msd_skb, msdp);
|
|
|
|
msd_len = 0;
|
2015-03-27 00:03:37 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-12-13 08:48:40 +08:00
|
|
|
/* Keep aggregating only if stack says more data is coming
|
|
|
|
* and not doing mixed modes send and not flow blocked
|
|
|
|
*/
|
2019-04-01 22:42:14 +08:00
|
|
|
xmit_more = netdev_xmit_more() &&
|
2017-12-13 08:48:40 +08:00
|
|
|
!packet->cp_partial &&
|
|
|
|
!netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
|
|
|
|
|
2015-03-27 00:03:37 +08:00
|
|
|
if (section_index != NETVSC_INVALID_INDEX) {
|
|
|
|
netvsc_copy_to_send_buf(net_device,
|
|
|
|
section_index, msd_len,
|
2017-12-13 08:48:40 +08:00
|
|
|
packet, rndis_msg, pb, xmit_more);
|
2015-03-30 12:08:42 +08:00
|
|
|
|
2015-03-27 00:03:37 +08:00
|
|
|
packet->send_buf_index = section_index;
|
2015-04-14 07:34:35 +08:00
|
|
|
|
|
|
|
if (packet->cp_partial) {
|
|
|
|
packet->page_buf_cnt -= packet->rmsg_pgcnt;
|
|
|
|
packet->total_data_buflen = msd_len + packet->rmsg_size;
|
|
|
|
} else {
|
|
|
|
packet->page_buf_cnt = 0;
|
|
|
|
packet->total_data_buflen += msd_len;
|
|
|
|
}
|
2015-03-27 00:03:37 +08:00
|
|
|
|
2017-01-25 05:06:12 +08:00
|
|
|
if (msdp->pkt) {
|
|
|
|
packet->total_packets += msdp->pkt->total_packets;
|
|
|
|
packet->total_bytes += msdp->pkt->total_bytes;
|
|
|
|
}
|
|
|
|
|
2016-01-26 01:49:31 +08:00
|
|
|
if (msdp->skb)
|
2016-09-23 07:56:29 +08:00
|
|
|
dev_consume_skb_any(msdp->skb);
|
2015-04-07 06:22:54 +08:00
|
|
|
|
2017-12-13 08:48:40 +08:00
|
|
|
if (xmit_more) {
|
2016-01-26 01:49:31 +08:00
|
|
|
msdp->skb = skb;
|
2015-03-27 00:03:37 +08:00
|
|
|
msdp->pkt = packet;
|
|
|
|
msdp->count++;
|
|
|
|
} else {
|
|
|
|
cur_send = packet;
|
2016-01-26 01:49:31 +08:00
|
|
|
msdp->skb = NULL;
|
2015-03-27 00:03:37 +08:00
|
|
|
msdp->pkt = NULL;
|
|
|
|
msdp->count = 0;
|
|
|
|
}
|
|
|
|
} else {
|
2016-01-26 01:49:31 +08:00
|
|
|
move_pkt_msd(&msd_send, &msd_skb, msdp);
|
2015-03-27 00:03:37 +08:00
|
|
|
cur_send = packet;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (msd_send) {
|
2016-08-24 03:17:55 +08:00
|
|
|
int m_ret = netvsc_send_pkt(device, msd_send, net_device,
|
|
|
|
NULL, msd_skb);
|
2015-03-27 00:03:37 +08:00
|
|
|
|
|
|
|
if (m_ret != 0) {
|
|
|
|
netvsc_free_send_slot(net_device,
|
|
|
|
msd_send->send_buf_index);
|
2016-01-26 01:49:31 +08:00
|
|
|
dev_kfree_skb_any(msd_skb);
|
2015-03-27 00:03:37 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (cur_send)
|
2016-05-13 19:55:23 +08:00
|
|
|
ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);
|
2015-03-27 00:03:37 +08:00
|
|
|
|
2015-05-05 01:57:16 +08:00
|
|
|
if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
|
|
|
|
netvsc_free_send_slot(net_device, section_index);
|
2015-01-30 04:34:49 +08:00
|
|
|
|
2009-07-14 06:34:54 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2017-07-28 23:59:45 +08:00
|
|
|
/* Send pending recv completions */
|
2017-08-10 08:46:12 +08:00
|
|
|
static int send_recv_completions(struct net_device *ndev,
|
|
|
|
struct netvsc_device *nvdev,
|
|
|
|
struct netvsc_channel *nvchan)
|
2011-04-22 03:30:42 +08:00
|
|
|
{
|
2017-07-28 23:59:45 +08:00
|
|
|
struct multi_recv_comp *mrc = &nvchan->mrc;
|
|
|
|
struct recv_comp_msg {
|
|
|
|
struct nvsp_message_header hdr;
|
|
|
|
u32 status;
|
|
|
|
} __packed;
|
|
|
|
struct recv_comp_msg msg = {
|
|
|
|
.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
|
|
|
|
};
|
2011-04-22 03:30:42 +08:00
|
|
|
int ret;
|
|
|
|
|
2017-07-28 23:59:45 +08:00
|
|
|
while (mrc->first != mrc->next) {
|
|
|
|
const struct recv_comp_data *rcd
|
|
|
|
= mrc->slots + mrc->first;
|
2016-08-20 05:47:09 +08:00
|
|
|
|
2017-07-28 23:59:45 +08:00
|
|
|
msg.status = rcd->status;
|
|
|
|
ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
|
|
|
|
rcd->tid, VM_PKT_COMP, 0);
|
2017-08-10 08:46:12 +08:00
|
|
|
if (unlikely(ret)) {
|
|
|
|
struct net_device_context *ndev_ctx = netdev_priv(ndev);
|
|
|
|
|
|
|
|
++ndev_ctx->eth_stats.rx_comp_busy;
|
2017-07-28 23:59:45 +08:00
|
|
|
return ret;
|
2017-08-10 08:46:12 +08:00
|
|
|
}
|
2016-08-20 05:47:09 +08:00
|
|
|
|
2017-07-28 23:59:45 +08:00
|
|
|
if (++mrc->first == nvdev->recv_completion_cnt)
|
|
|
|
mrc->first = 0;
|
|
|
|
}
|
2016-08-20 05:47:09 +08:00
|
|
|
|
2017-07-28 23:59:45 +08:00
|
|
|
/* receive completion ring has been emptied */
|
|
|
|
if (unlikely(nvdev->destroy))
|
|
|
|
wake_up(&nvdev->wait_drain);
|
2016-08-20 05:47:09 +08:00
|
|
|
|
2017-07-28 23:59:45 +08:00
|
|
|
return 0;
|
2016-08-20 05:47:09 +08:00
|
|
|
}
|
|
|
|
|
2017-07-28 23:59:45 +08:00
|
|
|
/* Count how many receive completions are outstanding */
|
|
|
|
static void recv_comp_slot_avail(const struct netvsc_device *nvdev,
|
|
|
|
const struct multi_recv_comp *mrc,
|
|
|
|
u32 *filled, u32 *avail)
|
2016-08-20 05:47:09 +08:00
|
|
|
{
|
2017-07-28 23:59:45 +08:00
|
|
|
u32 count = nvdev->recv_completion_cnt;
|
2016-08-20 05:47:09 +08:00
|
|
|
|
2017-07-28 23:59:45 +08:00
|
|
|
if (mrc->next >= mrc->first)
|
|
|
|
*filled = mrc->next - mrc->first;
|
|
|
|
else
|
|
|
|
*filled = (count - mrc->first) + mrc->next;
|
2016-08-20 05:47:09 +08:00
|
|
|
|
2017-07-28 23:59:45 +08:00
|
|
|
*avail = count - *filled - 1;
|
2016-08-20 05:47:09 +08:00
|
|
|
}
|
|
|
|
|
2017-07-28 23:59:45 +08:00
|
|
|
/* Add receive complete to ring to send to host. */
|
|
|
|
static void enq_receive_complete(struct net_device *ndev,
|
|
|
|
struct netvsc_device *nvdev, u16 q_idx,
|
|
|
|
u64 tid, u32 status)
|
2016-08-20 05:47:09 +08:00
|
|
|
{
|
2017-07-28 23:59:45 +08:00
|
|
|
struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
|
|
|
|
struct multi_recv_comp *mrc = &nvchan->mrc;
|
2016-08-20 05:47:09 +08:00
|
|
|
struct recv_comp_data *rcd;
|
2017-07-28 23:59:45 +08:00
|
|
|
u32 filled, avail;
|
2016-08-20 05:47:09 +08:00
|
|
|
|
2017-07-28 23:59:45 +08:00
|
|
|
recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
|
2016-08-20 05:47:09 +08:00
|
|
|
|
2017-07-28 23:59:45 +08:00
|
|
|
if (unlikely(filled > NAPI_POLL_WEIGHT)) {
|
2017-08-10 08:46:12 +08:00
|
|
|
send_recv_completions(ndev, nvdev, nvchan);
|
2017-07-28 23:59:45 +08:00
|
|
|
recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
|
2011-04-22 03:30:42 +08:00
|
|
|
}
|
2016-08-20 05:47:09 +08:00
|
|
|
|
2017-07-28 23:59:45 +08:00
|
|
|
if (unlikely(!avail)) {
|
|
|
|
netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
|
|
|
|
q_idx, tid);
|
|
|
|
return;
|
|
|
|
}
|
2016-08-20 05:47:09 +08:00
|
|
|
|
2017-07-28 23:59:45 +08:00
|
|
|
rcd = mrc->slots + mrc->next;
|
|
|
|
rcd->tid = tid;
|
|
|
|
rcd->status = status;
|
2016-08-20 05:47:09 +08:00
|
|
|
|
2017-07-28 23:59:45 +08:00
|
|
|
if (++mrc->next == nvdev->recv_completion_cnt)
|
|
|
|
mrc->next = 0;
|
2016-08-20 05:47:09 +08:00
|
|
|
}
|
|
|
|
|
2017-02-28 02:26:49 +08:00
|
|
|
/* Process one VM_PKT_DATA_USING_XFER_PAGES packet from the host.
 *
 * Each transfer-page range in the packet describes one RNDIS packet
 * (one ethernet frame) located in the pre-established receive buffer.
 * Every length/offset field comes from the host and is validated before
 * use; on any malformed field the whole packet is dropped (return 0)
 * or the offending range is skipped with a failure status.
 *
 * Returns the number of ranges processed, which the caller counts
 * against the NAPI budget.
 */
static int netvsc_receive(struct net_device *ndev,
			  struct netvsc_device *net_device,
			  struct netvsc_channel *nvchan,
			  const struct vmpacket_descriptor *desc)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct vmbus_channel *channel = nvchan->channel;
	const struct vmtransfer_page_packet_header *vmxferpage_packet
		= container_of(desc, const struct vmtransfer_page_packet_header, d);
	const struct nvsp_message *nvsp = hv_pkt_data(desc);
	u32 msglen = hv_pkt_datalen(desc);
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	char *recv_buf = net_device->recv_buf;
	u32 status = NVSP_STAT_SUCCESS;
	int i;
	int count = 0;

	/* Ensure packet is big enough to read header fields */
	if (msglen < sizeof(struct nvsp_message_header)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "invalid nvsp header, length too small: %u\n",
			  msglen);
		return 0;
	}

	/* Make sure this is a valid nvsp packet */
	if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Unknown nvsp packet type received %u\n",
			  nvsp->hdr.msg_type);
		return 0;
	}

	/* Validate xfer page pkt header.
	 * offset8 counts in 8-byte units, hence the << 3.
	 */
	if ((desc->offset8 << 3) < sizeof(struct vmtransfer_page_packet_header)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Invalid xfer page pkt, offset too small: %u\n",
			  desc->offset8 << 3);
		return 0;
	}

	if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Invalid xfer page set id - expecting %x got %x\n",
			  NETVSC_RECEIVE_BUFFER_ID,
			  vmxferpage_packet->xfer_pageset_id);
		return 0;
	}

	count = vmxferpage_packet->range_cnt;

	/* Check count for a valid value: the variable-length ranges[]
	 * array must fit inside the descriptor header area.
	 */
	if (NETVSC_XFER_HEADER_SIZE(count) > desc->offset8 << 3) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Range count is not valid: %d\n",
			  count);
		return 0;
	}

	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
	for (i = 0; i < count; i++) {
		u32 offset = vmxferpage_packet->ranges[i].byte_offset;
		u32 buflen = vmxferpage_packet->ranges[i].byte_count;
		void *data;
		int ret;

		/* Host-supplied offset/length must stay inside the receive
		 * buffer; the subtraction form avoids integer overflow.
		 */
		if (unlikely(offset > net_device->recv_buf_size ||
			     buflen > net_device->recv_buf_size - offset)) {
			nvchan->rsc.cnt = 0;
			status = NVSP_STAT_FAIL;
			netif_err(net_device_ctx, rx_err, ndev,
				  "Packet offset:%u + len:%u too big\n",
				  offset, buflen);

			continue;
		}

		/* We're going to copy (sections of) the packet into nvchan->recv_buf;
		 * make sure that nvchan->recv_buf is large enough to hold the packet.
		 */
		if (unlikely(buflen > net_device->recv_section_size)) {
			nvchan->rsc.cnt = 0;
			status = NVSP_STAT_FAIL;
			netif_err(net_device_ctx, rx_err, ndev,
				  "Packet too big: buflen=%u recv_section_size=%u\n",
				  buflen, net_device->recv_section_size);

			continue;
		}

		data = recv_buf + offset;

		/* Tell the RSC path whether this is the final range. */
		nvchan->rsc.is_last = (i == count - 1);

		trace_rndis_recv(ndev, q_idx, data);

		/* Pass it to the upper layer */
		ret = rndis_filter_receive(ndev, net_device,
					   nvchan, data, buflen);

		if (unlikely(ret != NVSP_STAT_SUCCESS)) {
			/* Drop incomplete packet */
			nvchan->rsc.cnt = 0;
			status = NVSP_STAT_FAIL;
		}
	}

	/* Queue one completion covering the whole transfer-page packet;
	 * status is FAIL if any range was bad.
	 */
	enq_receive_complete(ndev, net_device, q_idx,
			     vmxferpage_packet->d.trans_id, status);

	return count;
}
|
|
|
|
|
2018-04-27 05:34:25 +08:00
|
|
|
/* Handle NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE from the host: copy the
 * send indirection table (queue mapping) into the per-device tx_table.
 * All host-supplied sizes/offsets are validated before any access.
 */
static void netvsc_send_table(struct net_device *ndev,
			      struct netvsc_device *nvscdev,
			      const struct nvsp_message *nvmsg,
			      u32 msglen)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	u32 count, offset, *tab;
	int i;

	/* Ensure packet is big enough to read send_table fields */
	if (msglen < sizeof(struct nvsp_message_header) +
		     sizeof(struct nvsp_5_send_indirect_table)) {
		netdev_err(ndev, "nvsp_v5_msg length too small: %u\n", msglen);
		return;
	}

	count = nvmsg->msg.v5_msg.send_table.count;
	offset = nvmsg->msg.v5_msg.send_table.offset;

	/* The driver only supports a fixed-size indirection table. */
	if (count != VRSS_SEND_TAB_SIZE) {
		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
		return;
	}

	/* If negotiated version <= NVSP_PROTOCOL_VERSION_6, the offset may be
	 * wrong due to a host bug. So fix the offset here.
	 */
	if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 &&
	    msglen >= sizeof(struct nvsp_message_header) +
	    sizeof(union nvsp_6_message_uber) + count * sizeof(u32))
		offset = sizeof(struct nvsp_message_header) +
			 sizeof(union nvsp_6_message_uber);

	/* Boundary check for all versions: the subtraction form avoids
	 * integer overflow when offset is attacker/host controlled.
	 */
	if (msglen < count * sizeof(u32) || offset > msglen - count * sizeof(u32)) {
		netdev_err(ndev, "Received send-table offset too big:%u\n",
			   offset);
		return;
	}

	tab = (void *)nvmsg + offset;

	for (i = 0; i < count; i++)
		net_device_ctx->tx_table[i] = tab[i];
}
|
|
|
|
|
2018-04-27 05:34:25 +08:00
|
|
|
static void netvsc_send_vf(struct net_device *ndev,
|
2020-09-16 17:47:27 +08:00
|
|
|
const struct nvsp_message *nvmsg,
|
|
|
|
u32 msglen)
|
2015-07-25 01:08:40 +08:00
|
|
|
{
|
2018-04-27 05:34:25 +08:00
|
|
|
struct net_device_context *net_device_ctx = netdev_priv(ndev);
|
|
|
|
|
2020-09-16 17:47:27 +08:00
|
|
|
/* Ensure packet is big enough to read its fields */
|
|
|
|
if (msglen < sizeof(struct nvsp_message_header) +
|
|
|
|
sizeof(struct nvsp_4_send_vf_association)) {
|
|
|
|
netdev_err(ndev, "nvsp_v4_msg length too small: %u\n", msglen);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2016-08-15 23:48:39 +08:00
|
|
|
net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
|
|
|
|
net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
|
2018-09-15 03:54:57 +08:00
|
|
|
netdev_info(ndev, "VF slot %u %s\n",
|
|
|
|
net_device_ctx->vf_serial,
|
|
|
|
net_device_ctx->vf_alloc ? "added" : "removed");
|
2015-07-25 01:08:40 +08:00
|
|
|
}
|
|
|
|
|
2019-11-22 05:33:40 +08:00
|
|
|
static void netvsc_receive_inband(struct net_device *ndev,
|
2019-11-22 05:33:41 +08:00
|
|
|
struct netvsc_device *nvscdev,
|
2020-09-16 17:47:27 +08:00
|
|
|
const struct vmpacket_descriptor *desc)
|
2015-07-25 01:08:40 +08:00
|
|
|
{
|
2020-09-16 17:47:27 +08:00
|
|
|
const struct nvsp_message *nvmsg = hv_pkt_data(desc);
|
|
|
|
u32 msglen = hv_pkt_datalen(desc);
|
|
|
|
|
|
|
|
/* Ensure packet is big enough to read header fields */
|
|
|
|
if (msglen < sizeof(struct nvsp_message_header)) {
|
|
|
|
netdev_err(ndev, "inband nvsp_message length too small: %u\n", msglen);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2015-07-25 01:08:40 +08:00
|
|
|
switch (nvmsg->hdr.msg_type) {
|
|
|
|
case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
|
2019-11-22 05:33:41 +08:00
|
|
|
netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
|
2015-07-25 01:08:40 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
|
2021-02-01 22:48:14 +08:00
|
|
|
if (hv_is_isolation_supported())
|
|
|
|
netdev_err(ndev, "Ignore VF_ASSOCIATION msg from the host supporting isolation\n");
|
|
|
|
else
|
|
|
|
netvsc_send_vf(ndev, nvmsg, msglen);
|
2015-07-25 01:08:40 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-02-28 02:26:49 +08:00
|
|
|
static int netvsc_process_raw_pkt(struct hv_device *device,
|
2018-09-22 02:20:35 +08:00
|
|
|
struct netvsc_channel *nvchan,
|
2017-02-28 02:26:49 +08:00
|
|
|
struct netvsc_device *net_device,
|
|
|
|
struct net_device *ndev,
|
2017-04-08 02:41:19 +08:00
|
|
|
const struct vmpacket_descriptor *desc,
|
|
|
|
int budget)
|
2016-07-06 07:52:46 +08:00
|
|
|
{
|
2018-09-22 02:20:35 +08:00
|
|
|
struct vmbus_channel *channel = nvchan->channel;
|
2018-04-27 05:34:25 +08:00
|
|
|
const struct nvsp_message *nvmsg = hv_pkt_data(desc);
|
2016-07-06 07:52:46 +08:00
|
|
|
|
2018-03-17 06:44:28 +08:00
|
|
|
trace_nvsp_recv(ndev, channel, nvmsg);
|
|
|
|
|
2016-07-06 07:52:46 +08:00
|
|
|
switch (desc->type) {
|
|
|
|
case VM_PKT_COMP:
|
2020-09-16 17:47:27 +08:00
|
|
|
netvsc_send_completion(ndev, net_device, channel, desc, budget);
|
2016-07-06 07:52:46 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
case VM_PKT_DATA_USING_XFER_PAGES:
|
2020-09-16 17:47:27 +08:00
|
|
|
return netvsc_receive(ndev, net_device, nvchan, desc);
|
2016-07-06 07:52:46 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
case VM_PKT_DATA_INBAND:
|
2020-09-16 17:47:27 +08:00
|
|
|
netvsc_receive_inband(ndev, net_device, desc);
|
2016-07-06 07:52:46 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
|
2017-03-23 05:50:57 +08:00
|
|
|
desc->type, desc->trans_id);
|
2016-07-06 07:52:46 +08:00
|
|
|
break;
|
|
|
|
}
|
2017-02-28 02:26:49 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
|
|
|
|
{
|
|
|
|
struct vmbus_channel *primary = channel->primary_channel;
|
|
|
|
|
|
|
|
return primary ? primary->device_obj : channel->device_obj;
|
|
|
|
}
|
|
|
|
|
2017-03-17 07:12:38 +08:00
|
|
|
/* Network processing softirq
 * Process data in incoming ring buffer from host
 * Stops when ring is empty or budget is met or exceeded.
 */
int netvsc_poll(struct napi_struct *napi, int budget)
{
	struct netvsc_channel *nvchan
		= container_of(napi, struct netvsc_channel, napi);
	struct netvsc_device *net_device = nvchan->net_device;
	struct vmbus_channel *channel = nvchan->channel;
	struct hv_device *device = netvsc_channel_to_device(channel);
	struct net_device *ndev = hv_get_drvdata(device);
	int work_done = 0;
	int ret;

	/* If starting a new interval; otherwise resume from the
	 * descriptor left over when the previous poll hit its budget.
	 */
	if (!nvchan->desc)
		nvchan->desc = hv_pkt_iter_first(channel);

	while (nvchan->desc && work_done < budget) {
		work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
						    ndev, nvchan->desc, budget);
		nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
	}

	/* Send any pending receive completions */
	ret = send_recv_completions(ndev, net_device, nvchan);

	/* If it did not exhaust NAPI budget this time
	 * and not doing busy poll
	 * then re-enable host interrupts
	 * and reschedule if ring is not empty
	 * or sending receive completion failed.
	 *
	 * NOTE: the exact ordering of napi_complete_done(),
	 * hv_end_read() and napi_schedule_prep() closes a race with new
	 * data arriving between "ring looks empty" and "interrupts
	 * re-armed" — do not reorder.
	 */
	if (work_done < budget &&
	    napi_complete_done(napi, work_done) &&
	    (ret || hv_end_read(&channel->inbound)) &&
	    napi_schedule_prep(napi)) {
		hv_begin_read(&channel->inbound);
		__napi_schedule(napi);
	}

	/* Driver may overshoot since multiple packets per descriptor */
	return min(work_done, budget);
}
|
|
|
|
|
2017-03-17 07:12:38 +08:00
|
|
|
/* Call back when data is available in host ring buffer.
|
|
|
|
* Processing is deferred until network softirq (NAPI)
|
|
|
|
*/
|
2014-04-22 01:20:28 +08:00
|
|
|
void netvsc_channel_cb(void *context)
|
2009-07-14 06:34:54 +08:00
|
|
|
{
|
2017-03-17 07:12:37 +08:00
|
|
|
struct netvsc_channel *nvchan = context;
|
2017-07-25 01:57:27 +08:00
|
|
|
struct vmbus_channel *channel = nvchan->channel;
|
|
|
|
struct hv_ring_buffer_info *rbi = &channel->inbound;
|
|
|
|
|
|
|
|
/* preload first vmpacket descriptor */
|
|
|
|
prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);
|
2017-01-25 05:05:58 +08:00
|
|
|
|
2017-03-23 05:50:57 +08:00
|
|
|
if (napi_schedule_prep(&nvchan->napi)) {
|
2019-01-04 03:43:08 +08:00
|
|
|
/* disable interrupts from host */
|
2017-07-25 01:57:27 +08:00
|
|
|
hv_begin_read(rbi);
|
2017-03-10 07:04:14 +08:00
|
|
|
|
2018-03-03 05:49:06 +08:00
|
|
|
__napi_schedule_irqoff(&nvchan->napi);
|
2017-03-23 05:50:57 +08:00
|
|
|
}
|
2009-07-14 06:34:54 +08:00
|
|
|
}
|
2011-04-22 03:30:40 +08:00
|
|
|
|
2011-04-22 03:30:45 +08:00
|
|
|
/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 *
 * Allocates the netvsc_device, initializes per-channel state and XDP
 * rxq info, opens the primary VMBus channel, and negotiates with the
 * NetVSP on the host.  On success returns the new device and publishes
 * it via RCU; on failure unwinds via the goto-cleanup chain and
 * returns ERR_PTR(ret).
 */
struct netvsc_device *netvsc_device_add(struct hv_device *device,
				const struct netvsc_device_info *device_info)
{
	int i, ret = 0;
	struct netvsc_device *net_device;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);

	net_device = alloc_net_device();
	if (!net_device)
		return ERR_PTR(-ENOMEM);

	/* Reset the send indirection table to queue 0. */
	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
		net_device_ctx->tx_table[i] = 0;

	/* Because the device uses NAPI, all the interrupt batching and
	 * control is done via Net softirq, not the channel handling
	 */
	set_channel_read_mode(device->channel, HV_CALL_ISR);

	/* If we're reopening the device we may have multiple queues, fill the
	 * chn_table with the default channel to use it before subchannels are
	 * opened.
	 * Initialize the channel state before we open;
	 * we can be interrupted as soon as we open the channel.
	 */

	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		struct netvsc_channel *nvchan = &net_device->chan_table[i];

		nvchan->channel = device->channel;
		nvchan->net_device = net_device;
		u64_stats_init(&nvchan->tx_stats.syncp);
		u64_stats_init(&nvchan->rx_stats.syncp);

		/* Register XDP rx queue info for this channel. */
		ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i, 0);

		if (ret) {
			netdev_err(ndev, "xdp_rxq_info_reg fail: %d\n", ret);
			goto cleanup2;
		}

		ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);

		if (ret) {
			netdev_err(ndev, "xdp reg_mem_model fail: %d\n", ret);
			goto cleanup2;
		}
	}

	/* Enable NAPI handler before init callbacks */
	netif_napi_add(ndev, &net_device->chan_table[0].napi,
		       netvsc_poll, NAPI_POLL_WEIGHT);

	/* Open the channel */
	device->channel->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
	ret = vmbus_open(device->channel, netvsc_ring_bytes,
			 netvsc_ring_bytes,  NULL, 0,
			 netvsc_channel_cb, net_device->chan_table);

	if (ret != 0) {
		netdev_err(ndev, "unable to open channel: %d\n", ret);
		goto cleanup;
	}

	/* Channel is opened */
	netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");

	napi_enable(&net_device->chan_table[0].napi);

	/* Connect with the NetVsp */
	ret = netvsc_connect_vsp(device, net_device, device_info);
	if (ret != 0) {
		netdev_err(ndev,
			"unable to connect to NetVSP - %d\n", ret);
		goto close;
	}

	/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
	 * populated.
	 */
	rcu_assign_pointer(net_device_ctx->nvdev, net_device);

	return net_device;

close:
	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
	napi_disable(&net_device->chan_table[0].napi);

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

cleanup:
	netif_napi_del(&net_device->chan_table[0].napi);

cleanup2:
	free_netvsc_device(&net_device->rcu);

	return ERR_PTR(ret);
}
|