mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-11 04:18:39 +08:00)
hyperv-next for 5.10
-----BEGIN PGP SIGNATURE-----

iQFHBAABCAAxFiEEIbPD0id6easf0xsudhRwX5BBoF4FAl+FqrsTHHdlaS5saXVA
a2VybmVsLm9yZwAKCRB2FHBfkEGgXnN8B/4sRg7j9OTzVBlDiXF2vj6vbuplTIH6
JR6S5f4PNjUg4gV6ghzSnsx1zqNhPSOr78zDqYto8vv+wqqj3thmld8+gAnSbKtt
yoAa7mhbbN1ryJiwPlZzvX4ApzGZPC7byqEi3+zPIcag6TEl8eyYJOmvY3x1zv8x
CsAb57oCC4erD0n4xlTyfuc8TLpO+EiU53PXbR9AovKQHe4m2/8LWyEbmrm5cRUR
gx8RxoLkkrqK0unzcmanbm47QodiaOTUpycs3IvaBeWZQsqSgFZdI1RAdTZNg+U+
GT8eMRXAwpgDpilPm/0n1O0PKGAsVh9Lbw8Btb/ggqnjTUlA4Z3Df23E
=Wy5n
-----END PGP SIGNATURE-----

Merge tag 'hyperv-next-signed' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux

Pull Hyper-V updates from Wei Liu:

 - a series from Boqun Feng to support page size larger than 4K

 - a few miscellaneous clean-ups

* tag 'hyperv-next-signed' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux:
  hv: clocksource: Add notrace attribute to read_hv_sched_clock_*() functions
  x86/hyperv: Remove aliases with X64 in their name
  PCI: hv: Document missing hv_pci_protocol_negotiation() parameter
  scsi: storvsc: Support PAGE_SIZE larger than 4K
  Driver: hv: util: Use VMBUS_RING_SIZE() for ringbuffer sizes
  HID: hyperv: Use VMBUS_RING_SIZE() for ringbuffer sizes
  Input: hyperv-keyboard: Use VMBUS_RING_SIZE() for ringbuffer sizes
  hv_netvsc: Use HV_HYP_PAGE_SIZE for Hyper-V communication
  hv: hyperv.h: Introduce some hvpfn helper functions
  Drivers: hv: vmbus: Move virt_to_hvpfn() to hyperv header
  Drivers: hv: Use HV_HYP_PAGE in hv_synic_enable_regs()
  Drivers: hv: vmbus: Introduce types of GPADL
  Drivers: hv: vmbus: Move __vmbus_open()
  Drivers: hv: vmbus: Always use HV_HYP_PAGE_SIZE for gpadl
  drivers: hv: remove cast from hyperv_die_event
This commit is contained in:
commit 4907a43da8
drivers/clocksource/hyperv_timer.c
@@ -341,7 +341,7 @@ static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg)
 	return read_hv_clock_tsc();
 }
 
-static u64 read_hv_sched_clock_tsc(void)
+static u64 notrace read_hv_sched_clock_tsc(void)
 {
 	return (read_hv_clock_tsc() - hv_sched_clock_offset) *
 		(NSEC_PER_SEC / HV_CLOCK_HZ);
@@ -404,7 +404,7 @@ static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg)
 	return read_hv_clock_msr();
 }
 
-static u64 read_hv_sched_clock_msr(void)
+static u64 notrace read_hv_sched_clock_msr(void)
 {
 	return (read_hv_clock_msr() - hv_sched_clock_offset) *
 		(NSEC_PER_SEC / HV_CLOCK_HZ);
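Why notrace matters here: the function tracer timestamps events through
sched_clock(), so if the sched-clock read path itself were instrumented,
every traced call would recurse back into the tracer. A minimal sketch of
the pattern (not from this patch; hw_counter_read() is a hypothetical
stand-in for the raw counter read):

#include <linux/compiler.h>	/* notrace */
#include <linux/types.h>

/* Stand-in for a raw hardware counter read. */
static u64 hw_counter_read(void)
{
	return 0;
}

/*
 * notrace keeps ftrace from hooking this function, so the tracer can
 * safely call it to timestamp events without recursing into itself.
 */
static u64 notrace example_sched_clock(void)
{
	return hw_counter_read();
}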
drivers/hid/hid-hyperv.c
@@ -104,8 +104,8 @@ struct synthhid_input_report {
 
 #pragma pack(pop)
 
-#define INPUTVSC_SEND_RING_BUFFER_SIZE		(40 * 1024)
-#define INPUTVSC_RECV_RING_BUFFER_SIZE		(40 * 1024)
+#define INPUTVSC_SEND_RING_BUFFER_SIZE		VMBUS_RING_SIZE(36 * 1024)
+#define INPUTVSC_RECV_RING_BUFFER_SIZE		VMBUS_RING_SIZE(36 * 1024)
 
 
 enum pipe_prot_msg_type {
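The switch from a hand-computed 40K constant to VMBUS_RING_SIZE(36 * 1024)
makes the ring-header overhead explicit: the macro (added to
include/linux/hyperv.h below) takes only the payload size and page-aligns
header plus payload. A back-of-the-envelope check, assuming a 4K PAGE_SIZE,
where sizeof(struct hv_ring_buffer) pads out to exactly one page:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
/* assumed: the ring header is padded to one 4K page */
#define HDR_SIZE	PAGE_SIZE
#define VMBUS_RING_SIZE(payload) PAGE_ALIGN(HDR_SIZE + (payload))

int main(void)
{
	/* 36K payload + 4K header = the old 40K total */
	printf("%lu\n", VMBUS_RING_SIZE(36 * 1024));	/* prints 40960 */
	return 0;
}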
drivers/hv/channel.c
@@ -22,20 +22,97 @@
 
 #include "hyperv_vmbus.h"
 
-#define NUM_PAGES_SPANNED(addr, len) \
-((PAGE_ALIGN(addr + len) >> PAGE_SHIFT) - (addr >> PAGE_SHIFT))
-
-static unsigned long virt_to_hvpfn(void *addr)
+/*
+ * hv_gpadl_size - Return the real size of a gpadl, the size that Hyper-V uses
+ *
+ * For BUFFER gpadl, Hyper-V uses the exact same size as the guest does.
+ *
+ * For RING gpadl, in each ring, the guest uses one PAGE_SIZE as the header
+ * (because of the alignment requirement), however, the hypervisor only
+ * uses the first HV_HYP_PAGE_SIZE as the header, therefore leaving a
+ * (PAGE_SIZE - HV_HYP_PAGE_SIZE) gap. And since there are two rings in a
+ * ringbuffer, the total size for a RING gpadl that Hyper-V uses is the
+ * total size that the guest uses minus twice of the gap size.
+ */
+static inline u32 hv_gpadl_size(enum hv_gpadl_type type, u32 size)
 {
-	phys_addr_t paddr;
+	switch (type) {
+	case HV_GPADL_BUFFER:
+		return size;
+	case HV_GPADL_RING:
+		/* The size of a ringbuffer must be page-aligned */
+		BUG_ON(size % PAGE_SIZE);
+		/*
+		 * Two things to notice here:
+		 * 1) We're processing two ring buffers as a unit
+		 * 2) We're skipping any space larger than HV_HYP_PAGE_SIZE in
+		 *    the first guest-size page of each of the two ring buffers.
+		 * So we effectively subtract out two guest-size pages, and add
+		 * back two Hyper-V size pages.
+		 */
+		return size - 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
+	}
+	BUG();
+	return 0;
+}
 
-	if (is_vmalloc_addr(addr))
-		paddr = page_to_phys(vmalloc_to_page(addr)) +
-				 offset_in_page(addr);
-	else
-		paddr = __pa(addr);
+/*
+ * hv_ring_gpadl_send_hvpgoffset - Calculate the send offset (in unit of
+ *                                 HV_HYP_PAGE) in a ring gpadl based on the
+ *                                 offset in the guest
+ *
+ * @offset: the offset (in bytes) where the send ringbuffer starts in the
+ *          virtual address space of the guest
+ */
+static inline u32 hv_ring_gpadl_send_hvpgoffset(u32 offset)
+{
 
-	return paddr >> PAGE_SHIFT;
+	/*
+	 * For RING gpadl, in each ring, the guest uses one PAGE_SIZE as the
+	 * header (because of the alignment requirement), however, the
+	 * hypervisor only uses the first HV_HYP_PAGE_SIZE as the header,
+	 * therefore leaving a (PAGE_SIZE - HV_HYP_PAGE_SIZE) gap.
+	 *
+	 * And to calculate the effective send offset in gpadl, we need to
+	 * substract this gap.
+	 */
+	return (offset - (PAGE_SIZE - HV_HYP_PAGE_SIZE)) >> HV_HYP_PAGE_SHIFT;
 }
+
+/*
+ * hv_gpadl_hvpfn - Return the Hyper-V page PFN of the @i th Hyper-V page in
+ *                  the gpadl
+ *
+ * @type: the type of the gpadl
+ * @kbuffer: the pointer to the gpadl in the guest
+ * @size: the total size (in bytes) of the gpadl
+ * @send_offset: the offset (in bytes) where the send ringbuffer starts in the
+ *               virtual address space of the guest
+ * @i: the index
+ */
+static inline u64 hv_gpadl_hvpfn(enum hv_gpadl_type type, void *kbuffer,
+				 u32 size, u32 send_offset, int i)
+{
+	int send_idx = hv_ring_gpadl_send_hvpgoffset(send_offset);
+	unsigned long delta = 0UL;
+
+	switch (type) {
+	case HV_GPADL_BUFFER:
+		break;
+	case HV_GPADL_RING:
+		if (i == 0)
+			delta = 0;
+		else if (i <= send_idx)
+			delta = PAGE_SIZE - HV_HYP_PAGE_SIZE;
+		else
+			delta = 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	return virt_to_hvpfn(kbuffer + delta + (HV_HYP_PAGE_SIZE * i));
+}
 
 /*
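To make the gap arithmetic above concrete, here is a standalone sketch
(illustrative values only: PAGE_SIZE = 64K, HV_HYP_PAGE_SIZE = 4K, and a
ringbuffer laid out as one 64K header page plus one 64K data page per
ring):

#include <stdio.h>

#define PAGE_SIZE		(64 * 1024UL)
#define HV_HYP_PAGE_SIZE	(4 * 1024UL)
#define HV_HYP_PAGE_SHIFT	12

int main(void)
{
	/* two rings, each one header page + one data page */
	unsigned long guest_size = 4 * PAGE_SIZE;	/* 256K */

	/* Hyper-V only sees 4K of each 64K header page: */
	unsigned long gpadl_size =
		guest_size - 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);

	/* the second ring starts two guest pages in */
	unsigned long send_offset = 2 * PAGE_SIZE;
	unsigned long send_hvpg =
		(send_offset - (PAGE_SIZE - HV_HYP_PAGE_SIZE))
		>> HV_HYP_PAGE_SHIFT;

	printf("gpadl size: %lu bytes (%lu hv pages)\n",
	       gpadl_size, gpadl_size / HV_HYP_PAGE_SIZE); /* 139264, 34 */
	printf("send offset: hv page %lu\n", send_hvpg);   /* 17 */
	return 0;
}

That is: 1 hv page for the first ring's header plus 16 for its data, so
the second ring's header lands at hv-page index 17 in the gpadl.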
@@ -112,6 +189,320 @@ int vmbus_alloc_ring(struct vmbus_channel *newchannel,
 }
 EXPORT_SYMBOL_GPL(vmbus_alloc_ring);
 
+/* Used for Hyper-V Socket: a guest client's connect() to the host */
+int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
+				  const guid_t *shv_host_servie_id)
+{
+	struct vmbus_channel_tl_connect_request conn_msg;
+	int ret;
+
+	memset(&conn_msg, 0, sizeof(conn_msg));
+	conn_msg.header.msgtype = CHANNELMSG_TL_CONNECT_REQUEST;
+	conn_msg.guest_endpoint_id = *shv_guest_servie_id;
+	conn_msg.host_service_id = *shv_host_servie_id;
+
+	ret = vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
+
+	trace_vmbus_send_tl_connect_request(&conn_msg, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);
+
+/*
+ * Set/change the vCPU (@target_vp) the channel (@child_relid) will interrupt.
+ *
+ * CHANNELMSG_MODIFYCHANNEL messages are aynchronous.  Also, Hyper-V does not
+ * ACK such messages.  IOW we can't know when the host will stop interrupting
+ * the "old" vCPU and start interrupting the "new" vCPU for the given channel.
+ *
+ * The CHANNELMSG_MODIFYCHANNEL message type is supported since VMBus version
+ * VERSION_WIN10_V4_1.
+ */
+int vmbus_send_modifychannel(u32 child_relid, u32 target_vp)
+{
+	struct vmbus_channel_modifychannel conn_msg;
+	int ret;
+
+	memset(&conn_msg, 0, sizeof(conn_msg));
+	conn_msg.header.msgtype = CHANNELMSG_MODIFYCHANNEL;
+	conn_msg.child_relid = child_relid;
+	conn_msg.target_vp = target_vp;
+
+	ret = vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
+
+	trace_vmbus_send_modifychannel(&conn_msg, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vmbus_send_modifychannel);
+
+/*
+ * create_gpadl_header - Creates a gpadl for the specified buffer
+ */
+static int create_gpadl_header(enum hv_gpadl_type type, void *kbuffer,
+			       u32 size, u32 send_offset,
+			       struct vmbus_channel_msginfo **msginfo)
+{
+	int i;
+	int pagecount;
+	struct vmbus_channel_gpadl_header *gpadl_header;
+	struct vmbus_channel_gpadl_body *gpadl_body;
+	struct vmbus_channel_msginfo *msgheader;
+	struct vmbus_channel_msginfo *msgbody = NULL;
+	u32 msgsize;
+
+	int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;
+
+	pagecount = hv_gpadl_size(type, size) >> HV_HYP_PAGE_SHIFT;
+
+	/* do we need a gpadl body msg */
+	pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
+		  sizeof(struct vmbus_channel_gpadl_header) -
+		  sizeof(struct gpa_range);
+	pfncount = pfnsize / sizeof(u64);
+
+	if (pagecount > pfncount) {
+		/* we need a gpadl body */
+		/* fill in the header */
+		msgsize = sizeof(struct vmbus_channel_msginfo) +
+			  sizeof(struct vmbus_channel_gpadl_header) +
+			  sizeof(struct gpa_range) + pfncount * sizeof(u64);
+		msgheader = kzalloc(msgsize, GFP_KERNEL);
+		if (!msgheader)
+			goto nomem;
+
+		INIT_LIST_HEAD(&msgheader->submsglist);
+		msgheader->msgsize = msgsize;
+
+		gpadl_header = (struct vmbus_channel_gpadl_header *)
+			msgheader->msg;
+		gpadl_header->rangecount = 1;
+		gpadl_header->range_buflen = sizeof(struct gpa_range) +
+					 pagecount * sizeof(u64);
+		gpadl_header->range[0].byte_offset = 0;
+		gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
+		for (i = 0; i < pfncount; i++)
+			gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
+				type, kbuffer, size, send_offset, i);
+		*msginfo = msgheader;
+
+		pfnsum = pfncount;
+		pfnleft = pagecount - pfncount;
+
+		/* how many pfns can we fit */
+		pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
+			  sizeof(struct vmbus_channel_gpadl_body);
+		pfncount = pfnsize / sizeof(u64);
+
+		/* fill in the body */
+		while (pfnleft) {
+			if (pfnleft > pfncount)
+				pfncurr = pfncount;
+			else
+				pfncurr = pfnleft;
+
+			msgsize = sizeof(struct vmbus_channel_msginfo) +
+				  sizeof(struct vmbus_channel_gpadl_body) +
+				  pfncurr * sizeof(u64);
+			msgbody = kzalloc(msgsize, GFP_KERNEL);
+
+			if (!msgbody) {
+				struct vmbus_channel_msginfo *pos = NULL;
+				struct vmbus_channel_msginfo *tmp = NULL;
+				/*
+				 * Free up all the allocated messages.
+				 */
+				list_for_each_entry_safe(pos, tmp,
+					&msgheader->submsglist,
+					msglistentry) {
+
+					list_del(&pos->msglistentry);
+					kfree(pos);
+				}
+
+				goto nomem;
+			}
+
+			msgbody->msgsize = msgsize;
+			gpadl_body =
+				(struct vmbus_channel_gpadl_body *)msgbody->msg;
+
+			/*
+			 * Gpadl is u32 and we are using a pointer which could
+			 * be 64-bit
+			 * This is governed by the guest/host protocol and
+			 * so the hypervisor guarantees that this is ok.
+			 */
+			for (i = 0; i < pfncurr; i++)
+				gpadl_body->pfn[i] = hv_gpadl_hvpfn(type,
+					kbuffer, size, send_offset, pfnsum + i);
+
+			/* add to msg header */
+			list_add_tail(&msgbody->msglistentry,
+				      &msgheader->submsglist);
+			pfnsum += pfncurr;
+			pfnleft -= pfncurr;
+		}
+	} else {
+		/* everything fits in a header */
+		msgsize = sizeof(struct vmbus_channel_msginfo) +
+			  sizeof(struct vmbus_channel_gpadl_header) +
+			  sizeof(struct gpa_range) + pagecount * sizeof(u64);
+		msgheader = kzalloc(msgsize, GFP_KERNEL);
+		if (msgheader == NULL)
+			goto nomem;
+
+		INIT_LIST_HEAD(&msgheader->submsglist);
+		msgheader->msgsize = msgsize;
+
+		gpadl_header = (struct vmbus_channel_gpadl_header *)
+			msgheader->msg;
+		gpadl_header->rangecount = 1;
+		gpadl_header->range_buflen = sizeof(struct gpa_range) +
+					 pagecount * sizeof(u64);
+		gpadl_header->range[0].byte_offset = 0;
+		gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
+		for (i = 0; i < pagecount; i++)
+			gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
+				type, kbuffer, size, send_offset, i);
+
+		*msginfo = msgheader;
+	}
+
+	return 0;
+nomem:
+	kfree(msgheader);
+	kfree(msgbody);
+	return -ENOMEM;
+}
+
+/*
+ * __vmbus_establish_gpadl - Establish a GPADL for a buffer or ringbuffer
+ *
+ * @channel: a channel
+ * @type: the type of the corresponding GPADL, only meaningful for the guest.
+ * @kbuffer: from kmalloc or vmalloc
+ * @size: page-size multiple
+ * @send_offset: the offset (in bytes) where the send ring buffer starts,
+ *		 should be 0 for BUFFER type gpadl
+ * @gpadl_handle: some funky thing
+ */
+static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
+				   enum hv_gpadl_type type, void *kbuffer,
+				   u32 size, u32 send_offset,
+				   u32 *gpadl_handle)
+{
+	struct vmbus_channel_gpadl_header *gpadlmsg;
+	struct vmbus_channel_gpadl_body *gpadl_body;
+	struct vmbus_channel_msginfo *msginfo = NULL;
+	struct vmbus_channel_msginfo *submsginfo, *tmp;
+	struct list_head *curr;
+	u32 next_gpadl_handle;
+	unsigned long flags;
+	int ret = 0;
+
+	next_gpadl_handle =
+		(atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
+
+	ret = create_gpadl_header(type, kbuffer, size, send_offset, &msginfo);
+	if (ret)
+		return ret;
+
+	init_completion(&msginfo->waitevent);
+	msginfo->waiting_channel = channel;
+
+	gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
+	gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
+	gpadlmsg->child_relid = channel->offermsg.child_relid;
+	gpadlmsg->gpadl = next_gpadl_handle;
+
+	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+	list_add_tail(&msginfo->msglistentry,
+		      &vmbus_connection.chn_msg_list);
+
+	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+	if (channel->rescind) {
+		ret = -ENODEV;
+		goto cleanup;
+	}
+
+	ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
+			     sizeof(*msginfo), true);
+
+	trace_vmbus_establish_gpadl_header(gpadlmsg, ret);
+
+	if (ret != 0)
+		goto cleanup;
+
+	list_for_each(curr, &msginfo->submsglist) {
+		submsginfo = (struct vmbus_channel_msginfo *)curr;
+		gpadl_body =
+			(struct vmbus_channel_gpadl_body *)submsginfo->msg;
+
+		gpadl_body->header.msgtype =
+			CHANNELMSG_GPADL_BODY;
+		gpadl_body->gpadl = next_gpadl_handle;
+
+		ret = vmbus_post_msg(gpadl_body,
+				     submsginfo->msgsize - sizeof(*submsginfo),
+				     true);
+
+		trace_vmbus_establish_gpadl_body(gpadl_body, ret);
+
+		if (ret != 0)
+			goto cleanup;
+
+	}
+	wait_for_completion(&msginfo->waitevent);
+
+	if (msginfo->response.gpadl_created.creation_status != 0) {
+		pr_err("Failed to establish GPADL: err = 0x%x\n",
+		       msginfo->response.gpadl_created.creation_status);
+
+		ret = -EDQUOT;
+		goto cleanup;
+	}
+
+	if (channel->rescind) {
+		ret = -ENODEV;
+		goto cleanup;
+	}
+
+	/* At this point, we received the gpadl created msg */
+	*gpadl_handle = gpadlmsg->gpadl;
+
+cleanup:
+	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+	list_del(&msginfo->msglistentry);
+	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+	list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist,
+				 msglistentry) {
+		kfree(submsginfo);
+	}
+
+	kfree(msginfo);
+	return ret;
+}
+
+/*
+ * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
+ *
+ * @channel: a channel
+ * @kbuffer: from kmalloc or vmalloc
+ * @size: page-size multiple
+ * @gpadl_handle: some funky thing
+ */
+int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+			  u32 size, u32 *gpadl_handle)
+{
+	return __vmbus_establish_gpadl(channel, HV_GPADL_BUFFER, kbuffer, size,
+				       0U, gpadl_handle);
+}
+EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
+
 static int __vmbus_open(struct vmbus_channel *newchannel,
 		       void *userdata, u32 userdatalen,
 		       void (*onchannelcallback)(void *context), void *context)
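A driver-side usage sketch of the exported wrapper (hypothetical caller;
only vmbus_establish_gpadl() itself is the real API). The wrapper always
passes HV_GPADL_BUFFER and a zero send offset to __vmbus_establish_gpadl():

/* Hypothetical: share a page-multiple buffer with the host. */
static int example_share_buffer(struct vmbus_channel *chan)
{
	void *buf = kzalloc(4 * PAGE_SIZE, GFP_KERNEL);
	u32 gpadl;
	int ret;

	if (!buf)
		return -ENOMEM;

	/* size must be a page-size multiple */
	ret = vmbus_establish_gpadl(chan, buf, 4 * PAGE_SIZE, &gpadl);
	if (ret) {
		kfree(buf);
		return ret;
	}

	/* ... hand 'gpadl' to the host in a device-specific message ... */
	return 0;
}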
@@ -148,10 +539,11 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
 	/* Establish the gpadl for the ring buffer */
 	newchannel->ringbuffer_gpadlhandle = 0;
 
-	err = vmbus_establish_gpadl(newchannel,
-				    page_address(newchannel->ringbuffer_page),
-				    (send_pages + recv_pages) << PAGE_SHIFT,
-				    &newchannel->ringbuffer_gpadlhandle);
+	err = __vmbus_establish_gpadl(newchannel, HV_GPADL_RING,
+				      page_address(newchannel->ringbuffer_page),
+				      (send_pages + recv_pages) << PAGE_SHIFT,
+				      newchannel->ringbuffer_send_offset << PAGE_SHIFT,
+				      &newchannel->ringbuffer_gpadlhandle);
 	if (err)
 		goto error_clean_ring;
 
@@ -172,7 +564,13 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
 	open_msg->openid = newchannel->offermsg.child_relid;
 	open_msg->child_relid = newchannel->offermsg.child_relid;
 	open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
-	open_msg->downstream_ringbuffer_pageoffset = newchannel->ringbuffer_send_offset;
+	/*
+	 * The unit of ->downstream_ringbuffer_pageoffset is HV_HYP_PAGE and
+	 * the unit of ->ringbuffer_send_offset (i.e. send_pages) is PAGE, so
+	 * here we calculate it into HV_HYP_PAGE.
+	 */
+	open_msg->downstream_ringbuffer_pageoffset =
+		hv_ring_gpadl_send_hvpgoffset(send_pages << PAGE_SHIFT);
 	open_msg->target_vp = hv_cpu_number_to_vp_number(newchannel->target_cpu);
 
 	if (userdatalen)
@@ -266,299 +664,6 @@ int vmbus_open(struct vmbus_channel *newchannel,
 }
 EXPORT_SYMBOL_GPL(vmbus_open);
 
-/* Used for Hyper-V Socket: a guest client's connect() to the host */
-int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
-				  const guid_t *shv_host_servie_id)
-{
-	struct vmbus_channel_tl_connect_request conn_msg;
-	int ret;
-
-	memset(&conn_msg, 0, sizeof(conn_msg));
-	conn_msg.header.msgtype = CHANNELMSG_TL_CONNECT_REQUEST;
-	conn_msg.guest_endpoint_id = *shv_guest_servie_id;
-	conn_msg.host_service_id = *shv_host_servie_id;
-
-	ret = vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
-
-	trace_vmbus_send_tl_connect_request(&conn_msg, ret);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);
-
-/*
- * Set/change the vCPU (@target_vp) the channel (@child_relid) will interrupt.
- *
- * CHANNELMSG_MODIFYCHANNEL messages are aynchronous.  Also, Hyper-V does not
- * ACK such messages.  IOW we can't know when the host will stop interrupting
- * the "old" vCPU and start interrupting the "new" vCPU for the given channel.
- *
- * The CHANNELMSG_MODIFYCHANNEL message type is supported since VMBus version
- * VERSION_WIN10_V4_1.
- */
-int vmbus_send_modifychannel(u32 child_relid, u32 target_vp)
-{
-	struct vmbus_channel_modifychannel conn_msg;
-	int ret;
-
-	memset(&conn_msg, 0, sizeof(conn_msg));
-	conn_msg.header.msgtype = CHANNELMSG_MODIFYCHANNEL;
-	conn_msg.child_relid = child_relid;
-	conn_msg.target_vp = target_vp;
-
-	ret = vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
-
-	trace_vmbus_send_modifychannel(&conn_msg, ret);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(vmbus_send_modifychannel);
-
-/*
- * create_gpadl_header - Creates a gpadl for the specified buffer
- */
-static int create_gpadl_header(void *kbuffer, u32 size,
-			       struct vmbus_channel_msginfo **msginfo)
-{
-	int i;
-	int pagecount;
-	struct vmbus_channel_gpadl_header *gpadl_header;
-	struct vmbus_channel_gpadl_body *gpadl_body;
-	struct vmbus_channel_msginfo *msgheader;
-	struct vmbus_channel_msginfo *msgbody = NULL;
-	u32 msgsize;
-
-	int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;
-
-	pagecount = size >> PAGE_SHIFT;
-
-	/* do we need a gpadl body msg */
-	pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
-		  sizeof(struct vmbus_channel_gpadl_header) -
-		  sizeof(struct gpa_range);
-	pfncount = pfnsize / sizeof(u64);
-
-	if (pagecount > pfncount) {
-		/* we need a gpadl body */
-		/* fill in the header */
-		msgsize = sizeof(struct vmbus_channel_msginfo) +
-			  sizeof(struct vmbus_channel_gpadl_header) +
-			  sizeof(struct gpa_range) + pfncount * sizeof(u64);
-		msgheader = kzalloc(msgsize, GFP_KERNEL);
-		if (!msgheader)
-			goto nomem;
-
-		INIT_LIST_HEAD(&msgheader->submsglist);
-		msgheader->msgsize = msgsize;
-
-		gpadl_header = (struct vmbus_channel_gpadl_header *)
-			msgheader->msg;
-		gpadl_header->rangecount = 1;
-		gpadl_header->range_buflen = sizeof(struct gpa_range) +
-					 pagecount * sizeof(u64);
-		gpadl_header->range[0].byte_offset = 0;
-		gpadl_header->range[0].byte_count = size;
-		for (i = 0; i < pfncount; i++)
-			gpadl_header->range[0].pfn_array[i] = virt_to_hvpfn(
-				kbuffer + PAGE_SIZE * i);
-		*msginfo = msgheader;
-
-		pfnsum = pfncount;
-		pfnleft = pagecount - pfncount;
-
-		/* how many pfns can we fit */
-		pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
-			  sizeof(struct vmbus_channel_gpadl_body);
-		pfncount = pfnsize / sizeof(u64);
-
-		/* fill in the body */
-		while (pfnleft) {
-			if (pfnleft > pfncount)
-				pfncurr = pfncount;
-			else
-				pfncurr = pfnleft;
-
-			msgsize = sizeof(struct vmbus_channel_msginfo) +
-				  sizeof(struct vmbus_channel_gpadl_body) +
-				  pfncurr * sizeof(u64);
-			msgbody = kzalloc(msgsize, GFP_KERNEL);
-
-			if (!msgbody) {
-				struct vmbus_channel_msginfo *pos = NULL;
-				struct vmbus_channel_msginfo *tmp = NULL;
-				/*
-				 * Free up all the allocated messages.
-				 */
-				list_for_each_entry_safe(pos, tmp,
-					&msgheader->submsglist,
-					msglistentry) {
-
-					list_del(&pos->msglistentry);
-					kfree(pos);
-				}
-
-				goto nomem;
-			}
-
-			msgbody->msgsize = msgsize;
-			gpadl_body =
-				(struct vmbus_channel_gpadl_body *)msgbody->msg;
-
-			/*
-			 * Gpadl is u32 and we are using a pointer which could
-			 * be 64-bit
-			 * This is governed by the guest/host protocol and
-			 * so the hypervisor guarantees that this is ok.
-			 */
-			for (i = 0; i < pfncurr; i++)
-				gpadl_body->pfn[i] = virt_to_hvpfn(
-					kbuffer + PAGE_SIZE * (pfnsum + i));
-
-			/* add to msg header */
-			list_add_tail(&msgbody->msglistentry,
-				      &msgheader->submsglist);
-			pfnsum += pfncurr;
-			pfnleft -= pfncurr;
-		}
-	} else {
-		/* everything fits in a header */
-		msgsize = sizeof(struct vmbus_channel_msginfo) +
-			  sizeof(struct vmbus_channel_gpadl_header) +
-			  sizeof(struct gpa_range) + pagecount * sizeof(u64);
-		msgheader = kzalloc(msgsize, GFP_KERNEL);
-		if (msgheader == NULL)
-			goto nomem;
-
-		INIT_LIST_HEAD(&msgheader->submsglist);
-		msgheader->msgsize = msgsize;
-
-		gpadl_header = (struct vmbus_channel_gpadl_header *)
-			msgheader->msg;
-		gpadl_header->rangecount = 1;
-		gpadl_header->range_buflen = sizeof(struct gpa_range) +
-					 pagecount * sizeof(u64);
-		gpadl_header->range[0].byte_offset = 0;
-		gpadl_header->range[0].byte_count = size;
-		for (i = 0; i < pagecount; i++)
-			gpadl_header->range[0].pfn_array[i] = virt_to_hvpfn(
-				kbuffer + PAGE_SIZE * i);
-
-		*msginfo = msgheader;
-	}
-
-	return 0;
-nomem:
-	kfree(msgheader);
-	kfree(msgbody);
-	return -ENOMEM;
-}
-
-/*
- * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
- *
- * @channel: a channel
- * @kbuffer: from kmalloc or vmalloc
- * @size: page-size multiple
- * @gpadl_handle: some funky thing
- */
-int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
-			  u32 size, u32 *gpadl_handle)
-{
-	struct vmbus_channel_gpadl_header *gpadlmsg;
-	struct vmbus_channel_gpadl_body *gpadl_body;
-	struct vmbus_channel_msginfo *msginfo = NULL;
-	struct vmbus_channel_msginfo *submsginfo, *tmp;
-	struct list_head *curr;
-	u32 next_gpadl_handle;
-	unsigned long flags;
-	int ret = 0;
-
-	next_gpadl_handle =
-		(atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
-
-	ret = create_gpadl_header(kbuffer, size, &msginfo);
-	if (ret)
-		return ret;
-
-	init_completion(&msginfo->waitevent);
-	msginfo->waiting_channel = channel;
-
-	gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
-	gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
-	gpadlmsg->child_relid = channel->offermsg.child_relid;
-	gpadlmsg->gpadl = next_gpadl_handle;
-
-	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
-	list_add_tail(&msginfo->msglistentry,
-		      &vmbus_connection.chn_msg_list);
-
-	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-
-	if (channel->rescind) {
-		ret = -ENODEV;
-		goto cleanup;
-	}
-
-	ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
-			     sizeof(*msginfo), true);
-
-	trace_vmbus_establish_gpadl_header(gpadlmsg, ret);
-
-	if (ret != 0)
-		goto cleanup;
-
-	list_for_each(curr, &msginfo->submsglist) {
-		submsginfo = (struct vmbus_channel_msginfo *)curr;
-		gpadl_body =
-			(struct vmbus_channel_gpadl_body *)submsginfo->msg;
-
-		gpadl_body->header.msgtype =
-			CHANNELMSG_GPADL_BODY;
-		gpadl_body->gpadl = next_gpadl_handle;
-
-		ret = vmbus_post_msg(gpadl_body,
-				     submsginfo->msgsize - sizeof(*submsginfo),
-				     true);
-
-		trace_vmbus_establish_gpadl_body(gpadl_body, ret);
-
-		if (ret != 0)
-			goto cleanup;
-
-	}
-	wait_for_completion(&msginfo->waitevent);
-
-	if (msginfo->response.gpadl_created.creation_status != 0) {
-		pr_err("Failed to establish GPADL: err = 0x%x\n",
-		       msginfo->response.gpadl_created.creation_status);
-
-		ret = -EDQUOT;
-		goto cleanup;
-	}
-
-	if (channel->rescind) {
-		ret = -ENODEV;
-		goto cleanup;
-	}
-
-	/* At this point, we received the gpadl created msg */
-	*gpadl_handle = gpadlmsg->gpadl;
-
-cleanup:
-	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
-	list_del(&msginfo->msglistentry);
-	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-	list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist,
-				 msglistentry) {
-		kfree(submsginfo);
-	}
-
-	kfree(msginfo);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
 
 /*
  * vmbus_teardown_gpadl -Teardown the specified GPADL handle
  */
drivers/hv/hv.c
@@ -165,7 +165,7 @@ void hv_synic_enable_regs(unsigned int cpu)
 	hv_get_simp(simp.as_uint64);
 	simp.simp_enabled = 1;
 	simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
-		>> PAGE_SHIFT;
+		>> HV_HYP_PAGE_SHIFT;
 
 	hv_set_simp(simp.as_uint64);
 
@@ -173,7 +173,7 @@ void hv_synic_enable_regs(unsigned int cpu)
 	hv_get_siefp(siefp.as_uint64);
 	siefp.siefp_enabled = 1;
 	siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
-		>> PAGE_SHIFT;
+		>> HV_HYP_PAGE_SHIFT;
 
 	hv_set_siefp(siefp.as_uint64);
 
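The shift change matters because the SIMP/SIEFP registers encode a frame
number in Hyper-V's fixed 4K units, regardless of the guest's PAGE_SIZE.
A sketch of what the old shift got wrong on a 64K-page guest (illustrative
address):

#include <stdio.h>

int main(void)
{
	unsigned long long phys = 0x123450000ULL;

	printf("4K frame (what the register expects): %llx\n", phys >> 12);
	printf("64K 'frame' (wrong on 64K pages):     %llx\n", phys >> 16);
	return 0;
}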
drivers/hv/hv_util.c
@@ -500,6 +500,9 @@ static void heartbeat_onchannelcallback(void *context)
 	}
 }
 
+#define HV_UTIL_RING_SEND_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)
+#define HV_UTIL_RING_RECV_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)
+
 static int util_probe(struct hv_device *dev,
 		      const struct hv_vmbus_device_id *dev_id)
 {
@@ -530,8 +533,8 @@ static int util_probe(struct hv_device *dev,
 
 	hv_set_drvdata(dev, srv);
 
-	ret = vmbus_open(dev->channel, 4 * HV_HYP_PAGE_SIZE,
-			 4 * HV_HYP_PAGE_SIZE, NULL, 0, srv->util_cb,
+	ret = vmbus_open(dev->channel, HV_UTIL_RING_SEND_SIZE,
+			 HV_UTIL_RING_RECV_SIZE, NULL, 0, srv->util_cb,
 			 dev->channel);
 	if (ret)
 		goto error;
@@ -590,8 +593,8 @@ static int util_resume(struct hv_device *dev)
 		return ret;
 	}
 
-	ret = vmbus_open(dev->channel, 4 * HV_HYP_PAGE_SIZE,
-			 4 * HV_HYP_PAGE_SIZE, NULL, 0, srv->util_cb,
+	ret = vmbus_open(dev->channel, HV_UTIL_RING_SEND_SIZE,
+			 HV_UTIL_RING_RECV_SIZE, NULL, 0, srv->util_cb,
 			 dev->channel);
 	return ret;
 }
drivers/hv/vmbus_drv.c
@@ -83,7 +83,7 @@ static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
 static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
 			    void *args)
 {
-	struct die_args *die = (struct die_args *)args;
+	struct die_args *die = args;
 	struct pt_regs *regs = die->regs;
 
 	/* Don't notify Hyper-V if the die event is other than oops */
drivers/input/serio/hyperv-keyboard.c
@@ -75,8 +75,8 @@ struct synth_kbd_keystroke {
 
 #define HK_MAXIMUM_MESSAGE_SIZE 256
 
-#define KBD_VSC_SEND_RING_BUFFER_SIZE	(40 * 1024)
-#define KBD_VSC_RECV_RING_BUFFER_SIZE	(40 * 1024)
+#define KBD_VSC_SEND_RING_BUFFER_SIZE	VMBUS_RING_SIZE(36 * 1024)
+#define KBD_VSC_RECV_RING_BUFFER_SIZE	VMBUS_RING_SIZE(36 * 1024)
 
 #define XTKBD_EMUL0	0xe0
 #define XTKBD_EMUL1	0xe1
drivers/net/hyperv/netvsc.c
@@ -846,7 +846,7 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
 	}
 
 	for (i = 0; i < page_count; i++) {
-		char *src = phys_to_virt(pb[i].pfn << PAGE_SHIFT);
+		char *src = phys_to_virt(pb[i].pfn << HV_HYP_PAGE_SHIFT);
 		u32 offset = pb[i].offset;
 		u32 len = pb[i].len;
 
drivers/net/hyperv/netvsc_drv.c
@@ -373,32 +373,29 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
 	return txq;
 }
 
-static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
+static u32 fill_pg_buf(unsigned long hvpfn, u32 offset, u32 len,
 		       struct hv_page_buffer *pb)
 {
 	int j = 0;
 
 	/* Deal with compound pages by ignoring unused part
 	 * of the page.
 	 */
-	page += (offset >> PAGE_SHIFT);
-	offset &= ~PAGE_MASK;
+	hvpfn += offset >> HV_HYP_PAGE_SHIFT;
+	offset = offset & ~HV_HYP_PAGE_MASK;
 
 	while (len > 0) {
 		unsigned long bytes;
 
-		bytes = PAGE_SIZE - offset;
+		bytes = HV_HYP_PAGE_SIZE - offset;
 		if (bytes > len)
 			bytes = len;
-		pb[j].pfn = page_to_pfn(page);
+		pb[j].pfn = hvpfn;
 		pb[j].offset = offset;
 		pb[j].len = bytes;
 
 		offset += bytes;
 		len -= bytes;
 
-		if (offset == PAGE_SIZE && len) {
-			page++;
+		if (offset == HV_HYP_PAGE_SIZE && len) {
+			hvpfn++;
 			offset = 0;
 			j++;
 		}
@@ -421,23 +418,26 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
 	 * 2. skb linear data
 	 * 3. skb fragment data
 	 */
-	slots_used += fill_pg_buf(virt_to_page(hdr),
-				  offset_in_page(hdr),
-				  len, &pb[slots_used]);
+	slots_used += fill_pg_buf(virt_to_hvpfn(hdr),
+				  offset_in_hvpage(hdr),
+				  len,
+				  &pb[slots_used]);
 
 	packet->rmsg_size = len;
 	packet->rmsg_pgcnt = slots_used;
 
-	slots_used += fill_pg_buf(virt_to_page(data),
-				  offset_in_page(data),
-				  skb_headlen(skb), &pb[slots_used]);
+	slots_used += fill_pg_buf(virt_to_hvpfn(data),
+				  offset_in_hvpage(data),
+				  skb_headlen(skb),
+				  &pb[slots_used]);
 
 	for (i = 0; i < frags; i++) {
 		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 
-		slots_used += fill_pg_buf(skb_frag_page(frag),
-					  skb_frag_off(frag),
-					  skb_frag_size(frag), &pb[slots_used]);
+		slots_used += fill_pg_buf(page_to_hvpfn(skb_frag_page(frag)),
+					  skb_frag_off(frag),
+					  skb_frag_size(frag),
+					  &pb[slots_used]);
 	}
 	return slots_used;
 }
@@ -453,8 +453,8 @@ static int count_skb_frag_slots(struct sk_buff *skb)
 		unsigned long offset = skb_frag_off(frag);
 
 		/* Skip unused frames from start of page */
-		offset &= ~PAGE_MASK;
-		pages += PFN_UP(offset + size);
+		offset &= ~HV_HYP_PAGE_MASK;
+		pages += HVPFN_UP(offset + size);
 	}
 	return pages;
 }
@@ -462,12 +462,12 @@ static int count_skb_frag_slots(struct sk_buff *skb)
 static int netvsc_get_slots(struct sk_buff *skb)
 {
 	char *data = skb->data;
-	unsigned int offset = offset_in_page(data);
+	unsigned int offset = offset_in_hvpage(data);
 	unsigned int len = skb_headlen(skb);
 	int slots;
 	int frag_slots;
 
-	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
+	slots = DIV_ROUND_UP(offset + len, HV_HYP_PAGE_SIZE);
 	frag_slots = count_skb_frag_slots(skb);
 	return slots + frag_slots;
 }
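A user-space simulation of the reworked fill_pg_buf() splitting, to show
how one span is chopped into 4K hv-page slots (values illustrative; the
struct is a simplified stand-in for the kernel's hv_page_buffer):

#include <stdio.h>

#define HV_HYP_PAGE_SIZE 4096u

struct hv_page_buffer { unsigned long pfn; unsigned int offset, len; };

static unsigned int fill(unsigned long hvpfn, unsigned int offset,
			 unsigned int len, struct hv_page_buffer *pb)
{
	unsigned int j = 0;

	/* skip whole hv pages covered by the starting offset */
	hvpfn += offset / HV_HYP_PAGE_SIZE;
	offset %= HV_HYP_PAGE_SIZE;

	while (len > 0) {
		unsigned int bytes = HV_HYP_PAGE_SIZE - offset;

		if (bytes > len)
			bytes = len;
		pb[j].pfn = hvpfn;
		pb[j].offset = offset;
		pb[j].len = bytes;
		offset += bytes;
		len -= bytes;
		if (offset == HV_HYP_PAGE_SIZE && len) {
			hvpfn++;
			offset = 0;
			j++;
		}
	}
	return j + 1;
}

int main(void)
{
	struct hv_page_buffer pb[4];
	/* 6000 bytes starting 1000 bytes into hv page 100 -> 2 slots */
	unsigned int i, n = fill(100, 1000, 6000, pb);

	for (i = 0; i < n; i++)
		printf("pfn=%lu off=%u len=%u\n",
		       pb[i].pfn, pb[i].offset, pb[i].len);
	return 0;
}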
drivers/net/hyperv/rndis_filter.c
@@ -25,7 +25,7 @@
 
 static void rndis_set_multicast(struct work_struct *w);
 
-#define RNDIS_EXT_LEN PAGE_SIZE
+#define RNDIS_EXT_LEN HV_HYP_PAGE_SIZE
 struct rndis_request {
 	struct list_head list_ent;
 	struct completion  wait_event;
@@ -215,18 +215,17 @@ static int rndis_filter_send_request(struct rndis_device *dev,
 	packet->page_buf_cnt = 1;
 
 	pb[0].pfn = virt_to_phys(&req->request_msg) >>
-					PAGE_SHIFT;
+					HV_HYP_PAGE_SHIFT;
 	pb[0].len = req->request_msg.msg_len;
-	pb[0].offset =
-		(unsigned long)&req->request_msg & (PAGE_SIZE - 1);
+	pb[0].offset = offset_in_hvpage(&req->request_msg);
 
 	/* Add one page_buf when request_msg crossing page boundary */
-	if (pb[0].offset + pb[0].len > PAGE_SIZE) {
+	if (pb[0].offset + pb[0].len > HV_HYP_PAGE_SIZE) {
 		packet->page_buf_cnt++;
-		pb[0].len = PAGE_SIZE -
+		pb[0].len = HV_HYP_PAGE_SIZE -
 			pb[0].offset;
 		pb[1].pfn = virt_to_phys((void *)&req->request_msg
-			+ pb[0].len) >> PAGE_SHIFT;
+			+ pb[0].len) >> HV_HYP_PAGE_SHIFT;
 		pb[1].offset = 0;
 		pb[1].len = req->request_msg.msg_len -
 			pb[0].len;
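The boundary split above in numbers (illustrative, 4K hv pages): a
200-byte RNDIS message starting at byte 4000 of an hv page crosses into
the next one, so it is described by two page buffers:

#include <stdio.h>

#define HV_HYP_PAGE_SIZE 4096u

int main(void)
{
	unsigned int offset = 4000, len = 200;

	if (offset + len > HV_HYP_PAGE_SIZE) {
		unsigned int first = HV_HYP_PAGE_SIZE - offset;

		printf("pb[0]: offset=%u len=%u\n", offset, first); /* 4000, 96 */
		printf("pb[1]: offset=0 len=%u\n", len - first);    /* 104 */
	} else {
		printf("pb[0]: offset=%u len=%u\n", offset, len);
	}
	return 0;
}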
drivers/pci/controller/pci-hyperv.c
@@ -2507,7 +2507,10 @@ static void hv_pci_onchannelcallback(void *context)
 
 /**
  * hv_pci_protocol_negotiation() - Set up protocol
- * @hdev:	VMBus's tracking struct for this root PCI bus
+ * @hdev:		VMBus's tracking struct for this root PCI bus.
+ * @version:		Array of supported channel protocol versions in
+ *			the order of probing - highest go first.
+ * @num_version:	Number of elements in the version array.
  *
  * This driver is intended to support running on Windows 10
  * (server) and later versions. It will not run on earlier
drivers/scsi/storvsc_drv.c
@@ -1739,23 +1739,65 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 	payload_sz = sizeof(cmd_request->mpb);
 
 	if (sg_count) {
-		if (sg_count > MAX_PAGE_BUFFER_COUNT) {
+		unsigned int hvpgoff = 0;
+		unsigned long offset_in_hvpg = sgl->offset & ~HV_HYP_PAGE_MASK;
+		unsigned int hvpg_count = HVPFN_UP(offset_in_hvpg + length);
+		u64 hvpfn;
 
-			payload_sz = (sg_count * sizeof(u64) +
+		if (hvpg_count > MAX_PAGE_BUFFER_COUNT) {
+
+			payload_sz = (hvpg_count * sizeof(u64) +
 				      sizeof(struct vmbus_packet_mpb_array));
 			payload = kzalloc(payload_sz, GFP_ATOMIC);
 			if (!payload)
 				return SCSI_MLQUEUE_DEVICE_BUSY;
 		}
 
+		/*
+		 * sgl is a list of PAGEs, and payload->range.pfn_array
+		 * expects the page number in the unit of HV_HYP_PAGE_SIZE (the
+		 * page size that Hyper-V uses, so here we need to divide PAGEs
+		 * into HV_HYP_PAGE in case that PAGE_SIZE > HV_HYP_PAGE_SIZE.
+		 * Besides, payload->range.offset should be the offset in one
+		 * HV_HYP_PAGE.
+		 */
 		payload->range.len = length;
-		payload->range.offset = sgl[0].offset;
+		payload->range.offset = offset_in_hvpg;
+		hvpgoff = sgl->offset >> HV_HYP_PAGE_SHIFT;
 
 		cur_sgl = sgl;
-		for (i = 0; i < sg_count; i++) {
-			payload->range.pfn_array[i] =
-				page_to_pfn(sg_page((cur_sgl)));
-			cur_sgl = sg_next(cur_sgl);
+		for (i = 0; i < hvpg_count; i++) {
+			/*
+			 * 'i' is the index of hv pages in the payload and
+			 * 'hvpgoff' is the offset (in hv pages) of the first
+			 * hv page in the the first page. The relationship
+			 * between the sum of 'i' and 'hvpgoff' and the offset
+			 * (in hv pages) in a payload page ('hvpgoff_in_page')
+			 * is as follow:
+			 *
+			 * |------------------ PAGE -------------------|
+			 * |   NR_HV_HYP_PAGES_IN_PAGE hvpgs in total  |
+			 * |hvpg|hvpg|     ...      |hvpg|...     |hvpg|
+			 * ^         ^              ^                  ^
+			 * +-hvpgoff-+              +-hvpgoff_in_page-+
+			 * ^                                          |
+			 * +--------------------- i -------------------+
+			 */
+			unsigned int hvpgoff_in_page =
+				(i + hvpgoff) % NR_HV_HYP_PAGES_IN_PAGE;
+
+			/*
+			 * Two cases that we need to fetch a page:
+			 * 1) i == 0, the first step or
+			 * 2) hvpgoff_in_page == 0, when we reach the boundary
+			 *    of a page.
+			 */
+			if (hvpgoff_in_page == 0 || i == 0) {
+				hvpfn = page_to_hvpfn(sg_page(cur_sgl));
+				cur_sgl = sg_next(cur_sgl);
+			}
+
+			payload->range.pfn_array[i] = hvpfn + hvpgoff_in_page;
 		}
 	}
 
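The index arithmetic from the diagram, worked through standalone
(illustrative: PAGE_SIZE = 16K, so NR_HV_HYP_PAGES_IN_PAGE = 4, and the
scatterlist entry starts 4196 bytes into its first guest page):

#include <stdio.h>

#define HV_HYP_PAGE_SHIFT	12
#define HV_HYP_PAGE_SIZE	(1ul << HV_HYP_PAGE_SHIFT)
#define HV_HYP_PAGE_MASK	(~(HV_HYP_PAGE_SIZE - 1))
#define NR_HV_HYP_PAGES_IN_PAGE	4	/* 16K / 4K, assumed */

int main(void)
{
	unsigned long sgl_offset = 4096 + 100;
	unsigned long offset_in_hvpg = sgl_offset & ~HV_HYP_PAGE_MASK;
	unsigned int hvpgoff = sgl_offset >> HV_HYP_PAGE_SHIFT;
	unsigned int i;

	printf("offset_in_hvpg=%lu hvpgoff=%u\n", offset_in_hvpg, hvpgoff);

	/*
	 * A new guest page must be fetched when i == 0 or when the slot
	 * (i + hvpgoff) % NR_HV_HYP_PAGES_IN_PAGE wraps back to 0.
	 */
	for (i = 0; i < 6; i++)
		printf("i=%u -> guest page %u, hv slot %u\n", i,
		       (i + hvpgoff) / NR_HV_HYP_PAGES_IN_PAGE,
		       (i + hvpgoff) % NR_HV_HYP_PAGES_IN_PAGE);
	return 0;
}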
include/linux/hyperv.h
@@ -14,6 +14,7 @@
 
 #include <uapi/linux/hyperv.h>
 
+#include <linux/mm.h>
 #include <linux/types.h>
 #include <linux/scatterlist.h>
 #include <linux/list.h>
@@ -23,12 +24,55 @@
 #include <linux/mod_devicetable.h>
 #include <linux/interrupt.h>
 #include <linux/reciprocal_div.h>
+#include <asm/hyperv-tlfs.h>
 
 #define MAX_PAGE_BUFFER_COUNT				32
 #define MAX_MULTIPAGE_BUFFER_COUNT			32 /* 128K */
 
 #pragma pack(push, 1)
 
+/*
+ * Types for GPADL, decides is how GPADL header is created.
+ *
+ * It doesn't make much difference between BUFFER and RING if PAGE_SIZE is the
+ * same as HV_HYP_PAGE_SIZE.
+ *
+ * If PAGE_SIZE is bigger than HV_HYP_PAGE_SIZE, the headers of ring buffers
+ * will be of PAGE_SIZE, however, only the first HV_HYP_PAGE will be put
+ * into gpadl, therefore the number for HV_HYP_PAGE and the indexes of each
+ * HV_HYP_PAGE will be different between different types of GPADL, for example
+ * if PAGE_SIZE is 64K:
+ *
+ * BUFFER:
+ *
+ * gva:    |--       64k      --|--       64k      --| ... |
+ * gpa:    | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k |
+ * index:  0    1    2     15   16   17   18 .. 31   32 ...
+ *         |    |    ...   |    |    |    ...   |   ...
+ *         v    V          V    V    V          V
+ * gpadl:  | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k | ... |
+ * index:  0    1    2 ... 15   16   17   18 .. 31   32 ...
+ *
+ * RING:
+ *
+ *         | header  |           data           | header  |     data      |
+ * gva:    |-- 64k --|--       64k      --| ... |-- 64k --|-- 64k --| ... |
+ * gpa:    | 4k | .. | 4k | 4k | ... | 4k | ... | 4k | .. | 4k | .. | ... |
+ * index:  0    1    16   17   18    31   ...   n   n+1  n+16 ...   2n
+ *         |         /         /          /     |        /         /
+ *         |        /         /          /      |       /         /
+ *         |       /         /     ...  /  ...  |      /         /
+ *         |      /         /          /        |     /         /
+ *         |     /         /          /         |    /         /
+ *         V    V         V          V          V   V         v
+ * gpadl:  | 4k | 4k |    ...    |    ...    | 4k | 4k |  ... |
+ * index:  0    1    2   ...    16   ...    n-15 n-14 n-13 ... 2n-30
+ */
+enum hv_gpadl_type {
+	HV_GPADL_BUFFER,
+	HV_GPADL_RING
+};
+
 /* Single-page buffer */
 struct hv_page_buffer {
 	u32 len;
@@ -111,7 +155,7 @@ struct hv_ring_buffer {
 	} feature_bits;
 
 	/* Pad it to PAGE_SIZE so that data starts on page boundary */
-	u8	reserved2[4028];
+	u8	reserved2[PAGE_SIZE - 68];
 
 	/*
 	 * Ring data starts here + RingDataStartOffset
@@ -120,6 +164,10 @@ struct hv_ring_buffer {
 	u8 buffer[];
 } __packed;
 
+/* Calculate the proper size of a ringbuffer, it must be page-aligned */
+#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(sizeof(struct hv_ring_buffer) + \
+					       (payload_sz))
+
 struct hv_ring_buffer_info {
 	struct hv_ring_buffer *ring_buffer;
 	u32 ring_size;			/* Include the shared header */
@@ -1630,4 +1678,22 @@ struct hyperv_pci_block_ops {
 
 extern struct hyperv_pci_block_ops hvpci_block_ops;
 
+static inline unsigned long virt_to_hvpfn(void *addr)
+{
+	phys_addr_t paddr;
+
+	if (is_vmalloc_addr(addr))
+		paddr = page_to_phys(vmalloc_to_page(addr)) +
+				 offset_in_page(addr);
+	else
+		paddr = __pa(addr);
+
+	return  paddr >> HV_HYP_PAGE_SHIFT;
+}
+
+#define NR_HV_HYP_PAGES_IN_PAGE	(PAGE_SIZE / HV_HYP_PAGE_SIZE)
+#define offset_in_hvpage(ptr)	((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)
+#define HVPFN_UP(x)	(((x) + HV_HYP_PAGE_SIZE-1) >> HV_HYP_PAGE_SHIFT)
+#define page_to_hvpfn(page)	(page_to_pfn(page) * NR_HV_HYP_PAGES_IN_PAGE)
+
 #endif /* _HYPERV_H */
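A quick sanity check of the new helper macros, run as a user-space sketch
with assumed values (64K guest PAGE_SIZE, 4K Hyper-V pages):

#include <stdio.h>

#define PAGE_SIZE		(64 * 1024ul)
#define HV_HYP_PAGE_SHIFT	12
#define HV_HYP_PAGE_SIZE	(1ul << HV_HYP_PAGE_SHIFT)
#define HV_HYP_PAGE_MASK	(~(HV_HYP_PAGE_SIZE - 1))

#define NR_HV_HYP_PAGES_IN_PAGE	(PAGE_SIZE / HV_HYP_PAGE_SIZE)
#define offset_in_hvpage(ptr)	((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)
#define HVPFN_UP(x)	(((x) + HV_HYP_PAGE_SIZE - 1) >> HV_HYP_PAGE_SHIFT)

int main(void)
{
	printf("%lu\n", NR_HV_HYP_PAGES_IN_PAGE);	/* 16 */
	printf("%lu\n", offset_in_hvpage(0x12345));	/* 0x345 = 837 */
	printf("%lu\n", HVPFN_UP(5000));		/* 2 hv pages */
	return 0;
}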