From aa0a34be68290aa9aa071c0691fb8b6edda38358 Mon Sep 17 00:00:00 2001
From: Haiyang Zhang
Date: Mon, 13 Apr 2015 16:34:35 -0700
Subject: hv_netvsc: Implement partial copy into send buffer

If the remaining space in a send buffer slot is too small for the whole
message, we copy only the RNDIS header and PPI data into the send buffer,
so we can batch one more packet each time. This reduces the per-message
vmbus overhead.

Signed-off-by: Haiyang Zhang
Reviewed-by: K. Y. Srinivasan
Signed-off-by: David S. Miller
---
 drivers/net/hyperv/netvsc.c | 50 ++++++++++++++++++++++++++++++---------------
 1 file changed, 34 insertions(+), 16 deletions(-)

(limited to 'drivers/net/hyperv/netvsc.c')

diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 4d4d497d5762..2e8ad0636b46 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -703,15 +703,18 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
 	u32 msg_size = 0;
 	u32 padding = 0;
 	u32 remain = packet->total_data_buflen % net_device->pkt_align;
+	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
+		packet->page_buf_cnt;
 
 	/* Add padding */
-	if (packet->is_data_pkt && packet->xmit_more && remain) {
+	if (packet->is_data_pkt && packet->xmit_more && remain &&
+	    !packet->cp_partial) {
 		padding = net_device->pkt_align - remain;
 		packet->rndis_msg->msg_len += padding;
 		packet->total_data_buflen += padding;
 	}
 
-	for (i = 0; i < packet->page_buf_cnt; i++) {
+	for (i = 0; i < page_count; i++) {
 		char *src = phys_to_virt(packet->page_buf[i].pfn << PAGE_SHIFT);
 		u32 offset = packet->page_buf[i].offset;
 		u32 len = packet->page_buf[i].len;
@@ -739,6 +742,7 @@ static inline int netvsc_send_pkt(
 	struct net_device *ndev = net_device->ndev;
 	u64 req_id;
 	int ret;
+	struct hv_page_buffer *pgbuf;
 
 	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
 	if (packet->is_data_pkt) {
@@ -766,8 +770,10 @@ static inline int netvsc_send_pkt(
 		return -ENODEV;
 
 	if (packet->page_buf_cnt) {
+		pgbuf = packet->cp_partial ? packet->page_buf +
+			packet->rmsg_pgcnt : packet->page_buf;
 		ret = vmbus_sendpacket_pagebuffer(out_channel,
-						  packet->page_buf,
+						  pgbuf,
 						  packet->page_buf_cnt,
 						  &nvmsg,
 						  sizeof(struct nvsp_message),
@@ -824,6 +830,7 @@ int netvsc_send(struct hv_device *device,
 	unsigned long flag;
 	struct multi_send_data *msdp;
 	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
+	bool try_batch;
 
 	net_device = get_outbound_net_device(device);
 	if (!net_device)
@@ -837,6 +844,7 @@ int netvsc_send(struct hv_device *device,
 	}
 	packet->channel = out_channel;
 	packet->send_buf_index = NETVSC_INVALID_INDEX;
+	packet->cp_partial = false;
 
 	msdp = &net_device->msd[q_idx];
 
@@ -845,12 +853,18 @@ int netvsc_send(struct hv_device *device,
 	if (msdp->pkt)
 		msd_len = msdp->pkt->total_data_buflen;
 
-	if (packet->is_data_pkt && msd_len > 0 &&
-	    msdp->count < net_device->max_pkt &&
-	    msd_len + pktlen + net_device->pkt_align <
+	try_batch = packet->is_data_pkt && msd_len > 0 && msdp->count <
+		    net_device->max_pkt;
+
+	if (try_batch && msd_len + pktlen + net_device->pkt_align <
 	    net_device->send_section_size) {
 		section_index = msdp->pkt->send_buf_index;
 
+	} else if (try_batch && msd_len + packet->rmsg_size <
+		   net_device->send_section_size) {
+		section_index = msdp->pkt->send_buf_index;
+		packet->cp_partial = true;
+
 	} else if (packet->is_data_pkt && pktlen + net_device->pkt_align <
 		   net_device->send_section_size) {
 		section_index = netvsc_get_next_send_section(net_device);
@@ -866,22 +880,26 @@ int netvsc_send(struct hv_device *device,
 		netvsc_copy_to_send_buf(net_device,
 					section_index, msd_len,
 					packet);
 
-		if (!packet->part_of_skb) {
-			skb = (struct sk_buff *)
-				(unsigned long)
-				packet->send_completion_tid;
-
-			packet->send_completion_tid = 0;
-		}
-		packet->page_buf_cnt = 0;
 		packet->send_buf_index = section_index;
-		packet->total_data_buflen += msd_len;
+
+		if (packet->cp_partial) {
+			packet->page_buf_cnt -= packet->rmsg_pgcnt;
+			packet->total_data_buflen = msd_len + packet->rmsg_size;
+		} else {
+			packet->page_buf_cnt = 0;
+			packet->total_data_buflen += msd_len;
+			if (!packet->part_of_skb) {
+				skb = (struct sk_buff *)(unsigned long)packet->
+					send_completion_tid;
+				packet->send_completion_tid = 0;
+			}
+		}
 
 		if (msdp->pkt)
 			netvsc_xmit_completion(msdp->pkt);
 
-		if (packet->xmit_more) {
+		if (packet->xmit_more && !packet->cp_partial) {
 			msdp->pkt = packet;
 			msdp->count++;
 		} else {
-- 
cgit v1.2.3
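Editor's note: the standalone sketch below models the send-section selection
logic that the last two hunks add to netvsc_send() (the try_batch / cp_partial
decision). It is illustrative only and not driver code: struct fake_packet,
pick_section(), and the constants SEND_SECTION_SIZE, PKT_ALIGN and
MAX_BATCHED_PKTS are simplified, hypothetical stand-ins for the driver's own
structures and per-device fields.

/*
 * Illustrative sketch, not kernel code: models how netvsc_send() decides
 * whether a packet can share the pending send-buffer section, either by
 * copying the whole message or, after this patch, only its RNDIS header
 * and PPI data (cp_partial). All names and sizes here are assumptions.
 */
#include <stdbool.h>
#include <stdio.h>

#define SEND_SECTION_SIZE 6144	/* assumed send-buffer slot size */
#define PKT_ALIGN	  8	/* assumed packet alignment */
#define MAX_BATCHED_PKTS  8	/* assumed per-channel batching limit */

struct fake_packet {
	bool is_data_pkt;
	unsigned int total_len;	/* RNDIS header + PPI data + payload */
	unsigned int rmsg_size;	/* RNDIS header + PPI data only */
	bool cp_partial;	/* set when only rmsg_size is copied */
};

/*
 * Returns 1 if the packet can ride in the pending section, 0 if a new
 * section (or a send without the buffer) is needed. msd_len is the data
 * already queued in the pending section.
 */
static int pick_section(struct fake_packet *pkt, unsigned int msd_len,
			unsigned int batched_count)
{
	bool try_batch = pkt->is_data_pkt && msd_len > 0 &&
			 batched_count < MAX_BATCHED_PKTS;

	pkt->cp_partial = false;

	if (try_batch &&
	    msd_len + pkt->total_len + PKT_ALIGN < SEND_SECTION_SIZE) {
		/* Whole message still fits: copy everything, keep batching. */
		return 1;
	} else if (try_batch &&
		   msd_len + pkt->rmsg_size < SEND_SECTION_SIZE) {
		/*
		 * Only the RNDIS header + PPI data fit: partial copy lets one
		 * more packet share this section; the payload goes out by
		 * reference through page buffers instead.
		 */
		pkt->cp_partial = true;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct fake_packet pkt = {
		.is_data_pkt = true, .total_len = 1600, .rmsg_size = 200,
	};
	unsigned int msd_len = 5200;	/* data already pending in the slot */

	if (pick_section(&pkt, msd_len, 3))
		printf("placed in pending section, cp_partial=%d\n",
		       pkt.cp_partial);
	else
		printf("needs a new send section\n");
	return 0;
}

With the example numbers above, the whole 1600-byte message no longer fits in
the 6144-byte slot, but its 200-byte RNDIS header and PPI data still do, so
the partial-copy path is taken. That is exactly the case the commit message
describes: one more packet gets batched into the slot, saving a vmbus message.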