/*
*
* Copyright (c) 2011, Microsoft Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*
* Authors:
* Haiyang Zhang <haiyangz@microsoft.com>
* Hank Janssen <hjanssen@microsoft.com>
* K. Y. Srinivasan <kys@microsoft.com>
*
*/
#ifndef _HYPERV_H
#define _HYPERV_H
#include <uapi/linux/hyperv.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/reciprocal_div.h>
#define MAX_PAGE_BUFFER_COUNT 32
#define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */
#pragma pack(push, 1)
/* Single-page buffer */
struct hv_page_buffer {
u32 len;
u32 offset;
u64 pfn;
};
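
/*
 * Illustrative example (not part of the original header): a VMbus
 * driver describing a chunk of data that fits within a single page
 * would fill these fields roughly as follows; "pb", "data" and
 * "bytes" are hypothetical driver-side variables.
 *
 *	pb.pfn	  = virt_to_phys(data) >> PAGE_SHIFT;
 *	pb.offset = offset_in_page(data);
 *	pb.len	  = bytes;
 */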
/* Multiple-page buffer */
struct hv_multipage_buffer {
	/* Length and Offset determine the # of pfns in the array */
u32 len;
u32 offset;
u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
};
/*
* Multiple-page buffer array; the pfn array is variable size:
* The number of entries in the PFN array is determined by
* "len" and "offset".
*/
struct hv_mpb_array {
	/* Length and Offset determine the # of pfns in the array */
u32 len;
u32 offset;
u64 pfn_array[];
};
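
/*
 * Illustrative helper (not part of the original header), assuming
 * "offset" is the byte offset of the data within the first page:
 * the described buffer spans DIV_ROUND_UP(offset + len, PAGE_SIZE)
 * pages, which is how many pfn_array entries are needed.
 */
static inline u32 hv_mpb_pfn_count(u32 offset, u32 len)
{
	return (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}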
/* 0x18 includes the proprietary packet header */
#define MAX_PAGE_BUFFER_PACKET (0x18 + \
(sizeof(struct hv_page_buffer) * \
MAX_PAGE_BUFFER_COUNT))
#define MAX_MULTIPAGE_BUFFER_PACKET (0x18 + \
sizeof(struct hv_multipage_buffer))
#pragma pack(pop)
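
/*
 * For reference, with the packed layouts above: sizeof(struct
 * hv_page_buffer) is 16 bytes, so MAX_PAGE_BUFFER_PACKET works out to
 * 0x18 + 16 * 32 = 536 bytes, and MAX_MULTIPAGE_BUFFER_PACKET to
 * 0x18 + (8 + 32 * 8) = 288 bytes.
 */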
struct hv_ring_buffer {
/* Offset in bytes from the start of ring data below */
u32 write_index;
/* Offset in bytes from the start of ring data below */
u32 read_index;
u32 interrupt_mask;
/*
* WS2012/Win8 and later versions of Hyper-V implement interrupt
* driven flow management. The feature bit feat_pending_send_sz
* is set by the host on the host->guest ring buffer, and by the
* guest on the guest->host ring buffer.
*
* The meaning of the feature bit is a bit complex in that it has
* semantics that apply to both ring buffers. If the guest sets
* the feature bit in the guest->host ring buffer, the guest is
* telling the host that:
* 1) It will set the pending_send_sz field in the guest->host ring
* buffer when it is waiting for space to become available, and
* 2) It will read the pending_send_sz field in the host->guest
* ring buffer and interrupt the host when it frees enough space
*
* Similarly, if the host sets the feature bit in the host->guest
* ring buffer, the host is telling the guest that:
* 1) It will set the pending_send_sz field in the host->guest ring
* buffer when it is waiting for space to become available, and
* 2) It will read the pending_send_sz field in the guest->host
* ring buffer and interrupt the guest when it frees enough space
*
* If either the guest or host does not set the feature bit that it
* owns, that guest or host must do polling if it encounters a full
* ring buffer, and not signal the other end with an interrupt.
*/
u32 pending_send_sz;
u32 reserved1[12];
union {
struct {
u32 feat_pending_send_sz:1;
};
u32 value;
} feature_bits;
/* Pad it to PAGE_SIZE so that data starts on page boundary */
u8 reserved2[4028];
/*
* Ring data starts here + RingDataStartOffset
* !!! DO NOT place any fields below this !!!
*/
	u8 buffer[];
} __packed;
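
/*
 * Illustrative sketch (not part of the original header) of the
 * pending_send_sz flow control described in the comment inside
 * struct hv_ring_buffer above. Both fragments operate on the same
 * ring buffer: one endpoint writes into it, the other drains it.
 * The identifiers ring, ring_info, channel and bytes_needed are
 * hypothetical; the real logic lives in the VMbus ring-buffer code
 * (drivers/hv/ring_buffer.c).
 *
 *	// Writer: the ring is too full for this packet, so advertise
 *	// how much space we are waiting for before backing off.
 *	if (ring->feature_bits.feat_pending_send_sz)
 *		ring->pending_send_sz = bytes_needed;
 *
 *	// Reader: after advancing read_index, interrupt the writer
 *	// only if it advertised a pending size and at least that much
 *	// space is now free.
 *	pending_sz = READ_ONCE(ring->pending_send_sz);
 *	if (pending_sz && hv_get_bytes_to_write(ring_info) >= pending_sz)
 *		vmbus_setevent(channel);
 */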
struct hv_ring_buffer_info {
struct hv_ring_buffer *ring_buffer;
u32 ring_size; /* Include the shared header */
struct reciprocal_value ring_size_div10_reciprocal;
spinlock_t ring_lock;
u32 ring_datasize; /* < ring_size */
u32 priv_read_index;
/*
* The ring buffer mutex lock. This lock prevents the ring buffer from
* being freed while the ring buffer is being accessed.
*/
struct mutex ring_buffer_mutex;
};
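
/*
 * Bytes currently available to read from the ring, accounting for
 * wrap-around. Worked example (illustrative numbers only): with
 * ring_datasize = 4096, read_index = 3000 and write_index = 1000 the
 * data wraps, giving (4096 - 3000) + 1000 = 2096 bytes to read.
 */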
static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, read;

	dsize = rbi->ring_datasize;
	read_loc = rbi->ring_buffer->read_index;
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	read = write_loc >= read_loc ? (write_loc - read_loc) :
		(dsize - read_loc) + write_loc;

	return read;
}
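
/*
 * Free space in the ring, i.e. ring_datasize minus the bytes still to
 * be read. read_index is fetched with READ_ONCE() because the remote
 * endpoint advances it concurrently.
 */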
static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, write;

	dsize = rbi->ring_datasize;
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = rbi->ring_buffer->write_index;

	write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;

	return write;
}