| author | Ingo Molnar <mingo@kernel.org> | 2014-01-12 17:56:29 +0100 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2014-01-12 17:56:29 +0100 |
| commit | da4540757d35a98a350d6e8d882a64b2d04f8581 | |
| tree | ad77d8752f5db40497c819f051cbae12effb8163 /drivers/net/xen-netback/common.h | |
| parent | cc131eef1cec905b9bdb0be6d7ab3af86f23de98 | |
| parent | 7e22e91102c6b9df7c4ae2168910e19d2bb14cd6 | |
Merge tag 'v3.13-rc8' into x86/ras, to pick up fixes.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'drivers/net/xen-netback/common.h')
| -rw-r--r-- | drivers/net/xen-netback/common.h | 19 |
|---|---|---|

1 file changed, 13 insertions, 6 deletions
```diff
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 08ae01b41c8..c47794b9d42 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -101,6 +101,13 @@ struct xenvif_rx_meta {
 
 #define MAX_PENDING_REQS 256
 
+/* It's possible for an skb to have a maximal number of frags
+ * but still be less than MAX_BUFFER_OFFSET in size. Thus the
+ * worst-case number of copy operations is MAX_SKB_FRAGS per
+ * ring slot.
+ */
+#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
+
 struct xenvif {
 	/* Unique identifier for this interface. */
 	domid_t domid;
@@ -143,13 +150,13 @@ struct xenvif {
 	 */
 	RING_IDX rx_req_cons_peek;
 
-	/* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
-	 * head/fragment page uses 2 copy operations because it
-	 * straddles two buffers in the frontend.
-	 */
-	struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
-	struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
+	/* This array is allocated seperately as it is large */
+	struct gnttab_copy *grant_copy_op;
+	/* We create one meta structure per ring request we consume, so
+	 * the maximum number is the same as the ring size.
+	 */
+	struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
 
 	u8 fe_dev_addr[6];
```
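The key change is that `grant_copy_op` becomes a pointer instead of an embedded array. The new worst case is `MAX_SKB_FRAGS` copy operations per ring slot rather than 2, so with typical values of 17 for `MAX_SKB_FRAGS` and 256 for `XEN_NETIF_RX_RING_SIZE` (4 KiB pages), `MAX_GRANT_COPY_OPS` works out to 17 × 256 = 4352 `gnttab_copy` entries, versus the previous 2 × 256 = 512 — too large to keep inside `struct xenvif`. The matching allocation lives in the interface setup code, which this path-limited diff does not show; below is a minimal sketch of what such an allocation could look like, assuming `vzalloc()`/`vfree()` and hypothetical helper names (`xenvif_rx_arrays_alloc`/`_free` are illustrative, not from the commit):

```c
/* Illustrative only: the companion change is in
 * drivers/net/xen-netback/interface.c, outside this path-limited diff.
 * Helper names here are hypothetical.
 */
#include <linux/vmalloc.h>
#include "common.h"	/* struct xenvif, MAX_GRANT_COPY_OPS (assumed path) */

static int xenvif_rx_arrays_alloc(struct xenvif *vif)
{
	/* Thousands of gnttab_copy entries are too big to embed in
	 * struct xenvif and an awkward size for kmalloc(), so use
	 * vzalloc(), which maps individually allocated pages and
	 * returns them zeroed.
	 */
	vif->grant_copy_op = vzalloc(sizeof(struct gnttab_copy) *
				     MAX_GRANT_COPY_OPS);
	if (!vif->grant_copy_op)
		return -ENOMEM;

	return 0;
}

static void xenvif_rx_arrays_free(struct xenvif *vif)
{
	vfree(vif->grant_copy_op);	/* vfree(NULL) is a no-op */
	vif->grant_copy_op = NULL;
}
```

The `meta` array, by contrast, stays embedded: one meta structure is created per consumed ring request, so its worst case drops from twice the ring size to exactly the ring size.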
