Diffstat (limited to 'drivers/net/vxge/vxge-config.h')
-rw-r--r--  drivers/net/vxge/vxge-config.h  2259
1 file changed, 2259 insertions, 0 deletions
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
new file mode 100644
index 00000000000..afbdf6f4d22
--- /dev/null
+++ b/drivers/net/vxge/vxge-config.h
@@ -0,0 +1,2259 @@
+/******************************************************************************
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by reference.
+ * Drivers based on or derived from this code fall under the GPL and must
+ * retain the authorship, copyright and license notice. This file is not
+ * a complete program and may only be used when the entire operating
+ * system is licensed under the GPL.
+ * See the file COPYING in this distribution for more information.
+ *
+ * vxge-config.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
+ * Virtualized Server Adapter.
+ * Copyright(c) 2002-2009 Neterion Inc.
+ ******************************************************************************/
+#ifndef VXGE_CONFIG_H
+#define VXGE_CONFIG_H
+#include <linux/list.h>
+
+#ifndef VXGE_CACHE_LINE_SIZE
+#define VXGE_CACHE_LINE_SIZE 128
+#endif
+
+#define vxge_os_vaprintf(level, mask, fmt, ...) { \
+	char buff[255]; \
+	snprintf(buff, sizeof(buff), fmt, __VA_ARGS__); \
+	printk("%s\n", buff); \
+}
+
+#ifndef VXGE_ALIGN
+#define VXGE_ALIGN(adrs, size) \
+ (((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
+#endif
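+
+/*
+ * Illustrative examples (not part of the original header): VXGE_ALIGN
+ * evaluates to the number of pad bytes needed to round @adrs up to the
+ * next @size boundary, where @size is a power of two:
+ *
+ *	VXGE_ALIGN(0x1003, 8)	== 5	pad to reach 0x1008
+ *	VXGE_ALIGN(0x1000, 8)	== 0	already aligned
+ *	VXGE_ALIGN(0x7, VXGE_CACHE_LINE_SIZE) == 121	pad to a cache line
+ */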
+
+#define VXGE_HW_MIN_MTU 68
+#define VXGE_HW_MAX_MTU 9600
+#define VXGE_HW_DEFAULT_MTU 1500
+
+#ifdef VXGE_DEBUG_ASSERT
+
+/**
+ * vxge_assert
+ * @test: C-condition to check
+ *
+ * This macro implements a traditional assert. Assertions are enabled by
+ * default and can be disabled by leaving VXGE_DEBUG_ASSERT undefined at
+ * compile time.
+ */
+#define vxge_assert(test) { \
+ if (!(test)) \
+ vxge_os_bug("bad cond: "#test" at %s:%d\n", \
+ __FILE__, __LINE__); }
+#else
+#define vxge_assert(test)
+#endif /* end of VXGE_DEBUG_ASSERT */
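+
+/*
+ * Usage sketch (illustrative; the invariant below is an assumption,
+ * not an assertion taken from the driver):
+ *
+ *	vxge_assert(channel->reserve_top <= channel->length);
+ *
+ * The check compiles to nothing unless VXGE_DEBUG_ASSERT is defined.
+ */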
+
+/**
+ * enum vxge_debug_level
+ * @VXGE_NONE: debug disabled
+ * @VXGE_TRACE: all errors plus all kinds of verbose tracing printouts
+ * are logged. Very noisy.
+ * @VXGE_ERR: all errors are logged.
+ *
+ * This enumeration is used to switch between debug levels at runtime
+ * when the DEBUG macro is defined at compile time. If DEBUG is not
+ * defined, the debug code is compiled out.
+ */
+enum vxge_debug_level {
+ VXGE_NONE = 0,
+ VXGE_TRACE = 1,
+ VXGE_ERR = 2
+};
+
+#define NULL_VPID 0xFFFFFFFF
+#ifdef CONFIG_VXGE_DEBUG_TRACE_ALL
+#define VXGE_DEBUG_MODULE_MASK 0xffffffff
+#define VXGE_DEBUG_TRACE_MASK 0xffffffff
+#define VXGE_DEBUG_ERR_MASK 0xffffffff
+#define VXGE_DEBUG_MASK 0x000001ff
+#else
+#define VXGE_DEBUG_MODULE_MASK 0x20000000
+#define VXGE_DEBUG_TRACE_MASK 0x20000000
+#define VXGE_DEBUG_ERR_MASK 0x20000000
+#define VXGE_DEBUG_MASK 0x00000001
+#endif
+
+/*
+ * @VXGE_COMPONENT_LL: debug the vxge link layer module
+ * @VXGE_COMPONENT_ALL: activate debug for all modules, with no exceptions
+ *
+ * These values are used to distinguish modules or libraries at compile
+ * time and at runtime. The Makefile must declare the
+ * VXGE_DEBUG_MODULE_MASK macro and set it to the proper value.
+ */
+#define VXGE_COMPONENT_LL 0x20000000
+#define VXGE_COMPONENT_ALL 0xffffffff
+
+#define VXGE_HW_BASE_INF 100
+#define VXGE_HW_BASE_ERR 200
+#define VXGE_HW_BASE_BADCFG 300
+
+enum vxge_hw_status {
+ VXGE_HW_OK = 0,
+ VXGE_HW_FAIL = 1,
+ VXGE_HW_PENDING = 2,
+ VXGE_HW_COMPLETIONS_REMAIN = 3,
+
+ VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS = VXGE_HW_BASE_INF + 1,
+ VXGE_HW_INF_OUT_OF_DESCRIPTORS = VXGE_HW_BASE_INF + 2,
+
+ VXGE_HW_ERR_INVALID_HANDLE = VXGE_HW_BASE_ERR + 1,
+ VXGE_HW_ERR_OUT_OF_MEMORY = VXGE_HW_BASE_ERR + 2,
+ VXGE_HW_ERR_VPATH_NOT_AVAILABLE = VXGE_HW_BASE_ERR + 3,
+ VXGE_HW_ERR_VPATH_NOT_OPEN = VXGE_HW_BASE_ERR + 4,
+ VXGE_HW_ERR_WRONG_IRQ = VXGE_HW_BASE_ERR + 5,
+ VXGE_HW_ERR_SWAPPER_CTRL = VXGE_HW_BASE_ERR + 6,
+ VXGE_HW_ERR_INVALID_MTU_SIZE = VXGE_HW_BASE_ERR + 7,
+ VXGE_HW_ERR_INVALID_INDEX = VXGE_HW_BASE_ERR + 8,
+ VXGE_HW_ERR_INVALID_TYPE = VXGE_HW_BASE_ERR + 9,
+ VXGE_HW_ERR_INVALID_OFFSET = VXGE_HW_BASE_ERR + 10,
+ VXGE_HW_ERR_INVALID_DEVICE = VXGE_HW_BASE_ERR + 11,
+ VXGE_HW_ERR_VERSION_CONFLICT = VXGE_HW_BASE_ERR + 12,
+ VXGE_HW_ERR_INVALID_PCI_INFO = VXGE_HW_BASE_ERR + 13,
+ VXGE_HW_ERR_INVALID_TCODE = VXGE_HW_BASE_ERR + 14,
+ VXGE_HW_ERR_INVALID_BLOCK_SIZE = VXGE_HW_BASE_ERR + 15,
+ VXGE_HW_ERR_INVALID_STATE = VXGE_HW_BASE_ERR + 16,
+ VXGE_HW_ERR_PRIVILAGED_OPEARATION = VXGE_HW_BASE_ERR + 17,
+ VXGE_HW_ERR_INVALID_PORT = VXGE_HW_BASE_ERR + 18,
+ VXGE_HW_ERR_FIFO = VXGE_HW_BASE_ERR + 19,
+ VXGE_HW_ERR_VPATH = VXGE_HW_BASE_ERR + 20,
+ VXGE_HW_ERR_CRITICAL = VXGE_HW_BASE_ERR + 21,
+ VXGE_HW_ERR_SLOT_FREEZE = VXGE_HW_BASE_ERR + 22,
+
+ VXGE_HW_BADCFG_RING_INDICATE_MAX_PKTS = VXGE_HW_BASE_BADCFG + 1,
+ VXGE_HW_BADCFG_FIFO_BLOCKS = VXGE_HW_BASE_BADCFG + 2,
+ VXGE_HW_BADCFG_VPATH_MTU = VXGE_HW_BASE_BADCFG + 3,
+ VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG = VXGE_HW_BASE_BADCFG + 4,
+ VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH = VXGE_HW_BASE_BADCFG + 5,
+ VXGE_HW_BADCFG_INTR_MODE = VXGE_HW_BASE_BADCFG + 6,
+ VXGE_HW_BADCFG_RTS_MAC_EN = VXGE_HW_BASE_BADCFG + 7,
+
+ VXGE_HW_EOF_TRACE_BUF = -1
+};
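+
+/*
+ * A minimal sketch (not from the driver sources) showing how the numeric
+ * bands above can be used to classify a status code:
+ *
+ *	static inline int vxge_hw_status_is_err(enum vxge_hw_status s)
+ *	{
+ *		return s >= VXGE_HW_BASE_ERR && s < VXGE_HW_BASE_BADCFG;
+ *	}
+ *
+ *	static inline int vxge_hw_status_is_badcfg(enum vxge_hw_status s)
+ *	{
+ *		return s >= VXGE_HW_BASE_BADCFG;
+ *	}
+ */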
+
+/**
+ * enum vxge_hw_device_link_state - Link state enumeration.
+ * @VXGE_HW_LINK_NONE: Invalid link state.
+ * @VXGE_HW_LINK_DOWN: Link is down.
+ * @VXGE_HW_LINK_UP: Link is up.
+ */
+enum vxge_hw_device_link_state {
+ VXGE_HW_LINK_NONE,
+ VXGE_HW_LINK_DOWN,
+ VXGE_HW_LINK_UP
+};
+
+/**
+ * struct vxge_hw_device_date - Date Format
+ * @day: Day
+ * @month: Month
+ * @year: Year
+ * @date: Date in string format
+ *
+ * Structure for returning date
+ */
+
+#define VXGE_HW_FW_STRLEN 32
+struct vxge_hw_device_date {
+ u32 day;
+ u32 month;
+ u32 year;
+ char date[VXGE_HW_FW_STRLEN];
+};
+
+struct vxge_hw_device_version {
+ u32 major;
+ u32 minor;
+ u32 build;
+ char version[VXGE_HW_FW_STRLEN];
+};
+
+u64
+__vxge_hw_vpath_pci_func_mode_get(
+ u32 vp_id,
+ struct vxge_hw_vpath_reg __iomem *vpath_reg);
+
+/**
+ * struct vxge_hw_fifo_config - Configuration of fifo.
+ * @enable: Is this fifo to be commissioned
+ * @fifo_blocks: Numbers of TxDL (that is, lists of Tx descriptors)
+ *		blocks per queue.
+ * @max_frags: Max number of Tx buffers per TxDL (that is, per single
+ *		transmit operation).
+ *		No more than 256 transmit buffers can be specified.
+ * @memblock_size: Fifo descriptors are allocated in blocks of @memblock_size
+ *		bytes. Setting @memblock_size to page size ensures
+ *		by-page allocation of descriptors. 128K bytes is the
+ *		maximum supported block size.
+ * @alignment_size: per Tx fragment DMA-able memory used to align transmit data
+ *		(e.g., to align on a cache line).
+ * @intr: Boolean. Use 1 to generate interrupt for each completed TxDL.
+ *		Use 0 otherwise.
+ * @no_snoop_bits: If non-zero, specifies no-snoop PCI operation,
+ *		which generally improves latency of the host bridge operation
+ *		(see PCI specification). For valid values please refer
+ *		to struct vxge_hw_fifo_config{} in the driver sources.
+ *
+ * Configuration of all Titan fifos.
+ * Note: Valid (min, max) range for each attribute is specified in the body of
+ * the struct vxge_hw_fifo_config{} structure.
+ */
+struct vxge_hw_fifo_config {
+ u32 enable;
+#define VXGE_HW_FIFO_ENABLE 1
+#define VXGE_HW_FIFO_DISABLE 0
+
+ u32 fifo_blocks;
+#define VXGE_HW_MIN_FIFO_BLOCKS 2
+#define VXGE_HW_MAX_FIFO_BLOCKS 128
+
+ u32 max_frags;
+#define VXGE_HW_MIN_FIFO_FRAGS 1
+#define VXGE_HW_MAX_FIFO_FRAGS 256
+
+ u32 memblock_size;
+#define VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE VXGE_HW_BLOCK_SIZE
+#define VXGE_HW_MAX_FIFO_MEMBLOCK_SIZE 131072
+#define VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE 8096
+
+ u32 alignment_size;
+#define VXGE_HW_MIN_FIFO_ALIGNMENT_SIZE 0
+#define VXGE_HW_MAX_FIFO_ALIGNMENT_SIZE 65536
+#define VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE VXGE_CACHE_LINE_SIZE
+
+ u32 intr;
+#define VXGE_HW_FIFO_QUEUE_INTR_ENABLE 1
+#define VXGE_HW_FIFO_QUEUE_INTR_DISABLE 0
+#define VXGE_HW_FIFO_QUEUE_INTR_DEFAULT 0
+
+ u32 no_snoop_bits;
+#define VXGE_HW_FIFO_NO_SNOOP_DISABLED 0
+#define VXGE_HW_FIFO_NO_SNOOP_TXD 1
+#define VXGE_HW_FIFO_NO_SNOOP_FRM 2
+#define VXGE_HW_FIFO_NO_SNOOP_ALL 3
+#define VXGE_HW_FIFO_NO_SNOOP_DEFAULT 0
+
+};
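+
+/*
+ * Hedged initialization sketch (illustrative only; the values are one
+ * plausible choice, not the driver's default-selection logic):
+ *
+ *	struct vxge_hw_fifo_config fifo_cfg = {
+ *		.enable		= VXGE_HW_FIFO_ENABLE,
+ *		.fifo_blocks	= VXGE_HW_MIN_FIFO_BLOCKS,
+ *		.max_frags	= VXGE_HW_MAX_FIFO_FRAGS,
+ *		.memblock_size	= VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE,
+ *		.alignment_size	= VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE,
+ *		.intr		= VXGE_HW_FIFO_QUEUE_INTR_DEFAULT,
+ *		.no_snoop_bits	= VXGE_HW_FIFO_NO_SNOOP_DEFAULT,
+ *	};
+ */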
+/**
+ * struct vxge_hw_ring_config - Ring configurations.
+ * @enable: Is this ring to be commissioned
+ * @ring_blocks: Numbers of RxD blocks in the ring
+ * @buffer_mode: Receive buffer mode (1, 3, or 5); for details please refer
+ *		to Titan User Guide.
+ * @scatter_mode: Titan supports two receive scatter modes: A and B.
+ *		For details please refer to Titan User Guide.
+ * @rxds_limit: Number of completed RxDs to accumulate before a doorbell is
+ *		posted to the device; see VXGE_HW_DEF_RING_RXDS_LIMIT.
+ *
+ * Ring configuration.
+ */
+struct vxge_hw_ring_config {
+ u32 enable;
+#define VXGE_HW_RING_ENABLE 1
+#define VXGE_HW_RING_DISABLE 0
+#define VXGE_HW_RING_DEFAULT 1
+
+ u32 ring_blocks;
+#define VXGE_HW_MIN_RING_BLOCKS 1
+#define VXGE_HW_MAX_RING_BLOCKS 128
+#define VXGE_HW_DEF_RING_BLOCKS 2
+
+ u32 buffer_mode;
+#define VXGE_HW_RING_RXD_BUFFER_MODE_1 1
+#define VXGE_HW_RING_RXD_BUFFER_MODE_3 3
+#define VXGE_HW_RING_RXD_BUFFER_MODE_5 5
+#define VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT 1
+
+ u32 scatter_mode;
+#define VXGE_HW_RING_SCATTER_MODE_A 0
+#define VXGE_HW_RING_SCATTER_MODE_B 1
+#define VXGE_HW_RING_SCATTER_MODE_C 2
+#define VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT 0xffffffff
+
+ u64 rxds_limit;
+#define VXGE_HW_DEF_RING_RXDS_LIMIT 44
+};
+
+/**
+ * struct vxge_hw_vp_config - Configuration of virtual path
+ * @vp_id: Virtual Path Id
+ * @min_bandwidth: Minimum Guaranteed bandwidth
+ * @ring: See struct vxge_hw_ring_config{}.
+ * @fifo: See struct vxge_hw_fifo_config{}.
+ * @tti: Configuration of interrupt associated with Transmit;
+ *		see struct vxge_hw_tim_intr_config{}.
+ * @rti: Configuration of interrupt associated with Receive;
+ *		see struct vxge_hw_tim_intr_config{}.
+ * @mtu: MTU size used on this port.
+ * @rpa_strip_vlan_tag: Strip VLAN Tag enable/disable. Instructs the device to
+ * remove the VLAN tag from all received tagged frames that are not
+ * replicated at the internal L2 switch.
+ * 0 - Do not strip the VLAN tag.
+ * 1 - Strip the VLAN tag. Regardless of this setting, VLAN tags are
+ * always placed into the RxDMA descriptor.
+ *
+ * This structure is used by the driver to pass the configuration parameters to
+ * configure Virtual Path.
+ */
+struct vxge_hw_vp_config {
+ u32 vp_id;
+
+#define VXGE_HW_VPATH_PRIORITY_MIN 0
+#define VXGE_HW_VPATH_PRIORITY_MAX 16
+#define VXGE_HW_VPATH_PRIORITY_DEFAULT 0
+
+ u32 min_bandwidth;
+#define VXGE_HW_VPATH_BANDWIDTH_MIN 0
+#define VXGE_HW_VPATH_BANDWIDTH_MAX 100
+#define VXGE_HW_VPATH_BANDWIDTH_DEFAULT 0
+
+ struct vxge_hw_ring_config ring;
+ struct vxge_hw_fifo_config fifo;
+ struct vxge_hw_tim_intr_config tti;
+ struct vxge_hw_tim_intr_config rti;
+
+ u32 mtu;
+#define VXGE_HW_VPATH_MIN_INITIAL_MTU VXGE_HW_MIN_MTU
+#define VXGE_HW_VPATH_MAX_INITIAL_MTU VXGE_HW_MAX_MTU
+#define VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU 0xffffffff
+
+ u32 rpa_strip_vlan_tag;
+#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE 1
+#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE 0
+#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT 0xffffffff
+
+};
+/**
+ * struct vxge_hw_device_config - Device configuration.
+ * @dma_blockpool_initial: Initial size of DMA Pool
+ * @dma_blockpool_max: Maximum blocks in DMA pool
+ * @intr_mode: Line, or MSI-X interrupt.
+ *
+ * @rth_en: Enable Receive Traffic Hashing (RTH) using IT (Indirection Table).
+ * @rth_it_type: RTH IT table programming type
+ * @rts_mac_en: Enable Receive Traffic Steering using MAC destination address
+ * @vp_config: Configuration for virtual paths
+ * @device_poll_millis: Specify the interval (in milliseconds)
+ *		to wait for register reads
+ *
+ * Titan configuration.
+ * Contains per-device configuration parameters, including:
+ * - stats sampling interval, etc.
+ *
+ * In addition, struct vxge_hw_device_config{} includes "subordinate"
+ * configurations, including:
+ * - fifos and rings;
+ * - MAC (done at firmware level).
+ *
+ * See Titan User Guide for more details.
+ * Note: Valid (min, max) range for each attribute is specified in the body of
+ * the struct vxge_hw_device_config{} structure. Please refer to the
+ * corresponding include file.
+ * See also: struct vxge_hw_tim_intr_config{}.
+ */
+struct vxge_hw_device_config {
+ u32 dma_blockpool_initial;
+ u32 dma_blockpool_max;
+#define VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE 0
+#define VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE 0
+#define VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE 4
+#define VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE 4096
+
+#define VXGE_HW_MAX_PAYLOAD_SIZE_512 2
+
+ u32 intr_mode;
+#define VXGE_HW_INTR_MODE_IRQLINE 0
+#define VXGE_HW_INTR_MODE_MSIX 1
+#define VXGE_HW_INTR_MODE_MSIX_ONE_SHOT 2
+
+#define VXGE_HW_INTR_MODE_DEF 0
+
+ u32 rth_en;
+#define VXGE_HW_RTH_DISABLE 0
+#define VXGE_HW_RTH_ENABLE 1
+#define VXGE_HW_RTH_DEFAULT 0
+
+ u32 rth_it_type;
+#define VXGE_HW_RTH_IT_TYPE_SOLO_IT 0
+#define VXGE_HW_RTH_IT_TYPE_MULTI_IT 1
+#define VXGE_HW_RTH_IT_TYPE_DEFAULT 0
+
+ u32 rts_mac_en;
+#define VXGE_HW_RTS_MAC_DISABLE 0
+#define VXGE_HW_RTS_MAC_ENABLE 1
+#define VXGE_HW_RTS_MAC_DEFAULT 0
+
+ struct vxge_hw_vp_config vp_config[VXGE_HW_MAX_VIRTUAL_PATHS];
+
+ u32 device_poll_millis;
+#define VXGE_HW_MIN_DEVICE_POLL_MILLIS 1
+#define VXGE_HW_MAX_DEVICE_POLL_MILLIS 100000
+#define VXGE_HW_DEF_DEVICE_POLL_MILLIS 1000
+
+};
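+
+/*
+ * Illustrative configuration sketch (not from the driver sources):
+ * enable a single virtual path, leaving MTU and VLAN stripping at
+ * their flash defaults.
+ *
+ *	struct vxge_hw_device_config dev_cfg;
+ *
+ *	memset(&dev_cfg, 0, sizeof(dev_cfg));
+ *	dev_cfg.intr_mode	   = VXGE_HW_INTR_MODE_MSIX;
+ *	dev_cfg.device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS;
+ *	dev_cfg.vp_config[0].vp_id = 0;
+ *	dev_cfg.vp_config[0].mtu   =
+ *		VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;
+ *	dev_cfg.vp_config[0].rpa_strip_vlan_tag =
+ *		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
+ *	dev_cfg.vp_config[0].ring.enable = VXGE_HW_RING_ENABLE;
+ *	dev_cfg.vp_config[0].fifo.enable = VXGE_HW_FIFO_ENABLE;
+ */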
+
+/**
+ * function vxge_uld_link_up_f - Link-Up callback provided by driver.
+ * @devh: HW device handle.
+ *
+ * Link-up notification callback provided by the driver.
+ * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
+ *
+ * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_down_f{},
+ * vxge_hw_driver_initialize().
+ */
+
+/**
+ * function vxge_uld_link_down_f - Link-Down callback provided by
+ * driver.
+ * @devh: HW device handle.
+ *
+ * Link-Down notification callback provided by the driver.
+ * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
+ *
+ * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
+ * vxge_hw_driver_initialize().
+ */
+
+/**
+ * function vxge_uld_crit_err_f - Critical Error notification callback.
+ * @devh: HW device handle.
+ * @type: Enumerated hw error, e.g.: double ECC.
+ * @ext_data: Extended data. The contents depend on the @type.
+ *
+ * Critical-error notification callback provided by the driver
+ * (typically at HW device initialization time).
+ * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
+ *
+ * See also: struct vxge_hw_uld_cbs{}, enum vxge_hw_event{},
+ * vxge_hw_driver_initialize().
+ */
+
+/**
+ * struct vxge_hw_uld_cbs - driver "slow-path" callbacks.
+ * @link_up: See vxge_uld_link_up_f{}.
+ * @link_down: See vxge_uld_link_down_f{}.
+ * @crit_err: See vxge_uld_crit_err_f{}.
+ *
+ * Driver slow-path (per-driver) callbacks.
+ * Implemented by driver and provided to HW via
+ * vxge_hw_driver_initialize().
+ * Note that these callbacks are not mandatory: HW will not invoke
+ * a callback if NULL is specified.
+ *
+ * See also: vxge_hw_driver_initialize().
+ */
+struct vxge_hw_uld_cbs {
+
+ void (*link_up)(struct __vxge_hw_device *devh);
+ void (*link_down)(struct __vxge_hw_device *devh);
+ void (*crit_err)(struct __vxge_hw_device *devh,
+ enum vxge_hw_event type, u64 ext_data);
+};
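+
+/*
+ * Wiring sketch (the callback body and its registration path are
+ * assumptions; see vxge_hw_driver_initialize() in the driver sources):
+ *
+ *	static void my_link_up(struct __vxge_hw_device *devh)
+ *	{
+ *		netif_carrier_on(...);	hypothetical reaction to link-up
+ *	}
+ *
+ *	struct vxge_hw_uld_cbs cbs = {
+ *		.link_up   = my_link_up,
+ *		.link_down = NULL,	unused callbacks may stay NULL
+ *		.crit_err  = NULL,
+ *	};
+ */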
+
+/*
+ * struct __vxge_hw_blockpool_entry - Block private data structure
+ * @item: List header used to link.
+ * @length: Length of the block
+ * @memblock: Virtual address of the block
+ * @dma_addr: DMA address of the block.
+ * @dma_handle: DMA handle of the block.
+ * @acc_handle: DMA access handle of the block.
+ *
+ * Each block is allocated with a header so that blocks can be linked
+ * into a list.
+ */
+struct __vxge_hw_blockpool_entry {
+ struct list_head item;
+ u32 length;
+ void *memblock;
+ dma_addr_t dma_addr;
+ struct pci_dev *dma_handle;
+ struct pci_dev *acc_handle;
+};
+
+/*
+ * struct __vxge_hw_blockpool - Block Pool
+ * @hldev: HW device
+ * @block_size: size of each block.
+ * @pool_size: Number of blocks in the pool
+ * @pool_max: Maximum number of blocks above which additional blocks are freed
+ * @req_out: Number of block requests outstanding with the OS
+ * @free_block_list: List of free blocks
+ * @free_entry_list: List of free blockpool entries
+ *
+ * The block pool contains the preallocated DMA blocks.
+ */
+struct __vxge_hw_blockpool {
+ struct __vxge_hw_device *hldev;
+ u32 block_size;
+ u32 pool_size;
+ u32 pool_max;
+ u32 req_out;
+ struct list_head free_block_list;
+ struct list_head free_entry_list;
+};
+
+/*
+ * enum __vxge_hw_channel_type - Enumerated channel types.
+ * @VXGE_HW_CHANNEL_TYPE_UNKNOWN: Unknown channel.
+ * @VXGE_HW_CHANNEL_TYPE_FIFO: fifo.
+ * @VXGE_HW_CHANNEL_TYPE_RING: ring.
+ * @VXGE_HW_CHANNEL_TYPE_MAX: Maximum number of HW-supported
+ * (and recognized) channel types. Currently: 2.
+ *
+ * Enumerated channel types. Currently there are only two link-layer
+ * channels - Titan fifo and Titan ring. In the future the list will grow.
+ */
+enum __vxge_hw_channel_type {
+ VXGE_HW_CHANNEL_TYPE_UNKNOWN = 0,
+ VXGE_HW_CHANNEL_TYPE_FIFO = 1,
+ VXGE_HW_CHANNEL_TYPE_RING = 2,
+ VXGE_HW_CHANNEL_TYPE_MAX = 3
+};
+
+/*
+ * struct __vxge_hw_channel
+ * @item: List item; used to maintain a list of open channels.
+ * @type: Channel type. See enum vxge_hw_channel_type{}.
+ * @devh: Device handle. HW device object that contains _this_ channel.
+ * @vph: Virtual path handle. Virtual Path Object that contains _this_ channel.
+ * @length: Channel length. Currently allocated number of descriptors.
+ * The channel length "grows" when more descriptors get allocated.
+ * See _hw_mempool_grow.
+ * @reserve_arr: Reserve array. Contains descriptors that can be reserved
+ * by driver for the subsequent send or receive operation.
+ * See vxge_hw_fifo_txdl_reserve(),
+ * vxge_hw_ring_rxd_reserve().
+ * @reserve_ptr: Current pointer in the reserve array
+ * @reserve_top: Reserve top gives the maximum number of dtrs available in
+ * reserve array.
+ * @work_arr: Work array. Contains descriptors posted to the channel.
+ * Note that at any point in time @work_arr contains 3 types of
+ * descriptors:
+ * 1) posted but not yet consumed by Titan device;
+ * 2) consumed but not yet completed;
+ * 3) completed but not yet freed
+ * (via vxge_hw_fifo_txdl_free() or vxge_hw_ring_rxd_free())
+ * @post_index: Post index. At any point in time points to the position
+ *             in the channel that will contain the next to-be-posted
+ *             descriptor.
+ * @compl_index: Completion index. At any point in time points to the
+ *             position in the channel that will contain the next
+ *             to-be-completed descriptor.
+ * @free_arr: Free array. Contains completed descriptors that were freed
+ * (i.e., handed over back to HW) by driver.
+ * See vxge_hw_fifo_txdl_free(), vxge_hw_ring_rxd_free().
+ * @free_ptr: current pointer in free array
+ * @per_dtr_space: Per-descriptor space (in bytes) that channel user can utilize
+ * to store per-operation control information.
+ * @stats: Pointer to common statistics
+ * @userdata: Per-channel opaque (void*) user-defined context, which may be
+ * driver object, ULP connection, etc.
+ * Once channel is open, @userdata is passed back to user via
+ * vxge_hw_channel_callback_f.
+ *
+ * HW channel object.
+ *
+ * See also: enum vxge_hw_channel_type{}, enum vxge_hw_channel_flag
+ */
+struct __vxge_hw_channel {
+ struct list_head item;
+ enum __vxge_hw_channel_type type;
+ struct __vxge_hw_device *devh;
+ struct __vxge_hw_vpath_handle *vph;
+ u32 length;
+ u32 vp_id;
+ void **reserve_arr;
+ u32 reserve_ptr;
+ u32 reserve_top;
+ void **work_arr;
+ u32 post_index ____cacheline_aligned;
+ u32 compl_index ____cacheline_aligned;
+ void **free_arr;
+ u32 free_ptr;
+ void **orig_arr;
+ u32 per_dtr_space;
+ void *userdata;
+ struct vxge_hw_common_reg __iomem *common_reg;
+ u32 first_vp_id;
+ struct vxge_hw_vpath_stats_sw_common_info *stats;
+
+} ____cacheline_aligned;
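+
+/*
+ * Illustrative invariants (a sketch of the semantics described above,
+ * not code lifted from the driver): @reserve_top counts descriptors
+ * still available in @reserve_arr, so a reservation is roughly
+ *
+ *	if (channel->reserve_top > 0)
+ *		dtr = channel->reserve_arr[--channel->reserve_top];
+ *
+ * while @post_index and @compl_index advance modulo @length:
+ *
+ *	channel->post_index = (channel->post_index + 1) % channel->length;
+ */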
+
+/*
+ * struct __vxge_hw_virtualpath - Virtual Path
+ *
+ * @vp_id: Virtual path id
+ * @vp_open: This flag specifies whether vxge_hw_vp_open was called by the
+ *	      LL driver
+ * @hldev: Hal device
+ * @vp_config: Virtual Path Config
+ * @vp_reg: VPATH Register map address in BAR0
+ * @vpmgmt_reg: VPATH_MGMT register map address
+ * @max_mtu: Max mtu that can be supported
+ * @vsport_number: vsport attached to this vpath
+ * @max_kdfc_db: Maximum kernel mode doorbells
+ * @max_nofl_db: Maximum non offload doorbells
+ * @tx_intr_num: Interrupt Number associated with the TX
+ *
+ * @ringh: Ring Queue
+ * @fifoh: FIFO Queue
+ * @vpath_handles: Virtual Path handles list
+ * @stats_block: Memory for DMAing stats
+ * @hw_stats: Vpath hardware statistics
+ * @hw_stats_sav: Saved copy of vpath hardware statistics
+ * @sw_stats: Vpath software statistics
+ *
+ * Virtual path structure to encapsulate the data related to a virtual path.
+ * Virtual paths are allocated by the HW upon getting configuration from the
+ * driver and inserted into the list of virtual paths.
+ */
+struct __vxge_hw_virtualpath {
+ u32 vp_id;
+
+ u32 vp_open;
+#define VXGE_HW_VP_NOT_OPEN 0
+#define VXGE_HW_VP_OPEN 1
+
+ struct __vxge_hw_device *hldev;
+ struct vxge_hw_vp_config *vp_config;
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+ struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
+ struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db;
+
+ u32 max_mtu;
+ u32 vsport_number;
+ u32 max_kdfc_db;
+ u32 max_nofl_db;
+
+ struct __vxge_hw_ring *____cacheline_aligned ringh;
+ struct __vxge_hw_fifo *____cacheline_aligned fifoh;
+ struct list_head vpath_handles;
+ struct __vxge_hw_blockpool_entry *stats_block;
+ struct vxge_hw_vpath_stats_hw_info *hw_stats;
+ struct vxge_hw_vpath_stats_hw_info *hw_stats_sav;
+ struct vxge_hw_vpath_stats_sw_info *sw_stats;
+};
+
+/*
+ * struct __vxge_hw_vpath_handle - List item to store callback information
+ * @item: List head to keep the item in linked list
+ * @vpath: Virtual path to which this item belongs
+ *
+ * This structure is used to store the callback information.
+ */
+struct __vxge_hw_vpath_handle {
+ struct list_head item;
+ struct __vxge_hw_virtualpath *vpath;
+};
+
+/**
+ * struct __vxge_hw_device - Hal device object
+ * @magic: Magic Number
+ * @device_id: PCI Device Id of the adapter
+ * @major_revision: PCI Device major revision
+ * @minor_revision: PCI Device minor revision
+ * @bar0: BAR0 virtual address.
+ * @bar1: BAR1 virtual address.
+ * @bar2: BAR2 virtual address.
+ * @pdev: Physical device handle
+ * @config: Configuration passed by the LL driver at initialization
+ * @link_state: Link state
+ *
+ * HW device object. Represents Titan adapter
+ */
+struct __vxge_hw_device {
+ u32 magic;
+#define VXGE_HW_DEVICE_MAGIC 0x12345678
+#define VXGE_HW_DEVICE_DEAD 0xDEADDEAD
+ u16 device_id;
+ u8 major_revision;
+ u8 minor_revision;
+ void __iomem *bar0;
+ void __iomem *bar1;
+ void __iomem *bar2;
+ struct pci_dev *pdev;
+ struct net_device *ndev;
+ struct vxge_hw_device_config config;
+ enum vxge_hw_device_link_state link_state;
+
+ struct vxge_hw_uld_cbs uld_callbacks;
+
+ u32 host_type;
+ u32 func_id;
+ u32 access_rights;
+#define VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH 0x1
+#define VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM 0x2
+#define VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM 0x4
+ struct vxge_hw_legacy_reg __iomem *legacy_reg;
+ struct vxge_hw_toc_reg __iomem *toc_reg;
+ struct vxge_hw_common_reg __iomem *common_reg;
+ struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
+ struct vxge_hw_srpcim_reg __iomem *srpcim_reg \
+ [VXGE_HW_TITAN_SRPCIM_REG_SPACES];
+ struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg \
+ [VXGE_HW_TITAN_VPMGMT_REG_SPACES];
+ struct vxge_hw_vpath_reg __iomem *vpath_reg \
+ [VXGE_HW_TITAN_VPATH_REG_SPACES];
+ u8 __iomem *kdfc;
+ u8 __iomem *usdc;
+ struct __vxge_hw_virtualpath virtual_paths \
+ [VXGE_HW_MAX_VIRTUAL_PATHS];
+ u64 vpath_assignments;
+ u64 vpaths_deployed;
+ u32 first_vp_id;
+ u64 tim_int_mask0[4];
+ u32 tim_int_mask1[4];
+
+ struct __vxge_hw_blockpool block_pool;
+ struct vxge_hw_device_stats stats;
+ u32 debug_module_mask;
+ u32 debug_level;
+ u32 level_err;
+ u32 level_trace;
+};
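+
+/*
+ * Hedged helper sketch (hypothetical; not part of the driver API): the
+ * @magic field allows a handle to be validated before use.
+ *
+ *	static inline int vxge_hw_device_is_alive(
+ *		const struct __vxge_hw_device *hldev)
+ *	{
+ *		return hldev && hldev->magic == VXGE_HW_DEVICE_MAGIC;
+ *	}
+ */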
+
+#define VXGE_HW_INFO_LEN 64
+/**
+ * struct vxge_hw_device_hw_info - Device information
+ * @host_type: Host Type
+ * @function_mode: PCI function mode
+ * @func_id: Function Id
+ * @vpath_mask: vpath bit mask
+ * @fw_version: Firmware version
+ * @fw_date: Firmware date
+ * @flash_version: Flash version
+ * @flash_date: Flash date
+ * @serial_number: Serial number
+ * @part_number: Part number
+ * @product_desc: Product description
+ * @mac_addrs: Mac addresses for each vpath
+ * @mac_addr_masks: Mac address masks for each vpath
+ *
+ * Returns the vpath mask that has the bits set for each vpath allocated
+ * for the driver and the first mac address for each vpath
+ */
+struct vxge_hw_device_hw_info {
+ u32 host_type;
+#define VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION 0
+#define VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION 1
+#define VXGE_HW_NO_MR_SR_VH0_FUNCTION0 2
+#define VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION 3
+#define VXGE_HW_MR_SR_VH0_INVALID_CONFIG 4
+#define VXGE_HW_SR_VH_FUNCTION0 5
+#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION 6
+#define VXGE_HW_VH_NORMAL_FUNCTION 7
+ u64 function_mode;
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 0
+#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 1
+#define VXGE_HW_FUNCTION_MODE_SRIOV 2
+#define VXGE_HW_FUNCTION_MODE_MRIOV 3
+ u32 func_id;
+ u64 vpath_mask;
+ struct vxge_hw_device_version fw_version;
+ struct vxge_hw_device_date fw_date;
+ struct vxge_hw_device_version flash_version;
+ struct vxge_hw_device_date flash_date;
+ u8 serial_number[VXGE_HW_INFO_LEN];
+ u8 part_number[VXGE_HW_INFO_LEN];
+ u8 product_desc[VXGE_HW_INFO_LEN];
+ u8 (mac_addrs)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
+ u8 (mac_addr_masks)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
+};
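+
+/*
+ * Illustrative loop (not from the original sources): walk the virtual
+ * paths granted to this function via @vpath_mask. Whether bit i is
+ * (1ULL << i) or the driver's big-endian vxge_mBIT(i) is an assumption
+ * here; check vxge-reg.h.
+ *
+ *	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+ *		if (!(hw_info.vpath_mask & (1ULL << i)))
+ *			continue;
+ *		first_mac = hw_info.mac_addrs[i];
+ *	}
+ */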
+
+/**
+ * struct vxge_hw_device_attr - Device memory spaces.
+ * @bar0: BAR0 virtual address.
+ * @bar1: BAR1 virtual address.
+ * @bar2: BAR2 virtual address.
+ * @pdev: PCI device object.
+ *
+ * Device memory spaces. Includes configuration, BAR0, BAR1, etc. per device
+ * mapped memories. Also, includes a pointer to OS-specific PCI device object.
+ */
+struct vxge_hw_device_attr {
+ void __iomem *bar0;
+ void __iomem *bar1;
+ void __iomem *bar2;
+ struct pci_dev *pdev;
+ struct vxge_hw_uld_cbs uld_callbacks;
+};
+
+#define VXGE_HW_DEVICE_LINK_STATE_SET(hldev, ls) (hldev->link_state = ls)
+
+#define VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, i) { \
+ if (i < 16) { \
+ m0[0] |= vxge_vBIT(0x8, (i*4), 4); \
+ m0[1] |= vxge_vBIT(0x4, (i*4), 4); \
+ } \
+ else { \
+ m1[0] = 0x80000000; \
+ m1[1] = 0x40000000; \
+ } \
+}
+
+#define VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, i) { \
+ if (i < 16) { \
+ m0[0] &= ~vxge_vBIT(0x8, (i*4), 4); \
+ m0[1] &= ~vxge_vBIT(0x4, (i*4), 4); \
+ } \
+ else { \
+ m1[0] = 0; \
+ m1[1] = 0; \
+ } \
+}
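+
+/*
+ * Illustrative call (the arguments shown are the matching fields of
+ * struct __vxge_hw_device): mask the TIM interrupt of vpath @vp_id in
+ * the device-wide mask arrays.
+ *
+ *	VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
+ *					hldev->tim_int_mask1, vp_id);
+ */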
+
+#define VXGE_HW_DEVICE_STATS_PIO_READ(loc, offset) { \
+ status = vxge_hw_mrpcim_stats_access(hldev, \
+ VXGE_HW_STATS_OP_READ, \
+ loc, \
+ offset, \
+ &val64); \
+ \
+ if (status != VXGE_HW_OK) \
+ return status; \
+}
+
+#define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \
+ status = __vxge_hw_vpath_stats_access(vpath, \
+ VXGE_HW_STATS_OP_READ, \
+ offset, \
+ &val64); \
+ if (status != VXGE_HW_OK) \
+ return status; \
+}
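+
+/*
+ * Usage sketch (illustrative; the offset argument is hypothetical):
+ * both *_STATS_PIO_READ macros expand in place and rely on 'status'
+ * and 'val64' being declared by the caller, and they may 'return
+ * status', so the enclosing function must return enum vxge_hw_status.
+ *
+ *	enum vxge_hw_status status;
+ *	u64 val64 = 0;
+ *
+ *	VXGE_HW_VPATH_STATS_PIO_READ(offset);
+ *	*stat = val64;
+ */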
+
+/*
+ * struct __vxge_hw_ring - Ring channel.
+ * @channel: Channel "base" of this ring, the common part of all HW
+ * channels.
+ * @mempool: Memory pool, the pool from which descriptors get allocated.
+ * @config: Ring configuration, part of device configuration
+ * (see struct vxge_hw_device_config{}).
+ * @ring_length: Length of the ring
+ * @buffer_mode: 1, 3, or 5. The value specifies a receive buffer mode,
+ * as per Titan User Guide.
+ * @rxd_size: RxD sizes for 1-, 3- or 5- buffer modes. As per Titan spec,
+ * 1-buffer mode descriptor is 32 byte long, etc.
+ * @rxd_priv_size: Per RxD size reserved (by HW) for driver to keep
+ * per-descriptor data (e.g., DMA handle for Solaris)
+ * @per_rxd_space: Per rxd space requested by driver
+ * @rxds_per_block: Number of descriptors per hardware-defined RxD
+ * block. Depends on the (1-, 3-, 5-) buffer mode.
+ * @rxdblock_priv_size: Reserved at the end of each RxD block. HW internal
+ *             usage. Not to be confused with @rxd_priv_size.
+ * @cmpl_cnt: Completion counter. Is reset to zero upon entering the ISR.
+ * @callback: Channel completion callback. HW invokes the callback when there
+ * are new completions on that channel. In many implementations
+ * the @callback executes in the hw interrupt context.
+ * @rxd_init: Channel's descriptor-initialize callback.
+ * See vxge_hw_ring_rxd_init_f{}.
+ * If not NULL, HW invokes the callback when opening
+ * the ring.
+ * @rxd_term: Channel's descriptor-terminate callback. If not NULL,
+ * HW invokes the callback when closing the corresponding channel.
+ * See also vxge_hw_channel_rxd_term_f{}.
+ * @stats: Statistics for ring
+ *
+ * Ring channel.
+ *
+ * Note: The structure is cache line aligned to better utilize
+ * CPU cache performance.
+ */
+struct __vxge_hw_ring {
+ struct __vxge_hw_channel channel;
+ struct vxge_hw_mempool *mempool;
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+ struct vxge_hw_common_reg __iomem *common_reg;
+ u32 ring_length;
+ u32 buffer_mode;
+ u32 rxd_size;
+ u32 rxd_priv_size;
+ u32 per_rxd_space;
+ u32 rxds_per_block;
+ u32 rxdblock_priv_size;
+ u32 cmpl_cnt;
+ u32 vp_id;
+ u32 doorbell_cnt;
+ u32 total_db_cnt;
+ u64 rxds_limit;
+
+ enum vxge_hw_status (*callback)(
+ struct __vxge_hw_ring *ringh,
+ void *rxdh,
+ u8 t_code,
+ void *userdata);
+
+ enum vxge_hw_status (*rxd_init)(
+ void *rxdh,
+ void *userdata);
+
+ void (*rxd_term)(
+ void *rxdh,
+ enum vxge_hw_rxd_state state,
+ void *userdata);
+
+ struct vxge_hw_vpath_stats_sw_ring_info *stats ____cacheline_aligned;
+ struct vxge_hw_ring_config *config;
+} ____cacheline_aligned;
+
+/**
+ * enum vxge_hw_txdl_state - Descriptor (TXDL) state.
+ * @VXGE_HW_TXDL_STATE_NONE: Invalid state.
+ * @VXGE_HW_TXDL_STATE_AVAIL: Descriptor is available for reservation.
+ * @VXGE_HW_TXDL_STATE_POSTED: Descriptor is posted for processing by the
+ * device.
+ * @VXGE_HW_TXDL_STATE_FREED: Descriptor is free and can be reused for
+ * filling-in and posting later.
+ *
+ * Titan/HW descriptor states.
+ *
+ */
+enum vxge_hw_txdl_state {
+ VXGE_HW_TXDL_STATE_NONE = 0,
+ VXGE_HW_TXDL_STATE_AVAIL = 1,
+ VXGE_HW_TXDL_STATE_POSTED = 2,
+ VXGE_HW_TXDL_STATE_FREED = 3
+};
+/*
+ * struct __vxge_hw_fifo - Fifo.
+ * @channel: Channel "base" of this fifo, the common part of all HW
+ * channels.
+ * @mempool: Memory pool, from which descriptors get allocated.
+ * @config: Fifo configuration, part of device configuration
+ * (see struct vxge_hw_device_config{}).
+ * @interrupt_type: Interrupt type to be used
+ * @no_snoop_bits: See struct vxge_hw_fifo_config{}.
+ * @txdl_per_memblock: Number of TxDLs (TxD lists) per memblock.
+ *             For more information on TxDLs please refer to the Titan UG.
+ * @txdl_size: Configured TxDL size (i.e., number of TxDs in a list), plus
+ * per-TxDL HW private space (struct __vxge_hw_fifo_txdl_priv).
+ * @priv_size: Per-Tx descriptor space reserved for driver
+ * usage.
+ * @per_txdl_space: Per txdl private space for the driver
+ * @callback: Fifo completion callback. HW invokes the callback when there
+ * are new completions on that fifo. In many implementations
+ * the @callback executes in the hw interrupt context.
+ * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
+ * HW invokes the callback when closing the corresponding fifo.
+ * See also vxge_hw_fifo_txdl_term_f{}.
+ * @stats: Statistics of this fifo
+ *
+ * Fifo channel.
+ * Note: The structure is cache line aligned.
+ */
+struct __vxge_hw_fifo {
+ struct __vxge_hw_channel channel;
+ struct vxge_hw_mempool *mempool;
+ struct vxge_hw_fifo_config *config;
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+ struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db;
+ u64 interrupt_type;
+ u32 no_snoop_bits;
+ u32 txdl_per_memblock;
+ u32 txdl_size;
+ u32 priv_size;
+ u32 per_txdl_space;
+ u32 vp_id;
+ u32 tx_intr_num;
+
+ enum vxge_hw_status (*callback)(
+ struct __vxge_hw_fifo *fifo_handle,
+ void *txdlh,
+ enum vxge_hw_fifo_tcode t_code,
+ void *userdata,
+ void **skb_ptr);
+
+ void (*txdl_term)(
+ void *txdlh,
+ enum vxge_hw_txdl_state state,
+ void *userdata);
+
+ struct vxge_hw_vpath_stats_sw_fifo_info *stats ____cacheline_aligned;
+} ____cacheline_aligned;
+
+/*
+ * struct __vxge_hw_fifo_txdl_priv - Transmit descriptor HW-private data.
+ * @dma_addr: DMA (mapped) address of _this_ descriptor.
+ * @dma_handle: DMA handle used to map the descriptor onto device.
+ * @dma_offset: Descriptor's offset in the memory block. HW allocates
+ * descriptors in memory blocks (see struct vxge_hw_fifo_config{})
+ * Each memblock is a contiguous block of DMA-able memory.
+ * @frags: Total number of fragments (that is, contiguous data buffers)
+ * carried by this TxDL.
+ * @align_vaddr_start: Aligned virtual address start
+ * @align_vaddr: Virtual address of the per-TxDL area in memory used for
+ *             alignment. Used to place one or more mis-aligned fragments.
+ * @align_dma_addr: DMA address translated from the @align_vaddr.
+ * @align_dma_handle: DMA handle that corresponds to @align_dma_addr.
+ * @align_dma_acch: DMA access handle that corresponds to @align_dma_addr.
+ * @align_dma_offset: The current offset into the @align_vaddr area.
+ * Grows while filling the descriptor, gets reset.
+ * @align_used_frags: Number of fragments used.
+ * @alloc_frags: Total number of fragments allocated.
+ * @unused: TODO
+ * @next_txdl_priv: (TODO).
+ * @first_txdp: (TODO).
+ * @linked_txdl_priv: Pointer to any linked TxDL for creating contiguous
+ * TxDL list.
+ * @txdlh: Corresponding txdlh to this TxDL.
+ * @memblock: Pointer to the TxDL memory block or memory page.
+ * @dma_object: DMA address and handle of the memory block that contains
+ * the descriptor. This member is used only in the "checked"
+ * version of the HW (to enforce certain assertions);
+ * otherwise it gets compiled out.
+ * @allocated: True if the descriptor is reserved, 0 otherwise. Internal usage.
+ *
+ * Per-transmit descriptor HW-private data. HW uses the space to keep DMA
+ * information associated with the descriptor. Note that driver can ask HW
+ * to allocate additional per-descriptor space for its own (driver-specific)
+ * purposes.
+ *