From 4a71df50047f0db65ea09b1be155852e81a45eba Mon Sep 17 00:00:00 2001
From: Frank Blaschka
Date: Fri, 15 Feb 2008 09:19:42 +0100
Subject: qeth: new qeth device driver

List of major changes and improvements:
  no manipulation of the global ARP constructor
  clean code split into core, layer 2 and layer 3 functionality
  better exploitation of the ethtool interface
  better representation of the various hardware capabilities
  fix packet socket support (tcpdump), no fake_ll required
  osasnmpd notification via udev events
  coding style and beautification

Signed-off-by: Frank Blaschka
Signed-off-by: Jeff Garzik
---
 arch/s390/defconfig               |    8 +-
 drivers/s390/net/Kconfig          |   31 +-
 drivers/s390/net/Makefile         |    7 +-
 drivers/s390/net/qeth_core.h      |  916 ++++++++
 drivers/s390/net/qeth_core_main.c | 4540 +++++++++++++++++++++++++++++++++++++
 drivers/s390/net/qeth_core_mpc.c  |  266 +++
 drivers/s390/net/qeth_core_mpc.h  |  566 +++++
 drivers/s390/net/qeth_core_offl.c |  701 ++++++
 drivers/s390/net/qeth_core_offl.h |   76 +
 drivers/s390/net/qeth_core_sys.c  |  651 ++++++
 drivers/s390/net/qeth_l2_main.c   | 1242 ++++++++++
 drivers/s390/net/qeth_l3.h        |   76 +
 drivers/s390/net/qeth_l3_main.c   | 3388 +++++++++++++++++++++++++++
 drivers/s390/net/qeth_l3_sys.c    | 1051 +++++++++
 14 files changed, 13498 insertions(+), 21 deletions(-)
 create mode 100644 drivers/s390/net/qeth_core.h
 create mode 100644 drivers/s390/net/qeth_core_main.c
 create mode 100644 drivers/s390/net/qeth_core_mpc.c
 create mode 100644 drivers/s390/net/qeth_core_mpc.h
 create mode 100644 drivers/s390/net/qeth_core_offl.c
 create mode 100644 drivers/s390/net/qeth_core_offl.h
 create mode 100644 drivers/s390/net/qeth_core_sys.c
 create mode 100644 drivers/s390/net/qeth_l2_main.c
 create mode 100644 drivers/s390/net/qeth_l3.h
 create mode 100644 drivers/s390/net/qeth_l3_main.c
 create mode 100644 drivers/s390/net/qeth_l3_sys.c

diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 62f6b5a606d..cb93bf20bd7 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -537,11 +537,9 @@ CONFIG_CTC=m
 # CONFIG_SMSGIUCV is not set
 # CONFIG_CLAW is not set
 CONFIG_QETH=y
-
-#
-# Gigabit Ethernet default settings
-#
-# CONFIG_QETH_IPV6 is not set
+CONFIG_QETH_L2=y
+CONFIG_QETH_L3=y
+CONFIG_QETH_IPV6=y
 CONFIG_CCWGROUP=y
 # CONFIG_PPP is not set
 # CONFIG_SLIP is not set
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 773f5a6d582..a7745c82b4a 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -67,23 +67,26 @@ config QETH
 	  To compile this driver as a module, choose M. The module name is qeth.ko.
+config QETH_L2
+	tristate "qeth layer 2 device support"
+	depends on QETH
+	help
+	  Select this option to be able to run qeth devices in layer 2 mode.
+	  To compile as a module, choose M. The module name is qeth_l2.ko.
+	  If unsure, choose y.
-comment "Gigabit Ethernet default settings"
-	depends on QETH
+config QETH_L3
+	tristate "qeth layer 3 device support"
+	depends on QETH
+	help
+	  Select this option to be able to run qeth devices in layer 3 mode.
+	  To compile as a module choose M. The module name is qeth_l3.ko.
+	  If unsure, choose Y.
 config QETH_IPV6
-	bool "IPv6 support for gigabit ethernet"
-	depends on (QETH = IPV6) || (QETH && IPV6 = 'y')
-	help
-	  If CONFIG_QETH is switched on, this option will include IPv6
-	  support in the qeth device driver.
- -config QETH_VLAN - bool "VLAN support for gigabit ethernet" - depends on (QETH = VLAN_8021Q) || (QETH && VLAN_8021Q = 'y') - help - If CONFIG_QETH is switched on, this option will include IEEE - 802.1q VLAN support in the qeth device driver. + bool + depends on (QETH_L3 = IPV6) || (QETH_L3 && IPV6 = 'y') + default y config CCWGROUP tristate diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile index f6d189a8a45..6382c04d2bd 100644 --- a/drivers/s390/net/Makefile +++ b/drivers/s390/net/Makefile @@ -8,6 +8,9 @@ obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o obj-$(CONFIG_SMSGIUCV) += smsgiucv.o obj-$(CONFIG_LCS) += lcs.o cu3088.o obj-$(CONFIG_CLAW) += claw.o cu3088.o -qeth-y := qeth_main.o qeth_mpc.o qeth_sys.o qeth_eddp.o -qeth-$(CONFIG_PROC_FS) += qeth_proc.o +qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o qeth_core_offl.o obj-$(CONFIG_QETH) += qeth.o +qeth_l2-y += qeth_l2_main.o +obj-$(CONFIG_QETH_L2) += qeth_l2.o +qeth_l3-y += qeth_l3_main.o qeth_l3_sys.o +obj-$(CONFIG_QETH_L3) += qeth_l3.o diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h new file mode 100644 index 00000000000..9485e363ca1 --- /dev/null +++ b/drivers/s390/net/qeth_core.h @@ -0,0 +1,916 @@ +/* + * drivers/s390/net/qeth_core.h + * + * Copyright IBM Corp. 2007 + * Author(s): Utz Bacher , + * Frank Pavlic , + * Thomas Spatzier , + * Frank Blaschka + */ + +#ifndef __QETH_CORE_H__ +#define __QETH_CORE_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include "qeth_core_mpc.h" + +/** + * Debug Facility stuff + */ +#define QETH_DBF_SETUP_NAME "qeth_setup" +#define QETH_DBF_SETUP_LEN 8 +#define QETH_DBF_SETUP_PAGES 8 +#define QETH_DBF_SETUP_NR_AREAS 1 +#define QETH_DBF_SETUP_LEVEL 5 + +#define QETH_DBF_MISC_NAME "qeth_misc" +#define QETH_DBF_MISC_LEN 128 +#define QETH_DBF_MISC_PAGES 2 +#define QETH_DBF_MISC_NR_AREAS 1 +#define QETH_DBF_MISC_LEVEL 2 + +#define QETH_DBF_DATA_NAME "qeth_data" +#define QETH_DBF_DATA_LEN 96 +#define QETH_DBF_DATA_PAGES 8 +#define QETH_DBF_DATA_NR_AREAS 1 +#define QETH_DBF_DATA_LEVEL 2 + +#define QETH_DBF_CONTROL_NAME "qeth_control" +#define QETH_DBF_CONTROL_LEN 256 +#define QETH_DBF_CONTROL_PAGES 8 +#define QETH_DBF_CONTROL_NR_AREAS 1 +#define QETH_DBF_CONTROL_LEVEL 5 + +#define QETH_DBF_TRACE_NAME "qeth_trace" +#define QETH_DBF_TRACE_LEN 8 +#define QETH_DBF_TRACE_PAGES 4 +#define QETH_DBF_TRACE_NR_AREAS 1 +#define QETH_DBF_TRACE_LEVEL 3 + +#define QETH_DBF_SENSE_NAME "qeth_sense" +#define QETH_DBF_SENSE_LEN 64 +#define QETH_DBF_SENSE_PAGES 2 +#define QETH_DBF_SENSE_NR_AREAS 1 +#define QETH_DBF_SENSE_LEVEL 2 + +#define QETH_DBF_QERR_NAME "qeth_qerr" +#define QETH_DBF_QERR_LEN 8 +#define QETH_DBF_QERR_PAGES 2 +#define QETH_DBF_QERR_NR_AREAS 1 +#define QETH_DBF_QERR_LEVEL 2 + +#define QETH_DBF_TEXT(name, level, text) \ + do { \ + debug_text_event(qeth_dbf_##name, level, text); \ + } while (0) + +#define QETH_DBF_HEX(name, level, addr, len) \ + do { \ + debug_event(qeth_dbf_##name, level, (void *)(addr), len); \ + } while (0) + +/* Allow to sort out low debug levels early to avoid wasted sprints */ +static inline int qeth_dbf_passes(debug_info_t *dbf_grp, int level) +{ + return (level <= dbf_grp->level); +} + +/** + * some more debug stuff + */ +#define PRINTK_HEADER "qeth: " + +#define SENSE_COMMAND_REJECT_BYTE 0 +#define SENSE_COMMAND_REJECT_FLAG 0x80 +#define SENSE_RESETTING_EVENT_BYTE 1 +#define 
SENSE_RESETTING_EVENT_FLAG 0x80 + +/* + * Common IO related definitions + */ +#define CARD_RDEV(card) card->read.ccwdev +#define CARD_WDEV(card) card->write.ccwdev +#define CARD_DDEV(card) card->data.ccwdev +#define CARD_BUS_ID(card) card->gdev->dev.bus_id +#define CARD_RDEV_ID(card) card->read.ccwdev->dev.bus_id +#define CARD_WDEV_ID(card) card->write.ccwdev->dev.bus_id +#define CARD_DDEV_ID(card) card->data.ccwdev->dev.bus_id +#define CHANNEL_ID(channel) channel->ccwdev->dev.bus_id + +/** + * card stuff + */ +struct qeth_perf_stats { + unsigned int bufs_rec; + unsigned int bufs_sent; + + unsigned int skbs_sent_pack; + unsigned int bufs_sent_pack; + + unsigned int sc_dp_p; + unsigned int sc_p_dp; + /* qdio_input_handler: number of times called, time spent in */ + __u64 inbound_start_time; + unsigned int inbound_cnt; + unsigned int inbound_time; + /* qeth_send_packet: number of times called, time spent in */ + __u64 outbound_start_time; + unsigned int outbound_cnt; + unsigned int outbound_time; + /* qdio_output_handler: number of times called, time spent in */ + __u64 outbound_handler_start_time; + unsigned int outbound_handler_cnt; + unsigned int outbound_handler_time; + /* number of calls to and time spent in do_QDIO for inbound queue */ + __u64 inbound_do_qdio_start_time; + unsigned int inbound_do_qdio_cnt; + unsigned int inbound_do_qdio_time; + /* number of calls to and time spent in do_QDIO for outbound queues */ + __u64 outbound_do_qdio_start_time; + unsigned int outbound_do_qdio_cnt; + unsigned int outbound_do_qdio_time; + /* eddp data */ + unsigned int large_send_bytes; + unsigned int large_send_cnt; + unsigned int sg_skbs_sent; + unsigned int sg_frags_sent; + /* initial values when measuring starts */ + unsigned long initial_rx_packets; + unsigned long initial_tx_packets; + /* inbound scatter gather data */ + unsigned int sg_skbs_rx; + unsigned int sg_frags_rx; + unsigned int sg_alloc_page_rx; +}; + +/* Routing stuff */ +struct qeth_routing_info { + enum qeth_routing_types type; +}; + +/* IPA stuff */ +struct qeth_ipa_info { + __u32 supported_funcs; + __u32 enabled_funcs; +}; + +static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa, + enum qeth_ipa_funcs func) +{ + return (ipa->supported_funcs & func); +} + +static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, + enum qeth_ipa_funcs func) +{ + return (ipa->supported_funcs & ipa->enabled_funcs & func); +} + +#define qeth_adp_supported(c, f) \ + qeth_is_ipa_supported(&c->options.adp, f) +#define qeth_adp_enabled(c, f) \ + qeth_is_ipa_enabled(&c->options.adp, f) +#define qeth_is_supported(c, f) \ + qeth_is_ipa_supported(&c->options.ipa4, f) +#define qeth_is_enabled(c, f) \ + qeth_is_ipa_enabled(&c->options.ipa4, f) +#define qeth_is_supported6(c, f) \ + qeth_is_ipa_supported(&c->options.ipa6, f) +#define qeth_is_enabled6(c, f) \ + qeth_is_ipa_enabled(&c->options.ipa6, f) +#define qeth_is_ipafunc_supported(c, prot, f) \ + ((prot == QETH_PROT_IPV6) ? \ + qeth_is_supported6(c, f) : qeth_is_supported(c, f)) +#define qeth_is_ipafunc_enabled(c, prot, f) \ + ((prot == QETH_PROT_IPV6) ? 
\ + qeth_is_enabled6(c, f) : qeth_is_enabled(c, f)) + +#define QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT 0x0101 +#define QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT 0x0101 +#define QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT 0x4108 +#define QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT 0x5108 + +#define QETH_MODELLIST_ARRAY \ + {{0x1731, 0x01, 0x1732, 0x01, QETH_CARD_TYPE_OSAE, 1, \ + QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \ + QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \ + QETH_MAX_QUEUES, 0}, \ + {0x1731, 0x05, 0x1732, 0x05, QETH_CARD_TYPE_IQD, 0, \ + QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT, \ + QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT, \ + QETH_MAX_QUEUES, 0x103}, \ + {0x1731, 0x06, 0x1732, 0x06, QETH_CARD_TYPE_OSN, 0, \ + QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \ + QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \ + QETH_MAX_QUEUES, 0}, \ + {0, 0, 0, 0, 0, 0, 0, 0, 0} } + +#define QETH_REAL_CARD 1 +#define QETH_VLAN_CARD 2 +#define QETH_BUFSIZE 4096 + +/** + * some more defs + */ +#define QETH_TX_TIMEOUT 100 * HZ +#define QETH_RCD_TIMEOUT 60 * HZ +#define QETH_HEADER_SIZE 32 +#define QETH_MAX_PORTNO 15 + +/*IPv6 address autoconfiguration stuff*/ +#define UNIQUE_ID_IF_CREATE_ADDR_FAILED 0xfffe +#define UNIQUE_ID_NOT_BY_CARD 0x10000 + +/*****************************************************************************/ +/* QDIO queue and buffer handling */ +/*****************************************************************************/ +#define QETH_MAX_QUEUES 4 +#define QETH_IN_BUF_SIZE_DEFAULT 65536 +#define QETH_IN_BUF_COUNT_DEFAULT 16 +#define QETH_IN_BUF_COUNT_MIN 8 +#define QETH_IN_BUF_COUNT_MAX 128 +#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12) +#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \ + ((card)->qdio.in_buf_pool.buf_count / 2) + +/* buffers we have to be behind before we get a PCI */ +#define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1) +/*enqueued free buffers left before we get a PCI*/ +#define QETH_PCI_THRESHOLD_B(card) 0 +/*not used unless the microcode gets patched*/ +#define QETH_PCI_TIMER_VALUE(card) 3 + +#define QETH_MIN_INPUT_THRESHOLD 1 +#define QETH_MAX_INPUT_THRESHOLD 500 +#define QETH_MIN_OUTPUT_THRESHOLD 1 +#define QETH_MAX_OUTPUT_THRESHOLD 300 + +/* priority queing */ +#define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING +#define QETH_DEFAULT_QUEUE 2 +#define QETH_NO_PRIO_QUEUEING 0 +#define QETH_PRIO_Q_ING_PREC 1 +#define QETH_PRIO_Q_ING_TOS 2 +#define IP_TOS_LOWDELAY 0x10 +#define IP_TOS_HIGHTHROUGHPUT 0x08 +#define IP_TOS_HIGHRELIABILITY 0x04 +#define IP_TOS_NOTIMPORTANT 0x02 + +/* Packing */ +#define QETH_LOW_WATERMARK_PACK 2 +#define QETH_HIGH_WATERMARK_PACK 5 +#define QETH_WATERMARK_PACK_FUZZ 1 + +#define QETH_IP_HEADER_SIZE 40 + +/* large receive scatter gather copy break */ +#define QETH_RX_SG_CB (PAGE_SIZE >> 1) + +struct qeth_hdr_layer3 { + __u8 id; + __u8 flags; + __u16 inbound_checksum; /*TSO:__u16 seqno */ + __u32 token; /*TSO: __u32 reserved */ + __u16 length; + __u8 vlan_prio; + __u8 ext_flags; + __u16 vlan_id; + __u16 frame_offset; + __u8 dest_addr[16]; +} __attribute__ ((packed)); + +struct qeth_hdr_layer2 { + __u8 id; + __u8 flags[3]; + __u8 port_no; + __u8 hdr_length; + __u16 pkt_length; + __u16 seq_no; + __u16 vlan_id; + __u32 reserved; + __u8 reserved2[16]; +} __attribute__ ((packed)); + +struct qeth_hdr_osn { + __u8 id; + __u8 reserved; + __u16 seq_no; + __u16 reserved2; + __u16 control_flags; + __u16 pdu_length; + __u8 reserved3[18]; + __u32 ccid; +} __attribute__ ((packed)); + +struct qeth_hdr { + union { + struct qeth_hdr_layer2 l2; + struct qeth_hdr_layer3 l3; + struct qeth_hdr_osn 
osn; + } hdr; +} __attribute__ ((packed)); + +/*TCP Segmentation Offload header*/ +struct qeth_hdr_ext_tso { + __u16 hdr_tot_len; + __u8 imb_hdr_no; + __u8 reserved; + __u8 hdr_type; + __u8 hdr_version; + __u16 hdr_len; + __u32 payload_len; + __u16 mss; + __u16 dg_hdr_len; + __u8 padding[16]; +} __attribute__ ((packed)); + +struct qeth_hdr_tso { + struct qeth_hdr hdr; /*hdr->hdr.l3.xxx*/ + struct qeth_hdr_ext_tso ext; +} __attribute__ ((packed)); + + +/* flags for qeth_hdr.flags */ +#define QETH_HDR_PASSTHRU 0x10 +#define QETH_HDR_IPV6 0x80 +#define QETH_HDR_CAST_MASK 0x07 +enum qeth_cast_flags { + QETH_CAST_UNICAST = 0x06, + QETH_CAST_MULTICAST = 0x04, + QETH_CAST_BROADCAST = 0x05, + QETH_CAST_ANYCAST = 0x07, + QETH_CAST_NOCAST = 0x00, +}; + +enum qeth_layer2_frame_flags { + QETH_LAYER2_FLAG_MULTICAST = 0x01, + QETH_LAYER2_FLAG_BROADCAST = 0x02, + QETH_LAYER2_FLAG_UNICAST = 0x04, + QETH_LAYER2_FLAG_VLAN = 0x10, +}; + +enum qeth_header_ids { + QETH_HEADER_TYPE_LAYER3 = 0x01, + QETH_HEADER_TYPE_LAYER2 = 0x02, + QETH_HEADER_TYPE_TSO = 0x03, + QETH_HEADER_TYPE_OSN = 0x04, +}; +/* flags for qeth_hdr.ext_flags */ +#define QETH_HDR_EXT_VLAN_FRAME 0x01 +#define QETH_HDR_EXT_TOKEN_ID 0x02 +#define QETH_HDR_EXT_INCLUDE_VLAN_TAG 0x04 +#define QETH_HDR_EXT_SRC_MAC_ADDR 0x08 +#define QETH_HDR_EXT_CSUM_HDR_REQ 0x10 +#define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20 +#define QETH_HDR_EXT_UDP_TSO 0x40 /*bit off for TCP*/ + +static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale) +{ + return (sbale->flags & SBAL_FLAGS_LAST_ENTRY); +} + +enum qeth_qdio_buffer_states { + /* + * inbound: read out by driver; owned by hardware in order to be filled + * outbound: owned by driver in order to be filled + */ + QETH_QDIO_BUF_EMPTY, + /* + * inbound: filled by hardware; owned by driver in order to be read out + * outbound: filled by driver; owned by hardware in order to be sent + */ + QETH_QDIO_BUF_PRIMED, +}; + +enum qeth_qdio_info_states { + QETH_QDIO_UNINITIALIZED, + QETH_QDIO_ALLOCATED, + QETH_QDIO_ESTABLISHED, + QETH_QDIO_CLEANING +}; + +struct qeth_buffer_pool_entry { + struct list_head list; + struct list_head init_list; + void *elements[QDIO_MAX_ELEMENTS_PER_BUFFER]; +}; + +struct qeth_qdio_buffer_pool { + struct list_head entry_list; + int buf_count; +}; + +struct qeth_qdio_buffer { + struct qdio_buffer *buffer; + /* the buffer pool entry currently associated to this buffer */ + struct qeth_buffer_pool_entry *pool_entry; +}; + +struct qeth_qdio_q { + struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q]; + struct qeth_qdio_buffer bufs[QDIO_MAX_BUFFERS_PER_Q]; + int next_buf_to_init; +} __attribute__ ((aligned(256))); + +/* possible types of qeth large_send support */ +enum qeth_large_send_types { + QETH_LARGE_SEND_NO, + QETH_LARGE_SEND_EDDP, + QETH_LARGE_SEND_TSO, +}; + +struct qeth_qdio_out_buffer { + struct qdio_buffer *buffer; + atomic_t state; + int next_element_to_fill; + struct sk_buff_head skb_list; + struct list_head ctx_list; +}; + +struct qeth_card; + +enum qeth_out_q_states { + QETH_OUT_Q_UNLOCKED, + QETH_OUT_Q_LOCKED, + QETH_OUT_Q_LOCKED_FLUSH, +}; + +struct qeth_qdio_out_q { + struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q]; + struct qeth_qdio_out_buffer bufs[QDIO_MAX_BUFFERS_PER_Q]; + int queue_no; + struct qeth_card *card; + atomic_t state; + int do_pack; + /* + * index of buffer to be filled by driver; state EMPTY or PACKING + */ + int next_buf_to_fill; + /* + * number of buffers that are currently filled (PRIMED) + * -> these buffers are hardware-owned + */ + atomic_t 
used_buffers; + /* indicates whether PCI flag must be set (or if one is outstanding) */ + atomic_t set_pci_flags_count; +} __attribute__ ((aligned(256))); + +struct qeth_qdio_info { + atomic_t state; + /* input */ + struct qeth_qdio_q *in_q; + struct qeth_qdio_buffer_pool in_buf_pool; + struct qeth_qdio_buffer_pool init_pool; + int in_buf_size; + + /* output */ + int no_out_queues; + struct qeth_qdio_out_q **out_qs; + + /* priority queueing */ + int do_prio_queueing; + int default_out_queue; +}; + +enum qeth_send_errors { + QETH_SEND_ERROR_NONE, + QETH_SEND_ERROR_LINK_FAILURE, + QETH_SEND_ERROR_RETRY, + QETH_SEND_ERROR_KICK_IT, +}; + +#define QETH_ETH_MAC_V4 0x0100 /* like v4 */ +#define QETH_ETH_MAC_V6 0x3333 /* like v6 */ +/* tr mc mac is longer, but that will be enough to detect mc frames */ +#define QETH_TR_MAC_NC 0xc000 /* non-canonical */ +#define QETH_TR_MAC_C 0x0300 /* canonical */ + +#define DEFAULT_ADD_HHLEN 0 +#define MAX_ADD_HHLEN 1024 + +/** + * buffer stuff for read channel + */ +#define QETH_CMD_BUFFER_NO 8 + +/** + * channel state machine + */ +enum qeth_channel_states { + CH_STATE_UP, + CH_STATE_DOWN, + CH_STATE_ACTIVATING, + CH_STATE_HALTED, + CH_STATE_STOPPED, + CH_STATE_RCD, + CH_STATE_RCD_DONE, +}; +/** + * card state machine + */ +enum qeth_card_states { + CARD_STATE_DOWN, + CARD_STATE_HARDSETUP, + CARD_STATE_SOFTSETUP, + CARD_STATE_UP, + CARD_STATE_RECOVER, +}; + +/** + * Protocol versions + */ +enum qeth_prot_versions { + QETH_PROT_IPV4 = 0x0004, + QETH_PROT_IPV6 = 0x0006, +}; + +enum qeth_ip_types { + QETH_IP_TYPE_NORMAL, + QETH_IP_TYPE_VIPA, + QETH_IP_TYPE_RXIP, + QETH_IP_TYPE_DEL_ALL_MC, +}; + +enum qeth_cmd_buffer_state { + BUF_STATE_FREE, + BUF_STATE_LOCKED, + BUF_STATE_PROCESSED, +}; + +struct qeth_ipato { + int enabled; + int invert4; + int invert6; + struct list_head entries; +}; + +struct qeth_channel; + +struct qeth_cmd_buffer { + enum qeth_cmd_buffer_state state; + struct qeth_channel *channel; + unsigned char *data; + int rc; + void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *); +}; + +/** + * definition of a qeth channel, used for read and write + */ +struct qeth_channel { + enum qeth_channel_states state; + struct ccw1 ccw; + spinlock_t iob_lock; + wait_queue_head_t wait_q; + struct tasklet_struct irq_tasklet; + struct ccw_device *ccwdev; +/*command buffer for control data*/ + struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO]; + atomic_t irq_pending; + int io_buf_no; + int buf_no; +}; + +/** + * OSA card related definitions + */ +struct qeth_token { + __u32 issuer_rm_w; + __u32 issuer_rm_r; + __u32 cm_filter_w; + __u32 cm_filter_r; + __u32 cm_connection_w; + __u32 cm_connection_r; + __u32 ulp_filter_w; + __u32 ulp_filter_r; + __u32 ulp_connection_w; + __u32 ulp_connection_r; +}; + +struct qeth_seqno { + __u32 trans_hdr; + __u32 pdu_hdr; + __u32 pdu_hdr_ack; + __u16 ipa; + __u32 pkt_seqno; +}; + +struct qeth_reply { + struct list_head list; + wait_queue_head_t wait_q; + int (*callback)(struct qeth_card *, struct qeth_reply *, + unsigned long); + u32 seqno; + unsigned long offset; + atomic_t received; + int rc; + void *param; + struct qeth_card *card; + atomic_t refcnt; +}; + + +struct qeth_card_blkt { + int time_total; + int inter_packet; + int inter_packet_jumbo; +}; + +#define QETH_BROADCAST_WITH_ECHO 0x01 +#define QETH_BROADCAST_WITHOUT_ECHO 0x02 +#define QETH_LAYER2_MAC_READ 0x01 +#define QETH_LAYER2_MAC_REGISTERED 0x02 +struct qeth_card_info { + unsigned short unit_addr2; + unsigned short cula; + unsigned short chpid; + __u16 
func_level; + char mcl_level[QETH_MCL_LENGTH + 1]; + int guestlan; + int mac_bits; + int portname_required; + int portno; + char portname[9]; + enum qeth_card_types type; + enum qeth_link_types link_type; + int is_multicast_different; + int initial_mtu; + int max_mtu; + int broadcast_capable; + int unique_id; + struct qeth_card_blkt blkt; + __u32 csum_mask; + enum qeth_ipa_promisc_modes promisc_mode; +}; + +struct qeth_card_options { + struct qeth_routing_info route4; + struct qeth_ipa_info ipa4; + struct qeth_ipa_info adp; /*Adapter parameters*/ + struct qeth_routing_info route6; + struct qeth_ipa_info ipa6; + enum qeth_checksum_types checksum_type; + int broadcast_mode; + int macaddr_mode; + int fake_broadcast; + int add_hhlen; + int fake_ll; + int layer2; + enum qeth_large_send_types large_send; + int performance_stats; + int rx_sg_cb; +}; + +/* + * thread bits for qeth_card thread masks + */ +enum qeth_threads { + QETH_RECOVER_THREAD = 1, +}; + +struct qeth_osn_info { + int (*assist_cb)(struct net_device *dev, void *data); + int (*data_cb)(struct sk_buff *skb); +}; + +enum qeth_discipline_id { + QETH_DISCIPLINE_LAYER3 = 0, + QETH_DISCIPLINE_LAYER2 = 1, +}; + +struct qeth_discipline { + qdio_handler_t *input_handler; + qdio_handler_t *output_handler; + int (*recover)(void *ptr); + struct ccwgroup_driver *ccwgdriver; +}; + +struct qeth_vlan_vid { + struct list_head list; + unsigned short vid; +}; + +struct qeth_mc_mac { + struct list_head list; + __u8 mc_addr[MAX_ADDR_LEN]; + unsigned char mc_addrlen; +}; + +struct qeth_card { + struct list_head list; + enum qeth_card_states state; + int lan_online; + spinlock_t lock; + struct ccwgroup_device *gdev; + struct qeth_channel read; + struct qeth_channel write; + struct qeth_channel data; + + struct net_device *dev; + struct net_device_stats stats; + + struct qeth_card_info info; + struct qeth_token token; + struct qeth_seqno seqno; + struct qeth_card_options options; + + wait_queue_head_t wait_q; + spinlock_t vlanlock; + spinlock_t mclock; + struct vlan_group *vlangrp; + struct list_head vid_list; + struct list_head mc_list; + struct work_struct kernel_thread_starter; + spinlock_t thread_mask_lock; + unsigned long thread_start_mask; + unsigned long thread_allowed_mask; + unsigned long thread_running_mask; + spinlock_t ip_lock; + struct list_head ip_list; + struct list_head *ip_tbd_list; + struct qeth_ipato ipato; + struct list_head cmd_waiter_list; + /* QDIO buffer handling */ + struct qeth_qdio_info qdio; + struct qeth_perf_stats perf_stats; + int use_hard_stop; + struct qeth_osn_info osn_info; + struct qeth_discipline discipline; + atomic_t force_alloc_skb; +}; + +struct qeth_card_list_struct { + struct list_head list; + rwlock_t rwlock; +}; + +/*some helper functions*/ +#define QETH_CARD_IFNAME(card) (((card)->dev)? 
(card)->dev->name : "") + +static inline struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev) +{ + struct qeth_card *card = dev_get_drvdata(&((struct ccwgroup_device *) + dev_get_drvdata(&cdev->dev))->dev); + return card; +} + +static inline int qeth_get_micros(void) +{ + return (int) (get_clock() >> 12); +} + +static inline void *qeth_push_skb(struct qeth_card *card, struct sk_buff *skb, + int size) +{ + void *hdr; + + hdr = (void *) skb_push(skb, size); + /* + * sanity check, the Linux memory allocation scheme should + * never present us cases like this one (the qdio header size plus + * the first 40 bytes of the paket cross a 4k boundary) + */ + if ((((unsigned long) hdr) & (~(PAGE_SIZE - 1))) != + (((unsigned long) hdr + size + + QETH_IP_HEADER_SIZE) & (~(PAGE_SIZE - 1)))) { + PRINT_ERR("Misaligned packet on interface %s. Discarded.", + QETH_CARD_IFNAME(card)); + return NULL; + } + return hdr; +} + +static inline int qeth_get_ip_version(struct sk_buff *skb) +{ + switch (skb->protocol) { + case ETH_P_IPV6: + return 6; + case ETH_P_IP: + return 4; + default: + return 0; + } +} + +struct qeth_eddp_context; +extern struct ccwgroup_driver qeth_l2_ccwgroup_driver; +extern struct ccwgroup_driver qeth_l3_ccwgroup_driver; +const char *qeth_get_cardname_short(struct qeth_card *); +int qeth_realloc_buffer_pool(struct qeth_card *, int); +int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id); +void qeth_core_free_discipline(struct qeth_card *); +int qeth_core_create_device_attributes(struct device *); +void qeth_core_remove_device_attributes(struct device *); +int qeth_core_create_osn_attributes(struct device *); +void qeth_core_remove_osn_attributes(struct device *); + +/* exports for qeth discipline device drivers */ +extern struct qeth_card_list_struct qeth_core_card_list; +extern debug_info_t *qeth_dbf_setup; +extern debug_info_t *qeth_dbf_data; +extern debug_info_t *qeth_dbf_misc; +extern debug_info_t *qeth_dbf_control; +extern debug_info_t *qeth_dbf_trace; +extern debug_info_t *qeth_dbf_sense; +extern debug_info_t *qeth_dbf_qerr; + +void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int); +int qeth_threads_running(struct qeth_card *, unsigned long); +int qeth_wait_for_threads(struct qeth_card *, unsigned long); +int qeth_do_run_thread(struct qeth_card *, unsigned long); +void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long); +void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long); +int qeth_core_hardsetup_card(struct qeth_card *); +void qeth_print_status_message(struct qeth_card *); +int qeth_init_qdio_queues(struct qeth_card *); +int qeth_send_startlan(struct qeth_card *); +int qeth_send_stoplan(struct qeth_card *); +int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, + int (*reply_cb) + (struct qeth_card *, struct qeth_reply *, unsigned long), + void *); +struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *, + enum qeth_ipa_cmds, enum qeth_prot_versions); +int qeth_query_setadapterparms(struct qeth_card *); +int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int, + unsigned int, const char *); +void qeth_put_buffer_pool_entry(struct qeth_card *, + struct qeth_buffer_pool_entry *); +void qeth_queue_input_buffer(struct qeth_card *, int); +struct sk_buff *qeth_core_get_next_skb(struct qeth_card *, + struct qdio_buffer *, struct qdio_buffer_element **, int *, + struct qeth_hdr **); +void qeth_schedule_recovery(struct qeth_card *); +void qeth_qdio_output_handler(struct ccw_device *, unsigned 
int, + unsigned int, unsigned int, + unsigned int, int, int, + unsigned long); +void qeth_clear_ipacmd_list(struct qeth_card *); +int qeth_qdio_clear_card(struct qeth_card *, int); +void qeth_clear_working_pool_list(struct qeth_card *); +void qeth_clear_cmd_buffers(struct qeth_channel *); +void qeth_clear_qdio_buffers(struct qeth_card *); +void qeth_setadp_promisc_mode(struct qeth_card *); +struct net_device_stats *qeth_get_stats(struct net_device *); +int qeth_change_mtu(struct net_device *, int); +int qeth_setadpparms_change_macaddr(struct qeth_card *); +void qeth_tx_timeout(struct net_device *); +void qeth_prepare_control_data(struct qeth_card *, int, + struct qeth_cmd_buffer *); +void qeth_release_buffer(struct qeth_channel *, struct qeth_cmd_buffer *); +void qeth_prepare_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, char); +struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *); +int qeth_mdio_read(struct net_device *, int, int); +int qeth_snmp_command(struct qeth_card *, char __user *); +int qeth_set_large_send(struct qeth_card *, enum qeth_large_send_types); +struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *, __u32, __u32); +int qeth_default_setadapterparms_cb(struct qeth_card *, struct qeth_reply *, + unsigned long); +int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *, + int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long), + void *reply_param); +int qeth_get_cast_type(struct qeth_card *, struct sk_buff *); +int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); +struct sk_buff *qeth_prepare_skb(struct qeth_card *, struct sk_buff *, + struct qeth_hdr **); +int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int); +int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, + struct sk_buff *, struct qeth_hdr *, int, + struct qeth_eddp_context *); +int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *, + struct sk_buff *, struct qeth_hdr *, + int, struct qeth_eddp_context *); +int qeth_core_get_stats_count(struct net_device *); +void qeth_core_get_ethtool_stats(struct net_device *, + struct ethtool_stats *, u64 *); +void qeth_core_get_strings(struct net_device *, u32, u8 *); +void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); + +/* exports for OSN */ +int qeth_osn_assist(struct net_device *, void *, int); +int qeth_osn_register(unsigned char *read_dev_no, struct net_device **, + int (*assist_cb)(struct net_device *, void *), + int (*data_cb)(struct sk_buff *)); +void qeth_osn_deregister(struct net_device *); + +#endif /* __QETH_CORE_H__ */ diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c new file mode 100644 index 00000000000..95c6fcf5895 --- /dev/null +++ b/drivers/s390/net/qeth_core_main.c @@ -0,0 +1,4540 @@ +/* + * drivers/s390/net/qeth_core_main.c + * + * Copyright IBM Corp. 2007 + * Author(s): Utz Bacher , + * Frank Pavlic , + * Thomas Spatzier , + * Frank Blaschka + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "qeth_core.h" +#include "qeth_core_offl.h" + +#define QETH_DBF_TEXT_(name, level, text...) 
\ + do { \ + if (qeth_dbf_passes(qeth_dbf_##name, level)) { \ + char *dbf_txt_buf = \ + get_cpu_var(qeth_core_dbf_txt_buf); \ + sprintf(dbf_txt_buf, text); \ + debug_text_event(qeth_dbf_##name, level, dbf_txt_buf); \ + put_cpu_var(qeth_core_dbf_txt_buf); \ + } \ + } while (0) + +struct qeth_card_list_struct qeth_core_card_list; +EXPORT_SYMBOL_GPL(qeth_core_card_list); +debug_info_t *qeth_dbf_setup; +EXPORT_SYMBOL_GPL(qeth_dbf_setup); +debug_info_t *qeth_dbf_data; +EXPORT_SYMBOL_GPL(qeth_dbf_data); +debug_info_t *qeth_dbf_misc; +EXPORT_SYMBOL_GPL(qeth_dbf_misc); +debug_info_t *qeth_dbf_control; +EXPORT_SYMBOL_GPL(qeth_dbf_control); +debug_info_t *qeth_dbf_trace; +EXPORT_SYMBOL_GPL(qeth_dbf_trace); +debug_info_t *qeth_dbf_sense; +EXPORT_SYMBOL_GPL(qeth_dbf_sense); +debug_info_t *qeth_dbf_qerr; +EXPORT_SYMBOL_GPL(qeth_dbf_qerr); + +static struct device *qeth_core_root_dev; +static unsigned int known_devices[][10] = QETH_MODELLIST_ARRAY; +static struct lock_class_key qdio_out_skb_queue_key; +static DEFINE_PER_CPU(char[256], qeth_core_dbf_txt_buf); + +static void qeth_send_control_data_cb(struct qeth_channel *, + struct qeth_cmd_buffer *); +static int qeth_issue_next_read(struct qeth_card *); +static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *); +static void qeth_setup_ccw(struct qeth_channel *, unsigned char *, __u32); +static void qeth_free_buffer_pool(struct qeth_card *); +static int qeth_qdio_establish(struct qeth_card *); + + +static inline void __qeth_fill_buffer_frag(struct sk_buff *skb, + struct qdio_buffer *buffer, int is_tso, + int *next_element_to_fill) +{ + struct skb_frag_struct *frag; + int fragno; + unsigned long addr; + int element, cnt, dlen; + + fragno = skb_shinfo(skb)->nr_frags; + element = *next_element_to_fill; + dlen = 0; + + if (is_tso) + buffer->element[element].flags = + SBAL_FLAGS_MIDDLE_FRAG; + else + buffer->element[element].flags = + SBAL_FLAGS_FIRST_FRAG; + dlen = skb->len - skb->data_len; + if (dlen) { + buffer->element[element].addr = skb->data; + buffer->element[element].length = dlen; + element++; + } + for (cnt = 0; cnt < fragno; cnt++) { + frag = &skb_shinfo(skb)->frags[cnt]; + addr = (page_to_pfn(frag->page) << PAGE_SHIFT) + + frag->page_offset; + buffer->element[element].addr = (char *)addr; + buffer->element[element].length = frag->size; + if (cnt < (fragno - 1)) + buffer->element[element].flags = + SBAL_FLAGS_MIDDLE_FRAG; + else + buffer->element[element].flags = + SBAL_FLAGS_LAST_FRAG; + element++; + } + *next_element_to_fill = element; +} + +static inline const char *qeth_get_cardname(struct qeth_card *card) +{ + if (card->info.guestlan) { + switch (card->info.type) { + case QETH_CARD_TYPE_OSAE: + return " Guest LAN QDIO"; + case QETH_CARD_TYPE_IQD: + return " Guest LAN Hiper"; + default: + return " unknown"; + } + } else { + switch (card->info.type) { + case QETH_CARD_TYPE_OSAE: + return " OSD Express"; + case QETH_CARD_TYPE_IQD: + return " HiperSockets"; + case QETH_CARD_TYPE_OSN: + return " OSN QDIO"; + default: + return " unknown"; + } + } + return " n/a"; +} + +/* max length to be returned: 14 */ +const char *qeth_get_cardname_short(struct qeth_card *card) +{ + if (card->info.guestlan) { + switch (card->info.type) { + case QETH_CARD_TYPE_OSAE: + return "GuestLAN QDIO"; + case QETH_CARD_TYPE_IQD: + return "GuestLAN Hiper"; + default: + return "unknown"; + } + } else { + switch (card->info.type) { + case QETH_CARD_TYPE_OSAE: + switch (card->info.link_type) { + case QETH_LINK_TYPE_FAST_ETH: + return "OSD_100"; + case 
QETH_LINK_TYPE_HSTR: + return "HSTR"; + case QETH_LINK_TYPE_GBIT_ETH: + return "OSD_1000"; + case QETH_LINK_TYPE_10GBIT_ETH: + return "OSD_10GIG"; + case QETH_LINK_TYPE_LANE_ETH100: + return "OSD_FE_LANE"; + case QETH_LINK_TYPE_LANE_TR: + return "OSD_TR_LANE"; + case QETH_LINK_TYPE_LANE_ETH1000: + return "OSD_GbE_LANE"; + case QETH_LINK_TYPE_LANE: + return "OSD_ATM_LANE"; + default: + return "OSD_Express"; + } + case QETH_CARD_TYPE_IQD: + return "HiperSockets"; + case QETH_CARD_TYPE_OSN: + return "OSN"; + default: + return "unknown"; + } + } + return "n/a"; +} + +void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, + int clear_start_mask) +{ + unsigned long flags; + + spin_lock_irqsave(&card->thread_mask_lock, flags); + card->thread_allowed_mask = threads; + if (clear_start_mask) + card->thread_start_mask &= threads; + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + wake_up(&card->wait_q); +} +EXPORT_SYMBOL_GPL(qeth_set_allowed_threads); + +int qeth_threads_running(struct qeth_card *card, unsigned long threads) +{ + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&card->thread_mask_lock, flags); + rc = (card->thread_running_mask & threads); + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + return rc; +} +EXPORT_SYMBOL_GPL(qeth_threads_running); + +int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads) +{ + return wait_event_interruptible(card->wait_q, + qeth_threads_running(card, threads) == 0); +} +EXPORT_SYMBOL_GPL(qeth_wait_for_threads); + +void qeth_clear_working_pool_list(struct qeth_card *card) +{ + struct qeth_buffer_pool_entry *pool_entry, *tmp; + + QETH_DBF_TEXT(trace, 5, "clwrklst"); + list_for_each_entry_safe(pool_entry, tmp, + &card->qdio.in_buf_pool.entry_list, list){ + list_del(&pool_entry->list); + } +} +EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list); + +static int qeth_alloc_buffer_pool(struct qeth_card *card) +{ + struct qeth_buffer_pool_entry *pool_entry; + void *ptr; + int i, j; + + QETH_DBF_TEXT(trace, 5, "alocpool"); + for (i = 0; i < card->qdio.init_pool.buf_count; ++i) { + pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL); + if (!pool_entry) { + qeth_free_buffer_pool(card); + return -ENOMEM; + } + for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) { + ptr = (void *) __get_free_page(GFP_KERNEL|GFP_DMA); + if (!ptr) { + while (j > 0) + free_page((unsigned long) + pool_entry->elements[--j]); + kfree(pool_entry); + qeth_free_buffer_pool(card); + return -ENOMEM; + } + pool_entry->elements[j] = ptr; + } + list_add(&pool_entry->init_list, + &card->qdio.init_pool.entry_list); + } + return 0; +} + +int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt) +{ + QETH_DBF_TEXT(trace, 2, "realcbp"); + + if ((card->state != CARD_STATE_DOWN) && + (card->state != CARD_STATE_RECOVER)) + return -EPERM; + + /* TODO: steel/add buffers from/to a running card's buffer pool (?) 
*/ + qeth_clear_working_pool_list(card); + qeth_free_buffer_pool(card); + card->qdio.in_buf_pool.buf_count = bufcnt; + card->qdio.init_pool.buf_count = bufcnt; + return qeth_alloc_buffer_pool(card); +} + +int qeth_set_large_send(struct qeth_card *card, + enum qeth_large_send_types type) +{ + int rc = 0; + + if (card->dev == NULL) { + card->options.large_send = type; + return 0; + } + if (card->state == CARD_STATE_UP) + netif_tx_disable(card->dev); + card->options.large_send = type; + switch (card->options.large_send) { + case QETH_LARGE_SEND_EDDP: + card->dev->features |= NETIF_F_TSO | NETIF_F_SG | + NETIF_F_HW_CSUM; + break; + case QETH_LARGE_SEND_TSO: + if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) { + card->dev->features |= NETIF_F_TSO | NETIF_F_SG | + NETIF_F_HW_CSUM; + } else { + PRINT_WARN("TSO not supported on %s. " + "large_send set to 'no'.\n", + card->dev->name); + card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG | + NETIF_F_HW_CSUM); + card->options.large_send = QETH_LARGE_SEND_NO; + rc = -EOPNOTSUPP; + } + break; + default: /* includes QETH_LARGE_SEND_NO */ + card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG | + NETIF_F_HW_CSUM); + break; + } + if (card->state == CARD_STATE_UP) + netif_wake_queue(card->dev); + return rc; +} +EXPORT_SYMBOL_GPL(qeth_set_large_send); + +static int qeth_issue_next_read(struct qeth_card *card) +{ + int rc; + struct qeth_cmd_buffer *iob; + + QETH_DBF_TEXT(trace, 5, "issnxrd"); + if (card->read.state != CH_STATE_UP) + return -EIO; + iob = qeth_get_buffer(&card->read); + if (!iob) { + PRINT_WARN("issue_next_read failed: no iob available!\n"); + return -ENOMEM; + } + qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); + QETH_DBF_TEXT(trace, 6, "noirqpnd"); + rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, + (addr_t) iob, 0, 0); + if (rc) { + PRINT_ERR("Error in starting next read ccw! 
rc=%i\n", rc); + atomic_set(&card->read.irq_pending, 0); + qeth_schedule_recovery(card); + wake_up(&card->wait_q); + } + return rc; +} + +static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card) +{ + struct qeth_reply *reply; + + reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC); + if (reply) { + atomic_set(&reply->refcnt, 1); + atomic_set(&reply->received, 0); + reply->card = card; + }; + return reply; +} + +static void qeth_get_reply(struct qeth_reply *reply) +{ + WARN_ON(atomic_read(&reply->refcnt) <= 0); + atomic_inc(&reply->refcnt); +} + +static void qeth_put_reply(struct qeth_reply *reply) +{ + WARN_ON(atomic_read(&reply->refcnt) <= 0); + if (atomic_dec_and_test(&reply->refcnt)) + kfree(reply); +} + +static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, + struct qeth_card *card) +{ + int rc; + int com; + char *ipa_name; + + com = cmd->hdr.command; + rc = cmd->hdr.return_code; + ipa_name = qeth_get_ipa_cmd_name(com); + + PRINT_ERR("%s(x%X) for %s returned x%X \"%s\"\n", ipa_name, com, + QETH_CARD_IFNAME(card), rc, qeth_get_ipa_msg(rc)); +} + +static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, + struct qeth_cmd_buffer *iob) +{ + struct qeth_ipa_cmd *cmd = NULL; + + QETH_DBF_TEXT(trace, 5, "chkipad"); + if (IS_IPA(iob->data)) { + cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data); + if (IS_IPA_REPLY(cmd)) { + if (cmd->hdr.return_code && + (cmd->hdr.command < IPA_CMD_SETCCID || + cmd->hdr.command > IPA_CMD_MODCCID)) + qeth_issue_ipa_msg(cmd, card); + return cmd; + } else { + switch (cmd->hdr.command) { + case IPA_CMD_STOPLAN: + PRINT_WARN("Link failure on %s (CHPID 0x%X) - " + "there is a network problem or " + "someone pulled the cable or " + "disabled the port.\n", + QETH_CARD_IFNAME(card), + card->info.chpid); + card->lan_online = 0; + if (card->dev && netif_carrier_ok(card->dev)) + netif_carrier_off(card->dev); + return NULL; + case IPA_CMD_STARTLAN: + PRINT_INFO("Link reestablished on %s " + "(CHPID 0x%X). Scheduling " + "IP address reset.\n", + QETH_CARD_IFNAME(card), + card->info.chpid); + netif_carrier_on(card->dev); + qeth_schedule_recovery(card); + return NULL; + case IPA_CMD_MODCCID: + return cmd; + case IPA_CMD_REGISTER_LOCAL_ADDR: + QETH_DBF_TEXT(trace, 3, "irla"); + break; + case IPA_CMD_UNREGISTER_LOCAL_ADDR: + QETH_DBF_TEXT(trace, 3, "urla"); + break; + default: + PRINT_WARN("Received data is IPA " + "but not a reply!\n"); + break; + } + } + } + return cmd; +} + +void qeth_clear_ipacmd_list(struct qeth_card *card) +{ + struct qeth_reply *reply, *r; + unsigned long flags; + + QETH_DBF_TEXT(trace, 4, "clipalst"); + + spin_lock_irqsave(&card->lock, flags); + list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) { + qeth_get_reply(reply); + reply->rc = -EIO; + atomic_inc(&reply->received); + list_del_init(&reply->list); + wake_up(&reply->wait_q); + qeth_put_reply(reply); + } + spin_unlock_irqrestore(&card->lock, flags); +} +EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list); + +static int qeth_check_idx_response(unsigned char *buffer) +{ + if (!buffer) + return 0; + + QETH_DBF_HEX(control, 2, buffer, QETH_DBF_CONTROL_LEN); + if ((buffer[2] & 0xc0) == 0xc0) { + PRINT_WARN("received an IDX TERMINATE " + "with cause code 0x%02x%s\n", + buffer[4], + ((buffer[4] == 0x22) ? 
+ " -- try another portname" : "")); + QETH_DBF_TEXT(trace, 2, "ckidxres"); + QETH_DBF_TEXT(trace, 2, " idxterm"); + QETH_DBF_TEXT_(trace, 2, " rc%d", -EIO); + return -EIO; + } + return 0; +} + +static void qeth_setup_ccw(struct qeth_channel *channel, unsigned char *iob, + __u32 len) +{ + struct qeth_card *card; + + QETH_DBF_TEXT(trace, 4, "setupccw"); + card = CARD_FROM_CDEV(channel->ccwdev); + if (channel == &card->read) + memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1)); + else + memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1)); + channel->ccw.count = len; + channel->ccw.cda = (__u32) __pa(iob); +} + +static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel) +{ + __u8 index; + + QETH_DBF_TEXT(trace, 6, "getbuff"); + index = channel->io_buf_no; + do { + if (channel->iob[index].state == BUF_STATE_FREE) { + channel->iob[index].state = BUF_STATE_LOCKED; + channel->io_buf_no = (channel->io_buf_no + 1) % + QETH_CMD_BUFFER_NO; + memset(channel->iob[index].data, 0, QETH_BUFSIZE); + return channel->iob + index; + } + index = (index + 1) % QETH_CMD_BUFFER_NO; + } while (index != channel->io_buf_no); + + return NULL; +} + +void qeth_release_buffer(struct qeth_channel *channel, + struct qeth_cmd_buffer *iob) +{ + unsigned long flags; + + QETH_DBF_TEXT(trace, 6, "relbuff"); + spin_lock_irqsave(&channel->iob_lock, flags); + memset(iob->data, 0, QETH_BUFSIZE); + iob->state = BUF_STATE_FREE; + iob->callback = qeth_send_control_data_cb; + iob->rc = 0; + spin_unlock_irqrestore(&channel->iob_lock, flags); +} +EXPORT_SYMBOL_GPL(qeth_release_buffer); + +static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel) +{ + struct qeth_cmd_buffer *buffer = NULL; + unsigned long flags; + + spin_lock_irqsave(&channel->iob_lock, flags); + buffer = __qeth_get_buffer(channel); + spin_unlock_irqrestore(&channel->iob_lock, flags); + return buffer; +} + +struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *channel) +{ + struct qeth_cmd_buffer *buffer; + wait_event(channel->wait_q, + ((buffer = qeth_get_buffer(channel)) != NULL)); + return buffer; +} +EXPORT_SYMBOL_GPL(qeth_wait_for_buffer); + +void qeth_clear_cmd_buffers(struct qeth_channel *channel) +{ + int cnt; + + for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) + qeth_release_buffer(channel, &channel->iob[cnt]); + channel->buf_no = 0; + channel->io_buf_no = 0; +} +EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers); + +static void qeth_send_control_data_cb(struct qeth_channel *channel, + struct qeth_cmd_buffer *iob) +{ + struct qeth_card *card; + struct qeth_reply *reply, *r; + struct qeth_ipa_cmd *cmd; + unsigned long flags; + int keep_reply; + + QETH_DBF_TEXT(trace, 4, "sndctlcb"); + + card = CARD_FROM_CDEV(channel->ccwdev); + if (qeth_check_idx_response(iob->data)) { + qeth_clear_ipacmd_list(card); + qeth_schedule_recovery(card); + goto out; + } + + cmd = qeth_check_ipa_data(card, iob); + if ((cmd == NULL) && (card->state != CARD_STATE_DOWN)) + goto out; + /*in case of OSN : check if cmd is set */ + if (card->info.type == QETH_CARD_TYPE_OSN && + cmd && + cmd->hdr.command != IPA_CMD_STARTLAN && + card->osn_info.assist_cb != NULL) { + card->osn_info.assist_cb(card->dev, cmd); + goto out; + } + + spin_lock_irqsave(&card->lock, flags); + list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) { + if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) || + ((cmd) && (reply->seqno == cmd->hdr.seqno))) { + qeth_get_reply(reply); + list_del_init(&reply->list); + spin_unlock_irqrestore(&card->lock, flags); + keep_reply = 0; + 
if (reply->callback != NULL) { + if (cmd) { + reply->offset = (__u16)((char *)cmd - + (char *)iob->data); + keep_reply = reply->callback(card, + reply, + (unsigned long)cmd); + } else + keep_reply = reply->callback(card, + reply, + (unsigned long)iob); + } + if (cmd) + reply->rc = (u16) cmd->hdr.return_code; + else if (iob->rc) + reply->rc = iob->rc; + if (keep_reply) { + spin_lock_irqsave(&card->lock, flags); + list_add_tail(&reply->list, + &card->cmd_waiter_list); + spin_unlock_irqrestore(&card->lock, flags); + } else { + atomic_inc(&reply->received); + wake_up(&reply->wait_q); + } + qeth_put_reply(reply); + goto out; + } + } + spin_unlock_irqrestore(&card->lock, flags); +out: + memcpy(&card->seqno.pdu_hdr_ack, + QETH_PDU_HEADER_SEQ_NO(iob->data), + QETH_SEQ_NO_LENGTH); + qeth_release_buffer(channel, iob); +} + +static int qeth_setup_channel(struct qeth_channel *channel) +{ + int cnt; + + QETH_DBF_TEXT(setup, 2, "setupch"); + for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) { + channel->iob[cnt].data = (char *) + kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL); + if (channel->iob[cnt].data == NULL) + break; + channel->iob[cnt].state = BUF_STATE_FREE; + channel->iob[cnt].channel = channel; + channel->iob[cnt].callback = qeth_send_control_data_cb; + channel->iob[cnt].rc = 0; + } + if (cnt < QETH_CMD_BUFFER_NO) { + while (cnt-- > 0) + kfree(channel->iob[cnt].data); + return -ENOMEM; + } + channel->buf_no = 0; + channel->io_buf_no = 0; + atomic_set(&channel->irq_pending, 0); + spin_lock_init(&channel->iob_lock); + + init_waitqueue_head(&channel->wait_q); + return 0; +} + +static int qeth_set_thread_start_bit(struct qeth_card *card, + unsigned long thread) +{ + unsigned long flags; + + spin_lock_irqsave(&card->thread_mask_lock, flags); + if (!(card->thread_allowed_mask & thread) || + (card->thread_start_mask & thread)) { + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + return -EPERM; + } + card->thread_start_mask |= thread; + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + return 0; +} + +void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread) +{ + unsigned long flags; + + spin_lock_irqsave(&card->thread_mask_lock, flags); + card->thread_start_mask &= ~thread; + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + wake_up(&card->wait_q); +} +EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit); + +void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread) +{ + unsigned long flags; + + spin_lock_irqsave(&card->thread_mask_lock, flags); + card->thread_running_mask &= ~thread; + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + wake_up(&card->wait_q); +} +EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit); + +static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread) +{ + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&card->thread_mask_lock, flags); + if (card->thread_start_mask & thread) { + if ((card->thread_allowed_mask & thread) && + !(card->thread_running_mask & thread)) { + rc = 1; + card->thread_start_mask &= ~thread; + card->thread_running_mask |= thread; + } else + rc = -EPERM; + } + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + return rc; +} + +int qeth_do_run_thread(struct qeth_card *card, unsigned long thread) +{ + int rc = 0; + + wait_event(card->wait_q, + (rc = __qeth_do_run_thread(card, thread)) >= 0); + return rc; +} +EXPORT_SYMBOL_GPL(qeth_do_run_thread); + +void qeth_schedule_recovery(struct qeth_card *card) +{ + QETH_DBF_TEXT(trace, 2, "startrec"); + if 
(qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0) + schedule_work(&card->kernel_thread_starter); +} +EXPORT_SYMBOL_GPL(qeth_schedule_recovery); + +static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb) +{ + int dstat, cstat; + char *sense; + + sense = (char *) irb->ecw; + cstat = irb->scsw.cstat; + dstat = irb->scsw.dstat; + + if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | + SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | + SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { + QETH_DBF_TEXT(trace, 2, "CGENCHK"); + PRINT_WARN("check on device %s, dstat=x%x, cstat=x%x ", + cdev->dev.bus_id, dstat, cstat); + print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET, + 16, 1, irb, 64, 1); + return 1; + } + + if (dstat & DEV_STAT_UNIT_CHECK) { + if (sense[SENSE_RESETTING_EVENT_BYTE] & + SENSE_RESETTING_EVENT_FLAG) { + QETH_DBF_TEXT(trace, 2, "REVIND"); + return 1; + } + if (sense[SENSE_COMMAND_REJECT_BYTE] & + SENSE_COMMAND_REJECT_FLAG) { + QETH_DBF_TEXT(trace, 2, "CMDREJi"); + return 0; + } + if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) { + QETH_DBF_TEXT(trace, 2, "AFFE"); + return 1; + } + if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) { + QETH_DBF_TEXT(trace, 2, "ZEROSEN"); + return 0; + } + QETH_DBF_TEXT(trace, 2, "DGENCHK"); + return 1; + } + return 0; +} + +static long __qeth_check_irb_error(struct ccw_device *cdev, + unsigned long intparm, struct irb *irb) +{ + if (!IS_ERR(irb)) + return 0; + + switch (PTR_ERR(irb)) { + case -EIO: + PRINT_WARN("i/o-error on device %s\n", cdev->dev.bus_id); + QETH_DBF_TEXT(trace, 2, "ckirberr"); + QETH_DBF_TEXT_(trace, 2, " rc%d", -EIO); + break; + case -ETIMEDOUT: + PRINT_WARN("timeout on device %s\n", cdev->dev.bus_id); + QETH_DBF_TEXT(trace, 2, "ckirberr"); + QETH_DBF_TEXT_(trace, 2, " rc%d", -ETIMEDOUT); + if (intparm == QETH_RCD_PARM) { + struct qeth_card *card = CARD_FROM_CDEV(cdev); + + if (card && (card->data.ccwdev == cdev)) { + card->data.state = CH_STATE_DOWN; + wake_up(&card->wait_q); + } + } + break; + default: + PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb), + cdev->dev.bus_id); + QETH_DBF_TEXT(trace, 2, "ckirberr"); + QETH_DBF_TEXT(trace, 2, " rc???"); + } + return PTR_ERR(irb); +} + +static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, + struct irb *irb) +{ + int rc; + int cstat, dstat; + struct qeth_cmd_buffer *buffer; + struct qeth_channel *channel; + struct qeth_card *card; + struct qeth_cmd_buffer *iob; + __u8 index; + + QETH_DBF_TEXT(trace, 5, "irq"); + + if (__qeth_check_irb_error(cdev, intparm, irb)) + return; + cstat = irb->scsw.cstat; + dstat = irb->scsw.dstat; + + card = CARD_FROM_CDEV(cdev); + if (!card) + return; + + if (card->read.ccwdev == cdev) { + channel = &card->read; + QETH_DBF_TEXT(trace, 5, "read"); + } else if (card->write.ccwdev == cdev) { + channel = &card->write; + QETH_DBF_TEXT(trace, 5, "write"); + } else { + channel = &card->data; + QETH_DBF_TEXT(trace, 5, "data"); + } + atomic_set(&channel->irq_pending, 0); + + if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC)) + channel->state = CH_STATE_STOPPED; + + if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC)) + channel->state = CH_STATE_HALTED; + + /*let's wake up immediately on data channel*/ + if ((channel == &card->data) && (intparm != 0) && + (intparm != QETH_RCD_PARM)) + goto out; + + if (intparm == QETH_CLEAR_CHANNEL_PARM) { + QETH_DBF_TEXT(trace, 6, "clrchpar"); + /* we don't have to handle this further */ + intparm = 0; + } + if (intparm == QETH_HALT_CHANNEL_PARM) { + 
QETH_DBF_TEXT(trace, 6, "hltchpar"); + /* we don't have to handle this further */ + intparm = 0; + } + if ((dstat & DEV_STAT_UNIT_EXCEP) || + (dstat & DEV_STAT_UNIT_CHECK) || + (cstat)) { + if (irb->esw.esw0.erw.cons) { + /* TODO: we should make this s390dbf */ + PRINT_WARN("sense data available on channel %s.\n", + CHANNEL_ID(channel)); + PRINT_WARN(" cstat 0x%X\n dstat 0x%X\n", cstat, dstat); + print_hex_dump(KERN_WARNING, "qeth: irb ", + DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1); + print_hex_dump(KERN_WARNING, "qeth: sense data ", + DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1); + } + if (intparm == QETH_RCD_PARM) { + channel->state = CH_STATE_DOWN; + goto out; + } + rc = qeth_get_problem(cdev, irb); + if (rc) { + qeth_schedule_recovery(card); + goto out; + } + } + + if (intparm == QETH_RCD_PARM) { + channel->state = CH_STATE_RCD_DONE; + goto out; + } + if (intparm) { + buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm); + buffer->state = BUF_STATE_PROCESSED; + } + if (channel == &card->data) + return; + if (channel == &card->read && + channel->state == CH_STATE_UP) + qeth_issue_next_read(card); + + iob = channel->iob; + index = channel->buf_no; + while (iob[index].state == BUF_STATE_PROCESSED) { + if (iob[index].callback != NULL) + iob[index].callback(channel, iob + index); + + index = (index + 1) % QETH_CMD_BUFFER_NO; + } + channel->buf_no = index; +out: + wake_up(&card->wait_q); + return; +} + +static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, + struct qeth_qdio_out_buffer *buf) +{ + int i; + struct sk_buff *skb; + + /* is PCI flag set on buffer? */ + if (buf->buffer->element[0].flags & 0x40) + atomic_dec(&queue->set_pci_flags_count); + + skb = skb_dequeue(&buf->skb_list); + while (skb) { + atomic_dec(&skb->users); + dev_kfree_skb_any(skb); + skb = skb_dequeue(&buf->skb_list); + } + qeth_eddp_buf_release_contexts(buf); + for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) { + buf->buffer->element[i].length = 0; + buf->buffer->element[i].addr = NULL; + buf->buffer->element[i].flags = 0; + } + buf->next_element_to_fill = 0; + atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); +} + +void qeth_clear_qdio_buffers(struct qeth_card *card) +{ + int i, j; + + QETH_DBF_TEXT(trace, 2, "clearqdbf"); + /* clear outbound buffers to free skbs */ + for (i = 0; i < card->qdio.no_out_queues; ++i) + if (card->qdio.out_qs[i]) { + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) + qeth_clear_output_buffer(card->qdio.out_qs[i], + &card->qdio.out_qs[i]->bufs[j]); + } +} +EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers); + +static void qeth_free_buffer_pool(struct qeth_card *card) +{ + struct qeth_buffer_pool_entry *pool_entry, *tmp; + int i = 0; + QETH_DBF_TEXT(trace, 5, "freepool"); + list_for_each_entry_safe(pool_entry, tmp, + &card->qdio.init_pool.entry_list, init_list){ + for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) + free_page((unsigned long)pool_entry->elements[i]); + list_del(&pool_entry->init_list); + kfree(pool_entry); + } +} + +static void qeth_free_qdio_buffers(struct qeth_card *card) +{ + int i, j; + + QETH_DBF_TEXT(trace, 2, "freeqdbf"); + if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) == + QETH_QDIO_UNINITIALIZED) + return; + kfree(card->qdio.in_q); + card->qdio.in_q = NULL; + /* inbound buffer pool */ + qeth_free_buffer_pool(card); + /* free outbound qdio_qs */ + if (card->qdio.out_qs) { + for (i = 0; i < card->qdio.no_out_queues; ++i) { + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) + qeth_clear_output_buffer(card->qdio.out_qs[i], + 
&card->qdio.out_qs[i]->bufs[j]); + kfree(card->qdio.out_qs[i]); + } + kfree(card->qdio.out_qs); + card->qdio.out_qs = NULL; + } +} + +static void qeth_clean_channel(struct qeth_channel *channel) +{ + int cnt; + + QETH_DBF_TEXT(setup, 2, "freech"); + for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) + kfree(channel->iob[cnt].data); +} + +static int qeth_is_1920_device(struct qeth_card *card) +{ + int single_queue = 0; + struct ccw_device *ccwdev; + struct channelPath_dsc { + u8 flags; + u8 lsn; + u8 desc; + u8 chpid; + u8 swla; + u8 zeroes; + u8 chla; + u8 chpp; + } *chp_dsc; + + QETH_DBF_TEXT(setup, 2, "chk_1920"); + + ccwdev = card->data.ccwdev; + chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0); + if (chp_dsc != NULL) { + /* CHPP field bit 6 == 1 -> single queue */ + single_queue = ((chp_dsc->chpp & 0x02) == 0x02); + kfree(chp_dsc); + } + QETH_DBF_TEXT_(setup, 2, "rc:%x", single_queue); + return single_queue; +} + +static void qeth_init_qdio_info(struct qeth_card *card) +{ + QETH_DBF_TEXT(setup, 4, "intqdinf"); + atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); + /* inbound */ + card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; + card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT; + card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count; + INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list); + INIT_LIST_HEAD(&card->qdio.init_pool.entry_list); +} + +static void qeth_set_intial_options(struct qeth_card *card) +{ + card->options.route4.type = NO_ROUTER; + card->options.route6.type = NO_ROUTER; + card->options.checksum_type = QETH_CHECKSUM_DEFAULT; + card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS; + card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL; + card->options.fake_broadcast = 0; + card->options.add_hhlen = DEFAULT_ADD_HHLEN; + card->options.fake_ll = 0; + card->options.performance_stats = 0; + card->options.rx_sg_cb = QETH_RX_SG_CB; +} + +static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread) +{ + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&card->thread_mask_lock, flags); + QETH_DBF_TEXT_(trace, 4, " %02x%02x%02x", + (u8) card->thread_start_mask, + (u8) card->thread_allowed_mask, + (u8) card->thread_running_mask); + rc = (card->thread_start_mask & thread); + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + return rc; +} + +static void qeth_start_kernel_thread(struct work_struct *work) +{ + struct qeth_card *card = container_of(work, struct qeth_card, + kernel_thread_starter); + QETH_DBF_TEXT(trace , 2, "strthrd"); + + if (card->read.state != CH_STATE_UP && + card->write.state != CH_STATE_UP) + return; + if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) + kthread_run(card->discipline.recover, (void *) card, + "qeth_recover"); +} + +static int qeth_setup_card(struct qeth_card *card) +{ + + QETH_DBF_TEXT(setup, 2, "setupcrd"); + QETH_DBF_HEX(setup, 2, &card, sizeof(void *)); + + card->read.state = CH_STATE_DOWN; + card->write.state = CH_STATE_DOWN; + card->data.state = CH_STATE_DOWN; + card->state = CARD_STATE_DOWN; + card->lan_online = 0; + card->use_hard_stop = 0; + card->dev = NULL; + spin_lock_init(&card->vlanlock); + spin_lock_init(&card->mclock); + card->vlangrp = NULL; + spin_lock_init(&card->lock); + spin_lock_init(&card->ip_lock); + spin_lock_init(&card->thread_mask_lock); + card->thread_start_mask = 0; + card->thread_allowed_mask = 0; + card->thread_running_mask = 0; + INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread); + INIT_LIST_HEAD(&card->ip_list); 
+ card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL); + if (!card->ip_tbd_list) { + QETH_DBF_TEXT(setup, 0, "iptbdnom"); + return -ENOMEM; + } + INIT_LIST_HEAD(card->ip_tbd_list); + INIT_LIST_HEAD(&card->cmd_waiter_list); + init_waitqueue_head(&card->wait_q); + /* intial options */ + qeth_set_intial_options(card); + /* IP address takeover */ + INIT_LIST_HEAD(&card->ipato.entries); + card->ipato.enabled = 0; + card->ipato.invert4 = 0; + card->ipato.invert6 = 0; + /* init QDIO stuff */ + qeth_init_qdio_info(card); + return 0; +} + +static struct qeth_card *qeth_alloc_card(void) +{ + struct qeth_card *card; + + QETH_DBF_TEXT(setup, 2, "alloccrd"); + card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL); + if (!card) + return NULL; + QETH_DBF_HEX(setup, 2, &card, sizeof(void *)); + if (qeth_setup_channel(&card->read)) { + kfree(card); + return NULL; + } + if (qeth_setup_channel(&card->write)) { + qeth_clean_channel(&card->read); + kfree(card); + return NULL; + } + card->options.layer2 = -1; + return card; +} + +static int qeth_determine_card_type(struct qeth_card *card) +{ + int i = 0; + + QETH_DBF_TEXT(setup, 2, "detcdtyp"); + + card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT; + card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; + while (known_devices[i][4]) { + if ((CARD_RDEV(card)->id.dev_type == known_devices[i][2]) && + (CARD_RDEV(card)->id.dev_model == known_devices[i][3])) { + card->info.type = known_devices[i][4]; + card->qdio.no_out_queues = known_devices[i][8]; + card->info.is_multicast_different = known_devices[i][9]; + if (qeth_is_1920_device(card)) { + PRINT_INFO("Priority Queueing not able " + "due to hardware limitations!\n"); + card->qdio.no_out_queues = 1; + card->qdio.default_out_queue = 0; + } + return 0; + } + i++; + } + card->info.type = QETH_CARD_TYPE_UNKNOWN; + PRINT_ERR("unknown card type on device %s\n", CARD_BUS_ID(card)); + return -ENOENT; +} + +static int qeth_clear_channel(struct qeth_channel *channel) +{ + unsigned long flags; + struct qeth_card *card; + int rc; + + QETH_DBF_TEXT(trace, 3, "clearch"); + card = CARD_FROM_CDEV(channel->ccwdev); + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM); + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + + if (rc) + return rc; + rc = wait_event_interruptible_timeout(card->wait_q, + channel->state == CH_STATE_STOPPED, QETH_TIMEOUT); + if (rc == -ERESTARTSYS) + return rc; + if (channel->state != CH_STATE_STOPPED) + return -ETIME; + channel->state = CH_STATE_DOWN; + return 0; +} + +static int qeth_halt_channel(struct qeth_channel *channel) +{ + unsigned long flags; + struct qeth_card *card; + int rc; + + QETH_DBF_TEXT(trace, 3, "haltch"); + card = CARD_FROM_CDEV(channel->ccwdev); + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM); + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + + if (rc) + return rc; + rc = wait_event_interruptible_timeout(card->wait_q, + channel->state == CH_STATE_HALTED, QETH_TIMEOUT); + if (rc == -ERESTARTSYS) + return rc; + if (channel->state != CH_STATE_HALTED) + return -ETIME; + return 0; +} + +static int qeth_halt_channels(struct qeth_card *car