Diffstat (limited to 'drivers/net/ethernet/brocade/bna/bnad.c')
 drivers/net/ethernet/brocade/bna/bnad.c | 676 ++++++++++++++++++++++++++++++++----------------
 1 file changed, 470 insertions(+), 206 deletions(-)
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index b78e69e0e52..3a77f9ead00 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -52,7 +52,7 @@ MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"  /*   * Global variables   */ -u32 bnad_rxqs_per_cq = 2; +static u32 bnad_rxqs_per_cq = 2;  static u32 bna_id;  static struct mutex bnad_list_mutex;  static LIST_HEAD(bnad_list); @@ -142,7 +142,8 @@ bnad_tx_buff_unmap(struct bnad *bnad,  		dma_unmap_page(&bnad->pcidev->dev,  			dma_unmap_addr(&unmap->vectors[vector], dma_addr), -			skb_shinfo(skb)->frags[nvecs].size, DMA_TO_DEVICE); +			dma_unmap_len(&unmap->vectors[vector], dma_len), +			DMA_TO_DEVICE);  		dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);  		nvecs--;  	} @@ -248,7 +249,7 @@ bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)  	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))  		bna_ib_ack(tcb->i_dbell, sent); -	smp_mb__before_clear_bit(); +	smp_mb__before_atomic();  	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);  	return sent; @@ -282,27 +283,32 @@ static int  bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)  {  	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; -	int mtu, order; +	int order;  	bnad_rxq_alloc_uninit(bnad, rcb); -	mtu = bna_enet_mtu_get(&bnad->bna.enet); -	order = get_order(mtu); +	order = get_order(rcb->rxq->buffer_size); + +	unmap_q->type = BNAD_RXBUF_PAGE;  	if (bna_is_small_rxq(rcb->id)) {  		unmap_q->alloc_order = 0;  		unmap_q->map_size = rcb->rxq->buffer_size;  	} else { -		unmap_q->alloc_order = order; -		unmap_q->map_size = -			(rcb->rxq->buffer_size > 2048) ? -			PAGE_SIZE << order : 2048; +		if (rcb->rxq->multi_buffer) { +			unmap_q->alloc_order = 0; +			unmap_q->map_size = rcb->rxq->buffer_size; +			unmap_q->type = BNAD_RXBUF_MULTI_BUFF; +		} else { +			unmap_q->alloc_order = order; +			unmap_q->map_size = +				(rcb->rxq->buffer_size > 2048) ? 
+				PAGE_SIZE << order : 2048; +		}  	}  	BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size)); -	unmap_q->type = BNAD_RXBUF_PAGE; -  	return 0;  } @@ -345,10 +351,10 @@ bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)  	for (i = 0; i < rcb->q_depth; i++) {  		struct bnad_rx_unmap *unmap = &unmap_q->unmap[i]; -		if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) -			bnad_rxq_cleanup_page(bnad, unmap); -		else +		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))  			bnad_rxq_cleanup_skb(bnad, unmap); +		else +			bnad_rxq_cleanup_page(bnad, unmap);  	}  	bnad_rxq_alloc_uninit(bnad, rcb);  } @@ -480,10 +486,10 @@ bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)  	if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))  		return; -	if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) -		bnad_rxq_refill_page(bnad, rcb, to_alloc); -	else +	if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))  		bnad_rxq_refill_skb(bnad, rcb, to_alloc); +	else +		bnad_rxq_refill_page(bnad, rcb, to_alloc);  }  #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \ @@ -500,72 +506,114 @@ bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)  #define flags_udp6 (BNA_CQ_EF_IPV6 | \  				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK) -static inline struct sk_buff * -bnad_cq_prepare_skb(struct bnad_rx_ctrl *rx_ctrl, -		struct bnad_rx_unmap_q *unmap_q, -		struct bnad_rx_unmap *unmap, -		u32 length, u32 flags) +static void +bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb, +		    u32 sop_ci, u32 nvecs)  { -	struct bnad *bnad = rx_ctrl->bnad; -	struct sk_buff *skb; +	struct bnad_rx_unmap_q *unmap_q; +	struct bnad_rx_unmap *unmap; +	u32 ci, vec; -	if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) { -		skb = napi_get_frags(&rx_ctrl->napi); -		if (unlikely(!skb)) -			return NULL; +	unmap_q = rcb->unmap_q; +	for (vec = 0, ci = sop_ci; vec < nvecs; vec++) { +		unmap = &unmap_q->unmap[ci]; +		BNA_QE_INDX_INC(ci, rcb->q_depth); + +		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) +			bnad_rxq_cleanup_skb(bnad, unmap); +		else +			bnad_rxq_cleanup_page(bnad, unmap); +	} +} + +static void +bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb, +			u32 sop_ci, u32 nvecs, u32 last_fraglen) +{ +	struct bnad *bnad; +	u32 ci, vec, len, totlen = 0; +	struct bnad_rx_unmap_q *unmap_q; +	struct bnad_rx_unmap *unmap; + +	unmap_q = rcb->unmap_q; +	bnad = rcb->bnad; + +	/* prefetch header */ +	prefetch(page_address(unmap_q->unmap[sop_ci].page) + +			unmap_q->unmap[sop_ci].page_offset); + +	for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) { +		unmap = &unmap_q->unmap[ci]; +		BNA_QE_INDX_INC(ci, rcb->q_depth);  		dma_unmap_page(&bnad->pcidev->dev,  				dma_unmap_addr(&unmap->vector, dma_addr),  				unmap->vector.len, DMA_FROM_DEVICE); + +		len = (vec == nvecs) ? 
+			last_fraglen : unmap->vector.len; +		totlen += len; +  		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, -				unmap->page, unmap->page_offset, length); -		skb->len += length; -		skb->data_len += length; -		skb->truesize += length; +				unmap->page, unmap->page_offset, len);  		unmap->page = NULL;  		unmap->vector.len = 0; - -		return skb;  	} -	skb = unmap->skb; -	BUG_ON(!skb); +	skb->len += totlen; +	skb->data_len += totlen; +	skb->truesize += totlen; +} + +static inline void +bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb, +		  struct bnad_rx_unmap *unmap, u32 len) +{ +	prefetch(skb->data);  	dma_unmap_single(&bnad->pcidev->dev,  			dma_unmap_addr(&unmap->vector, dma_addr),  			unmap->vector.len, DMA_FROM_DEVICE); -	skb_put(skb, length); - +	skb_put(skb, len);  	skb->protocol = eth_type_trans(skb, bnad->netdev);  	unmap->skb = NULL;  	unmap->vector.len = 0; -	return skb;  }  static u32  bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)  { -	struct bna_cq_entry *cq, *cmpl; +	struct bna_cq_entry *cq, *cmpl, *next_cmpl;  	struct bna_rcb *rcb = NULL;  	struct bnad_rx_unmap_q *unmap_q; -	struct bnad_rx_unmap *unmap; -	struct sk_buff *skb; +	struct bnad_rx_unmap *unmap = NULL; +	struct sk_buff *skb = NULL;  	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;  	struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl; -	u32 packets = 0, length = 0, flags, masked_flags; +	u32 packets = 0, len = 0, totlen = 0; +	u32 pi, vec, sop_ci = 0, nvecs = 0; +	u32 flags, masked_flags;  	prefetch(bnad->netdev);  	cq = ccb->sw_q;  	cmpl = &cq[ccb->producer_index]; -	while (cmpl->valid && (packets < budget)) { -		packets++; -		flags = ntohl(cmpl->flags); -		length = ntohs(cmpl->length); +	while (packets < budget) { +		if (!cmpl->valid) +			break; +		/* The 'valid' field is set by the adapter, only after writing +		 * the other fields of completion entry. Hence, do not load +		 * other fields of completion entry *before* the 'valid' is +		 * loaded. Adding the rmb() here prevents the compiler and/or +		 * CPU from reordering the reads which would potentially result +		 * in reading stale values in completion entry. +		 */ +		rmb(); +  		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));  		if (bna_is_small_rxq(cmpl->rxq_id)) @@ -574,25 +622,78 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)  			rcb = ccb->rcb[0];  		unmap_q = rcb->unmap_q; -		unmap = &unmap_q->unmap[rcb->consumer_index]; -		if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR | -					BNA_CQ_EF_FCS_ERROR | -					BNA_CQ_EF_TOO_LONG))) { -			if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) -				bnad_rxq_cleanup_page(bnad, unmap); -			else -				bnad_rxq_cleanup_skb(bnad, unmap); +		/* start of packet ci */ +		sop_ci = rcb->consumer_index; + +		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) { +			unmap = &unmap_q->unmap[sop_ci]; +			skb = unmap->skb; +		} else { +			skb = napi_get_frags(&rx_ctrl->napi); +			if (unlikely(!skb)) +				break; +		} +		prefetch(skb); + +		flags = ntohl(cmpl->flags); +		len = ntohs(cmpl->length); +		totlen = len; +		nvecs = 1; +		/* Check all the completions for this frame. +		 * busy-wait doesn't help much, break here. +		 */ +		if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) && +		    (flags & BNA_CQ_EF_EOP) == 0) { +			pi = ccb->producer_index; +			do { +				BNA_QE_INDX_INC(pi, ccb->q_depth); +				next_cmpl = &cq[pi]; + +				if (!next_cmpl->valid) +					break; +				/* The 'valid' field is set by the adapter, only +				 * after writing the other fields of completion +				 * entry. 
Hence, do not load other fields of +				 * completion entry *before* the 'valid' is +				 * loaded. Adding the rmb() here prevents the +				 * compiler and/or CPU from reordering the reads +				 * which would potentially result in reading +				 * stale values in completion entry. +				 */ +				rmb(); + +				len = ntohs(next_cmpl->length); +				flags = ntohl(next_cmpl->flags); + +				nvecs++; +				totlen += len; +			} while ((flags & BNA_CQ_EF_EOP) == 0); + +			if (!next_cmpl->valid) +				break; +		} + +		/* TODO: BNA_CQ_EF_LOCAL ? */ +		if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR | +						BNA_CQ_EF_FCS_ERROR | +						BNA_CQ_EF_TOO_LONG))) { +			bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);  			rcb->rxq->rx_packets_with_error++; +  			goto next;  		} -		skb = bnad_cq_prepare_skb(ccb->ctrl, unmap_q, unmap, -				length, flags); +		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) +			bnad_cq_setup_skb(bnad, skb, unmap, len); +		else +			bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len); -		if (unlikely(!skb)) -			break; +		packets++; +		rcb->rxq->rx_packets++; +		rcb->rxq->rx_bytes += totlen; +		ccb->bytes_per_intr += totlen;  		masked_flags = flags & flags_cksum_prot_mask; @@ -606,21 +707,22 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)  		else  			skb_checksum_none_assert(skb); -		rcb->rxq->rx_packets++; -		rcb->rxq->rx_bytes += length; - -		if (flags & BNA_CQ_EF_VLAN) +		if ((flags & BNA_CQ_EF_VLAN) && +		    (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))  			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag)); -		if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) -			napi_gro_frags(&rx_ctrl->napi); -		else +		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))  			netif_receive_skb(skb); +		else +			napi_gro_frags(&rx_ctrl->napi);  next: -		cmpl->valid = 0; -		BNA_QE_INDX_INC(rcb->consumer_index, rcb->q_depth); -		BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth); +		BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth); +		for (vec = 0; vec < nvecs; vec++) { +			cmpl = &cq[ccb->producer_index]; +			cmpl->valid = 0; +			BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth); +		}  		cmpl = &cq[ccb->producer_index];  	} @@ -1024,7 +1126,7 @@ bnad_tx_cleanup(struct delayed_work *work)  		bnad_txq_cleanup(bnad, tcb); -		smp_mb__before_clear_bit(); +		smp_mb__before_atomic();  		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);  	} @@ -1899,8 +2001,10 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)  	tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,  			tx_info);  	spin_unlock_irqrestore(&bnad->bna_lock, flags); -	if (!tx) +	if (!tx) { +		err = -ENOMEM;  		goto err_return; +	}  	tx_info->tx = tx;  	INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, @@ -1911,7 +2015,7 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)  		err = bnad_tx_msix_register(bnad, tx_info,  			tx_id, bnad->num_txq_per_tx);  		if (err) -			goto err_return; +			goto cleanup_tx;  	}  	spin_lock_irqsave(&bnad->bna_lock, flags); @@ -1920,6 +2024,12 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)  	return 0; +cleanup_tx: +	spin_lock_irqsave(&bnad->bna_lock, flags); +	bna_tx_destroy(tx_info->tx); +	spin_unlock_irqrestore(&bnad->bna_lock, flags); +	tx_info->tx = NULL; +	tx_info->tx_id = 0;  err_return:  	bnad_tx_res_free(bnad, res_info);  	return err; @@ -1930,6 +2040,7 @@ err_return:  static void  bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)  { +	memset(rx_config, 0, sizeof(*rx_config));  	rx_config->rx_type = BNA_RX_T_REGULAR;  	rx_config->num_paths = bnad->num_rxp_per_rx;  	
rx_config->coalescing_timeo = bnad->rx_coalescing_timeo; @@ -1950,12 +2061,43 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)  		memset(&rx_config->rss_config, 0,  		       sizeof(rx_config->rss_config));  	} + +	rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu); +	rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED; + +	/* BNA_RXP_SINGLE - one data-buffer queue +	 * BNA_RXP_SLR - one small-buffer and one large-buffer queues +	 * BNA_RXP_HDS - one header-buffer and one data-buffer queues +	 */ +	/* TODO: configurable param for queue type */  	rx_config->rxp_type = BNA_RXP_SLR; -	rx_config->q_depth = bnad->rxq_depth; -	rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE; +	if (BNAD_PCI_DEV_IS_CAT2(bnad) && +	    rx_config->frame_size > 4096) { +		/* though size_routing_enable is set in SLR, +		 * small packets may get routed to same rxq. +		 * set buf_size to 2048 instead of PAGE_SIZE. +		 */ +		rx_config->q0_buf_size = 2048; +		/* this should be in multiples of 2 */ +		rx_config->q0_num_vecs = 4; +		rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs; +		rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED; +	} else { +		rx_config->q0_buf_size = rx_config->frame_size; +		rx_config->q0_num_vecs = 1; +		rx_config->q0_depth = bnad->rxq_depth; +	} + +	/* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */ +	if (rx_config->rxp_type == BNA_RXP_SLR) { +		rx_config->q1_depth = bnad->rxq_depth; +		rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE; +	} -	rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED; +	rx_config->vlan_strip_status = +		(bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ? +		BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;  }  static void @@ -1969,6 +2111,49 @@ bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)  }  /* Called with mutex_lock(&bnad->conf_mutex) held */ +static u32 +bnad_reinit_rx(struct bnad *bnad) +{ +	struct net_device *netdev = bnad->netdev; +	u32 err = 0, current_err = 0; +	u32 rx_id = 0, count = 0; +	unsigned long flags; + +	/* destroy and create new rx objects */ +	for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) { +		if (!bnad->rx_info[rx_id].rx) +			continue; +		bnad_destroy_rx(bnad, rx_id); +	} + +	spin_lock_irqsave(&bnad->bna_lock, flags); +	bna_enet_mtu_set(&bnad->bna.enet, +			 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL); +	spin_unlock_irqrestore(&bnad->bna_lock, flags); + +	for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) { +		count++; +		current_err = bnad_setup_rx(bnad, rx_id); +		if (current_err && !err) { +			err = current_err; +			pr_err("RXQ:%u setup failed\n", rx_id); +		} +	} + +	/* restore rx configuration */ +	if (bnad->rx_info[0].rx && !err) { +		bnad_restore_vlans(bnad, 0); +		bnad_enable_default_bcast(bnad); +		spin_lock_irqsave(&bnad->bna_lock, flags); +		bnad_mac_addr_set_locked(bnad, netdev->dev_addr); +		spin_unlock_irqrestore(&bnad->bna_lock, flags); +		bnad_set_rx_mode(netdev); +	} + +	return count; +} + +/* Called with bnad_conf_lock() held */  void  bnad_destroy_rx(struct bnad *bnad, u32 rx_id)  { @@ -2047,13 +2232,19 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)  	spin_unlock_irqrestore(&bnad->bna_lock, flags);  	/* Fill Unmap Q memory requirements */ -	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPQ], -			rx_config->num_paths + -			((rx_config->rxp_type == BNA_RXP_SINGLE) ? 
-			 0 : rx_config->num_paths), -			((bnad->rxq_depth * sizeof(struct bnad_rx_unmap)) + -			 sizeof(struct bnad_rx_unmap_q))); - +	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ], +				 rx_config->num_paths, +			(rx_config->q0_depth * +			 sizeof(struct bnad_rx_unmap)) + +			 sizeof(struct bnad_rx_unmap_q)); + +	if (rx_config->rxp_type != BNA_RXP_SINGLE) { +		BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ], +					 rx_config->num_paths, +				(rx_config->q1_depth * +				 sizeof(struct bnad_rx_unmap) + +				 sizeof(struct bnad_rx_unmap_q))); +	}  	/* Allocate resource */  	err = bnad_rx_res_alloc(bnad, res_info, rx_id);  	if (err) @@ -2305,12 +2496,10 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)  {  	int err; -	if (skb_header_cloned(skb)) { -		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); -		if (err) { -			BNAD_UPDATE_CTR(bnad, tso_err); -			return err; -		} +	err = skb_cow_head(skb, 0); +	if (err < 0) { +		BNAD_UPDATE_CTR(bnad, tso_err); +		return err;  	}  	/* @@ -2478,9 +2667,11 @@ bnad_enable_msix(struct bnad *bnad)  	for (i = 0; i < bnad->msix_num; i++)  		bnad->msix_table[i].entry = i; -	ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num); -	if (ret > 0) { -		/* Not enough MSI-X vectors. */ +	ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table, +				    1, bnad->msix_num); +	if (ret < 0) { +		goto intx_mode; +	} else if (ret < bnad->msix_num) {  		pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",  			ret, bnad->msix_num); @@ -2493,18 +2684,11 @@ bnad_enable_msix(struct bnad *bnad)  		bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +  			 BNAD_MAILBOX_MSIX_VECTORS; -		if (bnad->msix_num > ret) +		if (bnad->msix_num > ret) { +			pci_disable_msix(bnad->pcidev);  			goto intx_mode; - -		/* Try once more with adjusted numbers */ -		/* If this fails, fall back to INTx */ -		ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, -				      bnad->msix_num); -		if (ret) -			goto intx_mode; - -	} else if (ret < 0) -		goto intx_mode; +		} +	}  	pci_intx(bnad->pcidev, 0); @@ -2548,7 +2732,6 @@ bnad_open(struct net_device *netdev)  	int err;  	struct bnad *bnad = netdev_priv(netdev);  	struct bna_pause_config pause_config; -	int mtu;  	unsigned long flags;  	mutex_lock(&bnad->conf_mutex); @@ -2567,10 +2750,9 @@ bnad_open(struct net_device *netdev)  	pause_config.tx_pause = 0;  	pause_config.rx_pause = 0; -	mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN; -  	spin_lock_irqsave(&bnad->bna_lock, flags); -	bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL); +	bna_enet_mtu_set(&bnad->bna.enet, +			 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);  	bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);  	bna_enet_enable(&bnad->bna.enet);  	spin_unlock_irqrestore(&bnad->bna_lock, flags); @@ -2624,9 +2806,6 @@ bnad_stop(struct net_device *netdev)  	bnad_destroy_tx(bnad, 0);  	bnad_destroy_rx(bnad, 0); -	/* These config flags are cleared in the hardware */ -	bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI | BNAD_CF_PROMISC); -  	/* Synchronize mailbox IRQ */  	bnad_mbox_irq_sync(bnad); @@ -2664,13 +2843,11 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,  		}  		if (unlikely((gso_size + skb_transport_offset(skb) +  			      tcp_hdrlen(skb)) >= skb->len)) { -			txqent->hdr.wi.opcode = -				__constant_htons(BNA_TXQ_WI_SEND); +			txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);  			txqent->hdr.wi.lso_mss = 0;  			BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);  		} else { -			txqent->hdr.wi.opcode = -				
__constant_htons(BNA_TXQ_WI_SEND_LSO); +			txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);  			txqent->hdr.wi.lso_mss = htons(gso_size);  		} @@ -2684,7 +2861,7 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,  			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(  			tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));  	} else  { -		txqent->hdr.wi.opcode =	__constant_htons(BNA_TXQ_WI_SEND); +		txqent->hdr.wi.opcode =	htons(BNA_TXQ_WI_SEND);  		txqent->hdr.wi.lso_mss = 0;  		if (unlikely(skb->len > (bnad->netdev->mtu + ETH_HLEN))) { @@ -2695,11 +2872,10 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,  		if (skb->ip_summed == CHECKSUM_PARTIAL) {  			u8 proto = 0; -			if (skb->protocol == __constant_htons(ETH_P_IP)) +			if (skb->protocol == htons(ETH_P_IP))  				proto = ip_hdr(skb)->protocol;  #ifdef NETIF_F_IPV6_CSUM -			else if (skb->protocol == -				 __constant_htons(ETH_P_IPV6)) { +			else if (skb->protocol == htons(ETH_P_IPV6)) {  				/* nexthdr may not be TCP immediately. */  				proto = ipv6_hdr(skb)->nexthdr;  			} @@ -2768,42 +2944,42 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)  	/* Sanity checks for the skb */  	if (unlikely(skb->len <= ETH_HLEN)) { -		dev_kfree_skb(skb); +		dev_kfree_skb_any(skb);  		BNAD_UPDATE_CTR(bnad, tx_skb_too_short);  		return NETDEV_TX_OK;  	}  	if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) { -		dev_kfree_skb(skb); +		dev_kfree_skb_any(skb);  		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);  		return NETDEV_TX_OK;  	}  	if (unlikely(len == 0)) { -		dev_kfree_skb(skb); +		dev_kfree_skb_any(skb);  		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);  		return NETDEV_TX_OK;  	}  	tcb = bnad->tx_info[0].tcb[txq_id]; -	q_depth = tcb->q_depth; -	prod = tcb->producer_index; - -	unmap_q = tcb->unmap_q;  	/*  	 * Takes care of the Tx that is scheduled between clearing the flag  	 * and the netif_tx_stop_all_queues() call.  	 
*/ -	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) { -		dev_kfree_skb(skb); +	if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) { +		dev_kfree_skb_any(skb);  		BNAD_UPDATE_CTR(bnad, tx_skb_stopping);  		return NETDEV_TX_OK;  	} +	q_depth = tcb->q_depth; +	prod = tcb->producer_index; +	unmap_q = tcb->unmap_q; +  	vectors = 1 + skb_shinfo(skb)->nr_frags;  	wis = BNA_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */  	if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) { -		dev_kfree_skb(skb); +		dev_kfree_skb_any(skb);  		BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);  		return NETDEV_TX_OK;  	} @@ -2816,7 +2992,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)  			sent = bnad_txcmpl_process(bnad, tcb);  			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))  				bna_ib_ack(tcb->i_dbell, sent); -			smp_mb__before_clear_bit(); +			smp_mb__before_atomic();  			clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);  		} else {  			netif_stop_queue(netdev); @@ -2843,7 +3019,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)  	/* Program the opcode, flags, frame_len, num_vectors in WI */  	if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) { -		dev_kfree_skb(skb); +		dev_kfree_skb_any(skb);  		return NETDEV_TX_OK;  	}  	txqent->hdr.wi.reserved = 0; @@ -2863,13 +3039,13 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)  	for (i = 0, vect_id = 0; i < vectors - 1; i++) {  		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; -		u16		size = skb_frag_size(frag); +		u32		size = skb_frag_size(frag);  		if (unlikely(size == 0)) {  			/* Undo the changes starting at tcb->producer_index */  			bnad_tx_buff_unmap(bnad, unmap_q, q_depth,  				tcb->producer_index); -			dev_kfree_skb(skb); +			dev_kfree_skb_any(skb);  			BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);  			return NETDEV_TX_OK;  		} @@ -2881,24 +3057,24 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)  			vect_id = 0;  			BNA_QE_INDX_INC(prod, q_depth);  			txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod]; -			txqent->hdr.wi_ext.opcode = -				__constant_htons(BNA_TXQ_WI_EXTENSION); +			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);  			unmap = &unmap_q[prod];  		}  		dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,  					    0, size, DMA_TO_DEVICE); +		dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);  		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);  		txqent->vector[vect_id].length = htons(size);  		dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr, -						dma_addr); +				   dma_addr);  		head_unmap->nvecs++;  	}  	if (unlikely(len != skb->len)) {  		/* Undo the changes starting at tcb->producer_index */  		bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index); -		dev_kfree_skb(skb); +		dev_kfree_skb_any(skb);  		BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);  		return NETDEV_TX_OK;  	} @@ -2911,6 +3087,8 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)  	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))  		return NETDEV_TX_OK; +	skb_tx_timestamp(skb); +  	bna_txq_prod_indx_doorbell(tcb);  	smp_mb(); @@ -2937,73 +3115,128 @@ bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)  	return stats;  } +static void +bnad_set_rx_ucast_fltr(struct bnad *bnad) +{ +	struct net_device *netdev = bnad->netdev; +	int uc_count = netdev_uc_count(netdev); +	enum bna_cb_status ret; +	u8 *mac_list; +	struct netdev_hw_addr *ha; +	int entry; + +	if 
(netdev_uc_empty(bnad->netdev)) { +		bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL); +		return; +	} + +	if (uc_count > bna_attr(&bnad->bna)->num_ucmac) +		goto mode_default; + +	mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC); +	if (mac_list == NULL) +		goto mode_default; + +	entry = 0; +	netdev_for_each_uc_addr(ha, netdev) { +		memcpy(&mac_list[entry * ETH_ALEN], +		       &ha->addr[0], ETH_ALEN); +		entry++; +	} + +	ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, +			mac_list, NULL); +	kfree(mac_list); + +	if (ret != BNA_CB_SUCCESS) +		goto mode_default; + +	return; + +	/* ucast packets not in UCAM are routed to default function */ +mode_default: +	bnad->cfg_flags |= BNAD_CF_DEFAULT; +	bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL); +} + +static void +bnad_set_rx_mcast_fltr(struct bnad *bnad) +{ +	struct net_device *netdev = bnad->netdev; +	int mc_count = netdev_mc_count(netdev); +	enum bna_cb_status ret; +	u8 *mac_list; + +	if (netdev->flags & IFF_ALLMULTI) +		goto mode_allmulti; + +	if (netdev_mc_empty(netdev)) +		return; + +	if (mc_count > bna_attr(&bnad->bna)->num_mcmac) +		goto mode_allmulti; + +	mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC); + +	if (mac_list == NULL) +		goto mode_allmulti; + +	memcpy(&mac_list[0], &bnad_bcast_addr[0], ETH_ALEN); + +	/* copy rest of the MCAST addresses */ +	bnad_netdev_mc_list_get(netdev, mac_list); +	ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, +			mac_list, NULL); +	kfree(mac_list); + +	if (ret != BNA_CB_SUCCESS) +		goto mode_allmulti; + +	return; + +mode_allmulti: +	bnad->cfg_flags |= BNAD_CF_ALLMULTI; +	bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL); +} +  void  bnad_set_rx_mode(struct net_device *netdev)  {  	struct bnad *bnad = netdev_priv(netdev); -	u32	new_mask, valid_mask; +	enum bna_rxmode new_mode, mode_mask;  	unsigned long flags;  	spin_lock_irqsave(&bnad->bna_lock, flags); -	new_mask = valid_mask = 0; - -	if (netdev->flags & IFF_PROMISC) { -		if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) { -			new_mask = BNAD_RXMODE_PROMISC_DEFAULT; -			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT; -			bnad->cfg_flags |= BNAD_CF_PROMISC; -		} -	} else { -		if (bnad->cfg_flags & BNAD_CF_PROMISC) { -			new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT; -			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT; -			bnad->cfg_flags &= ~BNAD_CF_PROMISC; -		} -	} - -	if (netdev->flags & IFF_ALLMULTI) { -		if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) { -			new_mask |= BNA_RXMODE_ALLMULTI; -			valid_mask |= BNA_RXMODE_ALLMULTI; -			bnad->cfg_flags |= BNAD_CF_ALLMULTI; -		} -	} else { -		if (bnad->cfg_flags & BNAD_CF_ALLMULTI) { -			new_mask &= ~BNA_RXMODE_ALLMULTI; -			valid_mask |= BNA_RXMODE_ALLMULTI; -			bnad->cfg_flags &= ~BNAD_CF_ALLMULTI; -		} +	if (bnad->rx_info[0].rx == NULL) { +		spin_unlock_irqrestore(&bnad->bna_lock, flags); +		return;  	} -	if (bnad->rx_info[0].rx == NULL) -		goto unlock; +	/* clear bnad flags to update it with new settings */ +	bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT | +			BNAD_CF_ALLMULTI); -	bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL); - -	if (!netdev_mc_empty(netdev)) { -		u8 *mcaddr_list; -		int mc_count = netdev_mc_count(netdev); +	new_mode = 0; +	if (netdev->flags & IFF_PROMISC) { +		new_mode |= BNAD_RXMODE_PROMISC_DEFAULT; +		bnad->cfg_flags |= BNAD_CF_PROMISC; +	} else { +		bnad_set_rx_mcast_fltr(bnad); -		/* Index 0 holds the broadcast address */ -		mcaddr_list = -			kzalloc((mc_count + 1) * ETH_ALEN, -				GFP_ATOMIC); -		if (!mcaddr_list) -			goto unlock; +		
if (bnad->cfg_flags & BNAD_CF_ALLMULTI) +			new_mode |= BNA_RXMODE_ALLMULTI; -		memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN); +		bnad_set_rx_ucast_fltr(bnad); -		/* Copy rest of the MC addresses */ -		bnad_netdev_mc_list_get(netdev, mcaddr_list); +		if (bnad->cfg_flags & BNAD_CF_DEFAULT) +			new_mode |= BNA_RXMODE_DEFAULT; +	} -		bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, -					mcaddr_list, NULL); +	mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT | +			BNA_RXMODE_ALLMULTI; +	bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL); -		/* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */ -		kfree(mcaddr_list); -	} -unlock:  	spin_unlock_irqrestore(&bnad->bna_lock, flags);  } @@ -3033,14 +3266,14 @@ bnad_set_mac_address(struct net_device *netdev, void *mac_addr)  }  static int -bnad_mtu_set(struct bnad *bnad, int mtu) +bnad_mtu_set(struct bnad *bnad, int frame_size)  {  	unsigned long flags;  	init_completion(&bnad->bnad_completions.mtu_comp);  	spin_lock_irqsave(&bnad->bna_lock, flags); -	bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set); +	bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);  	spin_unlock_irqrestore(&bnad->bna_lock, flags);  	wait_for_completion(&bnad->bnad_completions.mtu_comp); @@ -3051,18 +3284,34 @@ bnad_mtu_set(struct bnad *bnad, int mtu)  static int  bnad_change_mtu(struct net_device *netdev, int new_mtu)  { -	int err, mtu = netdev->mtu; +	int err, mtu;  	struct bnad *bnad = netdev_priv(netdev); +	u32 rx_count = 0, frame, new_frame;  	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)  		return -EINVAL;  	mutex_lock(&bnad->conf_mutex); +	mtu = netdev->mtu;  	netdev->mtu = new_mtu; -	mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN; -	err = bnad_mtu_set(bnad, mtu); +	frame = BNAD_FRAME_SIZE(mtu); +	new_frame = BNAD_FRAME_SIZE(new_mtu); + +	/* check if multi-buffer needs to be enabled */ +	if (BNAD_PCI_DEV_IS_CAT2(bnad) && +	    netif_running(bnad->netdev)) { +		/* only when transition is over 4K */ +		if ((frame <= 4096 && new_frame > 4096) || +		    (frame > 4096 && new_frame <= 4096)) +			rx_count = bnad_reinit_rx(bnad); +	} + +	/* rx_count > 0 - new rx created +	 *	- Linux set err = 0 and return +	 */ +	err = bnad_mtu_set(bnad, new_frame);  	if (err)  		err = -EBUSY; @@ -3112,6 +3361,27 @@ bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)  	return 0;  } +static int bnad_set_features(struct net_device *dev, netdev_features_t features) +{ +	struct bnad *bnad = netdev_priv(dev); +	netdev_features_t changed = features ^ dev->features; + +	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) { +		unsigned long flags; + +		spin_lock_irqsave(&bnad->bna_lock, flags); + +		if (features & NETIF_F_HW_VLAN_CTAG_RX) +			bna_rx_vlan_strip_enable(bnad->rx_info[0].rx); +		else +			bna_rx_vlan_strip_disable(bnad->rx_info[0].rx); + +		spin_unlock_irqrestore(&bnad->bna_lock, flags); +	} + +	return 0; +} +  #ifdef CONFIG_NET_POLL_CONTROLLER  static void  bnad_netpoll(struct net_device *netdev) @@ -3159,6 +3429,7 @@ static const struct net_device_ops bnad_netdev_ops = {  	.ndo_change_mtu		= bnad_change_mtu,  	.ndo_vlan_rx_add_vid    = bnad_vlan_rx_add_vid,  	.ndo_vlan_rx_kill_vid   = bnad_vlan_rx_kill_vid, +	.ndo_set_features	= bnad_set_features,  #ifdef CONFIG_NET_POLL_CONTROLLER  	.ndo_poll_controller    = bnad_netpoll  #endif @@ -3171,14 +3442,14 @@ bnad_netdev_init(struct bnad *bnad, bool using_dac)  	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |  		NETIF_F_IP_CSUM | 
NETIF_F_IPV6_CSUM | -		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX; +		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX | +		NETIF_F_HW_VLAN_CTAG_RX;  	netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |  		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |  		NETIF_F_TSO | NETIF_F_TSO6; -	netdev->features |= netdev->hw_features | -		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; +	netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;  	if (using_dac)  		netdev->features |= NETIF_F_HIGHDMA; @@ -3212,7 +3483,6 @@ bnad_init(struct bnad *bnad,  	bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);  	if (!bnad->bar0) {  		dev_err(&pdev->dev, "ioremap for bar0 failed\n"); -		pci_set_drvdata(pdev, NULL);  		return -ENOMEM;  	}  	pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0, @@ -3263,7 +3533,6 @@ bnad_uninit(struct bnad *bnad)  	if (bnad->bar0)  		iounmap(bnad->bar0); -	pci_set_drvdata(bnad->pcidev, NULL);  }  /* @@ -3300,17 +3569,12 @@ bnad_pci_init(struct bnad *bnad,  	err = pci_request_regions(pdev, BNAD_NAME);  	if (err)  		goto disable_device; -	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && -	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { +	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {  		*using_dac = true;  	} else { -		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); -		if (err) { -			err = dma_set_coherent_mask(&pdev->dev, -						    DMA_BIT_MASK(32)); -			if (err) -				goto release_regions; -		} +		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); +		if (err) +			goto release_regions;  		*using_dac = false;  	}  	pci_set_master(pdev);  | 
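
The sketches below are illustrative notes on the patterns this patch uses. None of them are part of the diff itself, and every demo_* name is a hypothetical stand-in for the driver's own structures and calls. Two trivial conversions are not covered separately: __constant_htons() becomes plain htons(), which already folds constant arguments at compile time, and the explicit pci_set_drvdata(pdev, NULL) calls are dropped because the driver core clears drvdata on unbind.

The TX unmap fix records the mapped length next to the DMA address, so bnad_tx_buff_unmap() no longer re-reads skb_shinfo(skb)->frags[] with a mismatched index ('nvecs' vs 'vector' in the old code). The dma_len field is presumably added to the TX vector struct in bnad.h, which is not part of this hunk. A minimal sketch of the bookkeeping, assuming a hypothetical demo_tx_vector:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct demo_tx_vector {
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	DEFINE_DMA_UNMAP_LEN(dma_len);
};

static void demo_map_frag(struct device *dev, struct demo_tx_vector *vec,
			  const skb_frag_t *frag, u32 size)
{
	dma_addr_t dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);

	/* remember both address and length at map time ... */
	dma_unmap_addr_set(vec, dma_addr, dma);
	dma_unmap_len_set(vec, dma_len, size);
}

static void demo_unmap_frag(struct device *dev, struct demo_tx_vector *vec)
{
	/* ... so the unmap uses exactly the mapped length, instead of
	 * reaching back into skb_shinfo(skb)->frags[].
	 */
	dma_unmap_page(dev, dma_unmap_addr(vec, dma_addr),
		       dma_unmap_len(vec, dma_len), DMA_TO_DEVICE);
	dma_unmap_addr_set(vec, dma_addr, 0);
}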
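
smp_mb__before_clear_bit() was replaced kernel-wide by smp_mb__before_atomic(), which covers clear_bit() and the other atomic RMW operations; this patch just follows that rename in the three places bnad uses it. The pattern being protected, roughly:

#include <linux/atomic.h>
#include <linux/bitops.h>

/* The completion path publishes its work (freed buffers, doorbell ack)
 * and then releases the "I own this queue" bit.  The barrier ordering
 * those stores before the clear_bit() is now spelled
 * smp_mb__before_atomic().
 */
static void demo_release_queue(unsigned long *qflags, int owner_bit)
{
	/* ... free sent buffers, ack the interrupt block ... */

	smp_mb__before_atomic();
	clear_bit(owner_bit, qflags);
}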
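
bnad_rxq_alloc_init() now sizes its page allocations from rcb->rxq->buffer_size rather than the current enet MTU, and a third unmap-queue type, BNAD_RXBUF_MULTI_BUFF, is added for large-MTU receive on CAT2 ASICs: bnad_init_rx_config() splits frames above 4 KB across four 2 KB buffers. The frame size comes from a BNAD_FRAME_SIZE() macro defined in bnad.h; judging by the expression it replaces, it is presumably ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN. A sketch of that sizing decision, with illustrative names:

#include <linux/if_ether.h>
#include <linux/if_vlan.h>

/* Presumed equivalent of BNAD_FRAME_SIZE(); the real macro is in bnad.h. */
#define DEMO_FRAME_SIZE(mtu)	(ETH_HLEN + VLAN_HLEN + (mtu) + ETH_FCS_LEN)

/* Buffer layout choice mirrored from bnad_init_rx_config(): on CAT2
 * hardware a >4 KB frame is received into several 2 KB buffers
 * (multi-buffer RX) instead of one high-order page allocation.
 */
static void demo_pick_q0_layout(u32 mtu, bool is_cat2, u32 base_depth,
				u32 *buf_size, u32 *num_vecs, u32 *depth,
				bool *multi_buf)
{
	u32 frame = DEMO_FRAME_SIZE(mtu);

	if (is_cat2 && frame > 4096) {
		*buf_size = 2048;
		*num_vecs = 4;			/* buffers per frame */
		*depth = base_depth * 4;	/* more slots, same frame count */
		*multi_buf = true;
	} else {
		*buf_size = frame;
		*num_vecs = 1;
		*depth = base_depth;
		*multi_buf = false;
	}
}

This is also why bnad_change_mtu() calls the new bnad_reinit_rx() when an MTU change crosses the 4 KB boundary on such hardware: the RX objects must be torn down and rebuilt with the other buffer layout.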
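
bnad_cq_process() now tests cmpl->valid before touching any other field and issues rmb() once it is set, because the adapter writes 'valid' last; without the barrier the CPU or compiler could read flags/length from a stale entry. The same check-then-rmb() sequence guards the new walk over follow-on completions in the multi-buffer case. Distilled:

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>

/* Hypothetical completion-entry layout, for illustration only. */
struct demo_cqe {
	__be32	flags;
	__be16	length;
	u8	valid;
};

static bool demo_cqe_ready(const struct demo_cqe *cmpl, u32 *flags, u32 *len)
{
	if (!cmpl->valid)
		return false;

	/* The device sets 'valid' after the other fields; keep the reads
	 * of flags/length from being hoisted above the valid check.
	 */
	rmb();

	*flags = ntohl(cmpl->flags);
	*len = ntohs(cmpl->length);
	return true;
}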
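
For page-based and multi-buffer queues the receive path builds a frag-only skb from napi_get_frags() and hands it to napi_gro_frags(); bnad_cq_setup_skb_frags() walks the per-vector unmap entries, and bnad_cq_drop_packet() walks the same range on error. A condensed version of the assembly step, with the driver's unmap bookkeeping replaced by plain arrays:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

/* Unmap each RX buffer page, attach it as a fragment, then let GRO
 * take ownership of the page-only skb.
 */
static void demo_rx_frags(struct napi_struct *napi, struct device *dev,
			  struct page **pages, dma_addr_t *dmas,
			  u32 *lens, u32 nvecs)
{
	struct sk_buff *skb = napi_get_frags(napi);
	u32 i, totlen = 0;

	if (unlikely(!skb))
		return;		/* caller drops/recycles the buffers */

	for (i = 0; i < nvecs; i++) {
		dma_unmap_page(dev, dmas[i], lens[i], DMA_FROM_DEVICE);
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   pages[i], 0, lens[i]);
		totlen += lens[i];
	}

	/* account the payload once, after all fragments are attached */
	skb->len += totlen;
	skb->data_len += totlen;
	skb->truesize += totlen;

	napi_gro_frags(napi);
}

skb->len, data_len and truesize are adjusted once after the loop; that per-packet total is also what the reworked rx_bytes and bytes_per_intr accounting uses.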
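
RX VLAN tag stripping becomes a switchable feature: NETIF_F_HW_VLAN_CTAG_RX moves into hw_features, vlan_strip_status in the rx_config follows the netdev feature bit, the completion path only calls __vlan_hwaccel_put_tag() when stripping is enabled, and a new .ndo_set_features hook flips the hardware setting at runtime. A generic sketch of both ends; the real driver calls bna_rx_vlan_strip_enable()/_disable():

#include <linux/netdevice.h>
#include <linux/if_vlan.h>

/* Stand-in for the device-specific strip enable/disable call. */
static void demo_hw_set_vlan_strip(struct net_device *dev, bool enable)
{
	/* program the NIC's RX VLAN-strip setting here */
}

/* Typical .ndo_set_features shape: act only on the bits that changed. */
static int demo_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;

	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev))
		demo_hw_set_vlan_strip(dev,
				       !!(features & NETIF_F_HW_VLAN_CTAG_RX));

	return 0;
}

/* RX side: tag the skb only when stripping is enabled on the netdev;
 * otherwise the tag stays in the payload.
 */
static void demo_rx_put_vlan(struct net_device *dev, struct sk_buff *skb,
			     u16 vlan_tci, bool hw_flagged_vlan)
{
	if (hw_flagged_vlan && (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
}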
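
bnad_tso_prepare() switches from open-coded skb_header_cloned()/pskb_expand_head() to skb_cow_head(), which performs the same "copy the header only if it is shared" check in one call:

#include <linux/skbuff.h>

/* Old pattern replaced by this patch:
 *
 *	if (skb_header_cloned(skb)) {
 *		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
 *		...
 *	}
 *
 * skb_cow_head() does the same check-and-copy and returns 0 or a
 * negative errno.
 */
static int demo_make_header_writable(struct sk_buff *skb)
{
	int err = skb_cow_head(skb, 0);

	if (err < 0)
		return err;	/* e.g. -ENOMEM; caller bumps tso_err */

	/* headers may now be modified in place (TSO checksum fixups etc.) */
	return 0;
}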
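
MSI-X setup moves from the deprecated pci_enable_msix(), with its "returned the shortfall, retry with fewer vectors" loop, to pci_enable_msix_range(), which either allocates something in [minvec, maxvec] or fails with a negative errno. A sketch of the return-value handling; the driver additionally recomputes msix_num and falls back to INTx when even the reduced count does not fit:

#include <linux/pci.h>

static int demo_enable_msix(struct pci_dev *pdev, struct msix_entry *tbl,
			    int wanted, int minimum)
{
	int ret = pci_enable_msix_range(pdev, tbl, minimum, wanted);

	if (ret < 0)
		return ret;		/* fall back to INTx */

	if (ret < wanted)
		dev_warn(&pdev->dev, "only %d of %d MSI-X vectors allocated\n",
			 ret, wanted);

	return ret;			/* vectors actually usable */
}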
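
All drop paths in bnad_start_xmit() now free with dev_kfree_skb_any(), which is safe in the hard-IRQ context netpoll can invoke .ndo_start_xmit from, and skb_tx_timestamp() is called right before the doorbell so software TX timestamping works; the tcb pointer is also checked before it is dereferenced. In outline:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (unlikely(skb->len <= ETH_HLEN)) {
		/* dev_kfree_skb_any() picks the irq-safe or normal free
		 * depending on the calling context.
		 */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* ... map buffers, fill the work item ... */

	skb_tx_timestamp(skb);
	/* ring the hardware doorbell here */
	return NETDEV_TX_OK;
}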
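
bnad_set_rx_mode() is rebuilt around two helpers that flatten the netdev unicast and multicast lists into contiguous MAC arrays for bna_rx_ucast_listset()/bna_rx_mcast_listset(); when a list exceeds the CAM capacity reported by bna_attr(), they fall back to BNA_RXMODE_DEFAULT or allmulti, and the BNAD_CF_* flags are recomputed from scratch on every call instead of being toggled incrementally. The gathering step, generically:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Flatten the netdev unicast list into a contiguous ETH_ALEN-stride
 * array, as done before handing it to the firmware list-set call.
 * Returns the entry count, 0 for an empty list, or -ENOMEM.
 */
static int demo_collect_uc_list(struct net_device *netdev, u8 **listp)
{
	struct netdev_hw_addr *ha;
	int count = netdev_uc_count(netdev);
	int entry = 0;
	u8 *list;

	*listp = NULL;
	if (netdev_uc_empty(netdev))
		return 0;

	list = kzalloc(count * ETH_ALEN, GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	netdev_for_each_uc_addr(ha, netdev)
		memcpy(&list[entry++ * ETH_ALEN], ha->addr, ETH_ALEN);

	*listp = list;
	return entry;
}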
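
Finally, bnad_pci_init() uses dma_set_mask_and_coherent(), which sets the streaming and coherent DMA masks together, replacing the nested dma_set_mask()/dma_set_coherent_mask() fallback:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Try 64-bit DMA first, fall back to 32-bit. */
static int demo_set_dma_masks(struct pci_dev *pdev, bool *using_dac)
{
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = true;
		return 0;
	}

	*using_dac = false;
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}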
