/*****************************************************************************
 * $Date: 2005/06/21 18:29:48 $ *
 * part of the Chelsio 10Gb Ethernet Driver. *
 * This program is free software; you can redistribute it and/or modify *
 * it under the terms of the GNU General Public License, version 2, as *
 * published by the Free Software Foundation. *
 * You should have received a copy of the GNU General Public License along *
 * with this program; if not, write to the Free Software Foundation, Inc., *
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
 * http://www.chelsio.com *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
 * All rights reserved. *
 * Maintainers: maintainers@chelsio.com *
 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
 *          Tina Yang <tainay@chelsio.com> *
 *          Felix Marti <felix@chelsio.com> *
 *          Scott Bardone <sbardone@chelsio.com> *
 *          Kurt Ottaway <kottaway@chelsio.com> *
 *          Frank DiMambro <frank@chelsio.com> *
 ****************************************************************************/
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/tcp.h>
#define SGE_FREELQ_N 2
#define SGE_CMDQ0_E_N 1024
#define SGE_CMDQ1_E_N 128
#define SGE_FREEL_SIZE 4096
#define SGE_JUMBO_FREEL_SIZE 512
#define SGE_FREEL_REFILL_THRESH 16
#define SGE_RESPQ_E_N 1024
#define SGE_INTRTIMER_NRES 1000
#define SGE_RX_COPY_THRES 256
#define SGE_RX_SM_BUF_SIZE 1536
# define SGE_RX_DROP_THRES 2
#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
 * Period of the TX buffer reclaim timer. This timer does not need to run
 * frequently as TX buffers are usually reclaimed by new TX packets.
#define TX_RECLAIM_PERIOD (HZ / 4)
# define NET_IP_ALIGN 2
#define M_CMD_LEN 0x7fffffff
#define V_CMD_LEN(v) (v)
#define G_CMD_LEN(v) ((v) & M_CMD_LEN)
#define V_CMD_GEN1(v) ((v) << 31)
#define V_CMD_GEN2(v) (v)
#define F_CMD_DATAVALID (1 << 1)
#define F_CMD_SOP (1 << 2)
#define V_CMD_EOP(v) ((v) << 3)
 * Command queue, receive buffer list, and response queue descriptors.
#if defined(__BIG_ENDIAN_BITFIELD)
	u32 Cmdq1CreditReturn : 5;
	u32 Cmdq1DmaComplete : 5;
	u32 Cmdq0CreditReturn : 5;
	u32 Cmdq0DmaComplete : 5;
	u32 GenerationBit : 1;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u32 GenerationBit : 1;
	u32 Cmdq0DmaComplete : 5;
	u32 Cmdq0CreditReturn : 5;
	u32 Cmdq1DmaComplete : 5;
	u32 Cmdq1CreditReturn : 5;
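/*
 * Illustrative sketch (not part of the original source): the cmdQ_e and
 * freelQ_e hardware descriptor definitions are elided above, so the layout
 * below is only inferred from the fields this file actually touches
 * (addr_lo/addr_hi, len_gen, flags, gen2). Field order and widths are
 * assumptions, not the authoritative hardware layout.
 */
#if 0
struct cmdQ_e {			/* TX command descriptor (illustrative) */
	u32 addr_lo;		/* low 32 bits of the buffer DMA address */
	u32 len_gen;		/* V_CMD_LEN(len) | V_CMD_GEN1(gen) */
	u32 flags;		/* F_CMD_DATAVALID, F_CMD_SOP, V_CMD_EOP(), ... */
	u32 addr_hi;		/* high 32 bits of the buffer DMA address */
};

struct freelQ_e {		/* RX free-list descriptor (illustrative) */
	u32 addr_lo;
	u32 len_gen;		/* V_CMD_LEN(dma_len) | V_CMD_GEN1(genbit) */
	u32 gen2;		/* V_CMD_GEN2(genbit) */
	u32 addr_hi;
};
#endif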
 * SW Context Command and Freelist Queue Descriptors
	DECLARE_PCI_UNMAP_ADDR(dma_addr);
	DECLARE_PCI_UNMAP_LEN(dma_len);
	DECLARE_PCI_UNMAP_ADDR(dma_addr);
	DECLARE_PCI_UNMAP_LEN(dma_len);
 * SW command, freelist and response rings
	unsigned long status; /* HW DMA fetch status */
	unsigned int in_use; /* # of in-use command descriptors */
	unsigned int size; /* # of descriptors */
	unsigned int processed; /* total # of descs HW has processed */
	unsigned int cleaned; /* total # of descs SW has reclaimed */
	unsigned int stop_thres; /* SW TX queue suspend threshold */
	u16 pidx; /* producer index (SW) */
	u16 cidx; /* consumer index (HW) */
	u8 genbit; /* current generation (=valid) bit */
	u8 sop; /* is next entry start of packet? */
	struct cmdQ_e *entries; /* HW command descriptor Q */
	struct cmdQ_ce *centries; /* SW command context descriptor Q */
	spinlock_t lock; /* Lock to protect cmdQ enqueuing */
	dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */
	unsigned int credits; /* # of available RX buffers */
	unsigned int size; /* free list capacity */
	u16 pidx; /* producer index (SW) */
	u16 cidx; /* consumer index (HW) */
	u16 rx_buffer_size; /* Buffer size on this free list */
	u16 dma_offset; /* DMA offset to align IP headers */
	u16 recycleq_idx; /* skb recycle q to use */
	u8 genbit; /* current generation (=valid) bit */
	struct freelQ_e *entries; /* HW freelist descriptor Q */
	struct freelQ_ce *centries; /* SW freelist context descriptor Q */
	dma_addr_t dma_addr; /* DMA addr HW freelist descriptor Q */
	unsigned int credits; /* credits to be returned to SGE */
	unsigned int size; /* # of response Q descriptors */
	u16 cidx; /* consumer index (SW) */
	u8 genbit; /* current generation(=valid) bit */
	struct respQ_e *entries; /* HW response descriptor Q */
	dma_addr_t dma_addr; /* DMA addr HW response descriptor Q */
/* Bit flags for cmdQ.status */
	CMDQ_STAT_RUNNING = 1, /* fetch engine is running */
	CMDQ_STAT_LAST_PKT_DB = 2 /* last packet rung the doorbell */
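/*
 * Illustrative helper (not part of the original file): the queues above all
 * pair a ring index with a generation bit. When an index wraps past the ring
 * size it restarts at 0 and the generation bit flips, so entries written in
 * the previous lap no longer match the current generation and read as stale.
 * The elided wrap-around code in refill_free_list() and process_responses()
 * follows this pattern; the helper name here is made up for illustration.
 */
static inline void ring_idx_advance(u16 *idx, u8 *genbit, unsigned int size)
{
	if (++*idx == size) {
		*idx = 0;		/* wrap back to the first descriptor */
		*genbit ^= 1;		/* flip the generation (=valid) bit */
	}
}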
 * Main SGE data structure
 * Interrupts are handled by a single CPU and it is likely that on an MP system
 * the application is migrated to another CPU. In that scenario, we try to
 * separate the RX (in irq context) and TX state in order to decrease memory
	struct adapter *adapter; /* adapter backpointer */
	struct net_device *netdev; /* netdevice backpointer */
	struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */
	struct respQ respQ; /* response Q */
	unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */
	unsigned int rx_pkt_pad; /* RX padding for L2 packets */
	unsigned int jumbo_fl; /* jumbo freelist Q index */
	unsigned int intrtimer_nres; /* no-resource interrupt timer */
	unsigned int fixed_intrtimer; /* non-adaptive interrupt timer */
	struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
	struct timer_list espibug_timer;
	unsigned int espibug_timeout;
	struct sk_buff *espibug_skb;
	u32 sge_control; /* shadow value of sge control reg */
	struct sge_intr_counts stats;
	struct sge_port_stats port_stats[MAX_NPORTS];
	struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
 * PIO to indicate that memory mapped Q contains valid descriptor(s).
static inline void doorbell_pio(struct adapter *adapter, u32 val)
	writel(val, adapter->regs + A_SG_DOORBELL);
 * Frees all RX buffers on the freelist Q. The caller must make sure that
 * the SGE is turned off before calling this function.
static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
	unsigned int cidx = q->cidx;
	while (q->credits--) {
		struct freelQ_ce *ce = &q->centries[cidx];
		pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
				 pci_unmap_len(ce, dma_len),
		dev_kfree_skb(ce->skb);
		if (++cidx == q->size)
 * Free RX free list and response queue resources.
static void free_rx_resources(struct sge *sge)
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;
	if (sge->respQ.entries) {
		size = sizeof(struct respQ_e) * sge->respQ.size;
		pci_free_consistent(pdev, size, sge->respQ.entries,
				    sge->respQ.dma_addr);
	for (i = 0; i < SGE_FREELQ_N; i++) {
		struct freelQ *q = &sge->freelQ[i];
		free_freelQ_buffers(pdev, q);
		size = sizeof(struct freelQ_e) * q->size;
		pci_free_consistent(pdev, size, q->entries,
 * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;
	for (i = 0; i < SGE_FREELQ_N; i++) {
		struct freelQ *q = &sge->freelQ[i];
		q->size = p->freelQ_size[i];
		q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
		size = sizeof(struct freelQ_e) * q->size;
		q->entries = (struct freelQ_e *)
			pci_alloc_consistent(pdev, size, &q->dma_addr);
		memset(q->entries, 0, size);
		size = sizeof(struct freelQ_ce) * q->size;
		q->centries = kzalloc(size, GFP_KERNEL);
	 * Calculate the buffer sizes for the two free lists. FL0 accommodates
	 * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
	 * including all the sk_buff overhead.
	 * Note: For T2 FL0 and FL1 are reversed.
	sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
		sizeof(struct cpl_rx_data) +
		sge->freelQ[!sge->jumbo_fl].dma_offset;
	sge->freelQ[sge->jumbo_fl].rx_buffer_size = (16 * 1024) -
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	 * Setup which skb recycle Q should be used when recycling buffers from
	sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
	sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;
	sge->respQ.genbit = 1;
	sge->respQ.size = SGE_RESPQ_E_N;
	sge->respQ.credits = 0;
	size = sizeof(struct respQ_e) * sge->respQ.size;
	sge->respQ.entries = (struct respQ_e *)
		pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
	if (!sge->respQ.entries)
	memset(sge->respQ.entries, 0, size);
	free_rx_resources(sge);
 * Reclaims n TX descriptors and frees the buffers associated with them.
static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int cidx = q->cidx;
	ce = &q->centries[cidx];
	pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
			 pci_unmap_len(ce, dma_len),
	pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
		       pci_unmap_len(ce, dma_len),
	dev_kfree_skb(ce->skb);
	if (++cidx == q->size) {
 * Assumes that SGE is stopped and all interrupts are disabled.
static void free_tx_resources(struct sge *sge)
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;
	for (i = 0; i < SGE_CMDQ_N; i++) {
		struct cmdQ *q = &sge->cmdQ[i];
		free_cmdQ_buffers(sge, q, q->in_use);
		size = sizeof(struct cmdQ_e) * q->size;
		pci_free_consistent(pdev, size, q->entries,
 * Allocates basic TX resources, consisting of memory mapped command Qs.
static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;
	for (i = 0; i < SGE_CMDQ_N; i++) {
		struct cmdQ *q = &sge->cmdQ[i];
		q->size = p->cmdQ_size[i];
		q->processed = q->cleaned = 0;
		spin_lock_init(&q->lock);
		size = sizeof(struct cmdQ_e) * q->size;
		q->entries = (struct cmdQ_e *)
			pci_alloc_consistent(pdev, size, &q->dma_addr);
		memset(q->entries, 0, size);
		size = sizeof(struct cmdQ_ce) * q->size;
		q->centries = kzalloc(size, GFP_KERNEL);
	 * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
	 * only. For queue 0 set the stop threshold so we can handle one more
	 * packet from each port, plus reserve an additional 24 entries for
	 * Ethernet packets only. Queue 1 never suspends nor do we reserve
	 * space for Ethernet packets.
	sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
	free_tx_resources(sge);
static inline void setup_ring_params(struct adapter *adapter, u64 addr,
				     u32 size, int base_reg_lo,
				     int base_reg_hi, int size_reg)
	writel((u32)addr, adapter->regs + base_reg_lo);
	writel(addr >> 32, adapter->regs + base_reg_hi);
	writel(size, adapter->regs + size_reg);
 * Enable/disable VLAN acceleration.
void t1_set_vlan_accel(struct adapter *adapter, int on_off)
	struct sge *sge = adapter->sge;
	sge->sge_control &= ~F_VLAN_XTRACT;
	sge->sge_control |= F_VLAN_XTRACT;
	if (adapter->open_device_map) {
		writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
		readl(adapter->regs + A_SG_CONTROL); /* flush */
 * Programs the various SGE registers. The hardware engine is not enabled yet,
 * but sge->sge_control is set up and ready to go.
static void configure_sge(struct sge *sge, struct sge_params *p)
	struct adapter *ap = sge->adapter;
	writel(0, ap->regs + A_SG_CONTROL);
	setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
			  A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
	setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
			  A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
	setup_ring_params(ap, sge->freelQ[0].dma_addr,
			  sge->freelQ[0].size, A_SG_FL0BASELWR,
			  A_SG_FL0BASEUPR, A_SG_FL0SIZE);
	setup_ring_params(ap, sge->freelQ[1].dma_addr,
			  sge->freelQ[1].size, A_SG_FL1BASELWR,
			  A_SG_FL1BASEUPR, A_SG_FL1SIZE);
	/* The threshold comparison uses <. */
	writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);
	setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
			  A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
	writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);
	sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
		F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
		V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
		F_DISABLE_FL0_GTS | F_DISABLE_FL1_GTS |
		V_RX_PKT_OFFSET(sge->rx_pkt_pad);
#if defined(__BIG_ENDIAN_BITFIELD)
	sge->sge_control |= F_ENABLE_BIG_ENDIAN;
	/* Initialize no-resource timer */
	sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);
	t1_sge_set_coalesce_params(sge, p);
 * Return the payload capacity of the jumbo free-list buffers.
static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
	return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
		sge->freelQ[sge->jumbo_fl].dma_offset -
		sizeof(struct cpl_rx_data);
 * Frees all SGE related resources and the sge structure itself
void t1_sge_destroy(struct sge *sge)
	if (sge->espibug_skb)
		kfree_skb(sge->espibug_skb);
	free_tx_resources(sge);
	free_rx_resources(sge);
 * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
 * context Q) until the Q is full or alloc_skb fails.
 * It is possible that the generation bits already match, indicating that the
 * buffer is already valid and nothing needs to be done. This happens when we
 * copied a received buffer into a new sk_buff during the interrupt processing.
 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
 * we specify a RX_OFFSET in order to make sure that the IP header is 4B
static void refill_free_list(struct sge *sge, struct freelQ *q)
	struct pci_dev *pdev = sge->adapter->pdev;
	struct freelQ_ce *ce = &q->centries[q->pidx];
	struct freelQ_e *e = &q->entries[q->pidx];
	unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
	while (q->credits < q->size) {
		skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
		skb_reserve(skb, q->dma_offset);
		mapping = pci_map_single(pdev, skb->data, dma_len,
		pci_unmap_addr_set(ce, dma_addr, mapping);
		pci_unmap_len_set(ce, dma_len, dma_len);
		e->addr_lo = (u32)mapping;
		e->addr_hi = (u64)mapping >> 32;
		e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
		e->gen2 = V_CMD_GEN2(q->genbit);
		if (++q->pidx == q->size) {
 * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
 * of both rings, we go into 'few interrupt mode' in order to give the system
 * time to free up resources.
static void freelQs_empty(struct sge *sge)
	struct adapter *adapter = sge->adapter;
	u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
	refill_free_list(sge, &sge->freelQ[0]);
	refill_free_list(sge, &sge->freelQ[1]);
	if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
	    sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
		irq_reg |= F_FL_EXHAUSTED;
		irqholdoff_reg = sge->fixed_intrtimer;
		/* Clear the F_FL_EXHAUSTED interrupts for now */
		irq_reg &= ~F_FL_EXHAUSTED;
		irqholdoff_reg = sge->intrtimer_nres;
	writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
	writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);
	/* We reenable the Qs to force a freelist GTS interrupt later */
	doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
			F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
 * Disable SGE Interrupts
void t1_sge_intr_disable(struct sge *sge)
	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
	writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
	writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
 * Enable SGE interrupts.
void t1_sge_intr_enable(struct sge *sge)
	u32 en = SGE_INT_ENABLE;
	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
	if (sge->adapter->flags & TSO_CAPABLE)
		en &= ~F_PACKET_TOO_BIG;
	writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
	writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
 * Clear SGE interrupts.
void t1_sge_intr_clear(struct sge *sge)
	writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
	writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
 * SGE 'Error' interrupt handler
int t1_sge_intr_error_handler(struct sge *sge)
	struct adapter *adapter = sge->adapter;
	u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
	if (adapter->flags & TSO_CAPABLE)
		cause &= ~F_PACKET_TOO_BIG;
	if (cause & F_RESPQ_EXHAUSTED)
		sge->stats.respQ_empty++;
	if (cause & F_RESPQ_OVERFLOW) {
		sge->stats.respQ_overflow++;
		CH_ALERT("%s: SGE response queue overflow\n",
	if (cause & F_FL_EXHAUSTED) {
		sge->stats.freelistQ_empty++;
	if (cause & F_PACKET_TOO_BIG) {
		sge->stats.pkt_too_big++;
		CH_ALERT("%s: SGE max packet size exceeded\n",
	if (cause & F_PACKET_MISMATCH) {
		sge->stats.pkt_mismatch++;
		CH_ALERT("%s: SGE packet mismatch\n", adapter->name);
	if (cause & SGE_INT_FATAL)
		t1_fatal_err(adapter);
	writel(cause, adapter->regs + A_SG_INT_CAUSE);
const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge)
const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port)
	return &sge->port_stats[port];
 * recycle_fl_buf - recycle a free list buffer
 * @idx: index of buffer to recycle
 * Recycles the specified buffer on the given free list by adding it at
 * the next available slot on the list.
static void recycle_fl_buf(struct freelQ *fl, int idx)
	struct freelQ_e *from = &fl->entries[idx];
	struct freelQ_e *to = &fl->entries[fl->pidx];
	fl->centries[fl->pidx] = fl->centries[idx];
	to->addr_lo = from->addr_lo;
	to->addr_hi = from->addr_hi;
	to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
	to->gen2 = V_CMD_GEN2(fl->genbit);
	if (++fl->pidx == fl->size) {
 * get_packet - return the next ingress packet buffer
 * @pdev: the PCI device that received the packet
 * @fl: the SGE free list holding the packet
 * @len: the actual packet length, excluding any SGE padding
 * @dma_pad: padding at beginning of buffer left by SGE DMA
 * @skb_pad: padding to be used if the packet is copied
 * @copy_thres: length threshold under which a packet should be copied
 * @drop_thres: # of remaining buffers before we start dropping packets
 * Get the next packet from a free list and complete setup of the
 * sk_buff. If the packet is small we make a copy and recycle the
 * original buffer, otherwise we use the original buffer itself. If a
 * positive drop threshold is supplied packets are dropped and their
 * buffers recycled if (a) the number of remaining buffers is under the
 * threshold and the packet is too big to copy, or (b) the packet should
 * be copied but there is no memory for the copy.
static inline struct sk_buff *get_packet(struct pci_dev *pdev,
					 struct freelQ *fl, unsigned int len,
					 int dma_pad, int skb_pad,
					 unsigned int copy_thres,
					 unsigned int drop_thres)
	struct freelQ_ce *ce = &fl->centries[fl->cidx];
	if (len < copy_thres) {
		skb = alloc_skb(len + skb_pad, GFP_ATOMIC);
		if (likely(skb != NULL)) {
			skb_reserve(skb, skb_pad);
			pci_dma_sync_single_for_cpu(pdev,
					pci_unmap_addr(ce, dma_addr),
					pci_unmap_len(ce, dma_len),
			memcpy(skb->data, ce->skb->data + dma_pad, len);
			pci_dma_sync_single_for_device(pdev,
					pci_unmap_addr(ce, dma_addr),
					pci_unmap_len(ce, dma_len),
		} else if (!drop_thres)
		recycle_fl_buf(fl, fl->cidx);
	if (fl->credits < drop_thres) {
		recycle_fl_buf(fl, fl->cidx);
	pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
			 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
	skb_reserve(skb, dma_pad);
 * unexpected_offload - handle an unexpected offload packet
 * @adapter: the adapter
 * @fl: the free list that received the packet
 * Called when we receive an unexpected offload packet (e.g., the TOE
 * function is disabled or the card is a NIC). Prints a message and
 * recycles the buffer.
static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
	struct freelQ_ce *ce = &fl->centries[fl->cidx];
	struct sk_buff *skb = ce->skb;
	pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr),
			    pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
	CH_ERR("%s: unexpected offload packet, cmd %u\n",
	       adapter->name, *skb->data);
	recycle_fl_buf(fl, fl->cidx);
 * Write the command descriptors to transmit the given skb starting at
 * descriptor pidx with the given generation.
static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
				  unsigned int pidx, unsigned int gen,
	struct cmdQ_e *e, *e1;
	unsigned int i, flags, nfrags = skb_shinfo(skb)->nr_frags;
	mapping = pci_map_single(adapter->pdev, skb->data,
				 skb->len - skb->data_len, PCI_DMA_TODEVICE);
	ce = &q->centries[pidx];
	pci_unmap_addr_set(ce, dma_addr, mapping);
	pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len);
	flags = F_CMD_DATAVALID | F_CMD_SOP | V_CMD_EOP(nfrags == 0) |
	e = &q->entries[pidx];
	e->addr_lo = (u32)mapping;
	e->addr_hi = (u64)mapping >> 32;
	e->len_gen = V_CMD_LEN(skb->len - skb->data_len) | V_CMD_GEN1(gen);
	for (e1 = e, i = 0; nfrags--; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		if (++pidx == q->size) {
		mapping = pci_map_page(adapter->pdev, frag->page,
				       frag->page_offset, frag->size,
		pci_unmap_addr_set(ce, dma_addr, mapping);
		pci_unmap_len_set(ce, dma_len, frag->size);
		e1->addr_lo = (u32)mapping;
		e1->addr_hi = (u64)mapping >> 32;
		e1->len_gen = V_CMD_LEN(frag->size) | V_CMD_GEN1(gen);
		e1->flags = F_CMD_DATAVALID | V_CMD_EOP(nfrags == 0) |
 * Clean up completed Tx buffers.
static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
	unsigned int reclaim = q->processed - q->cleaned;
	free_cmdQ_buffers(sge, q, reclaim);
	q->cleaned += reclaim;
#ifndef SET_ETHTOOL_OPS
# define __netif_rx_complete(dev) netif_rx_complete(dev)
 * sge_rx - process an ingress ethernet packet
 * @sge: the sge structure
 * @fl: the free list that contains the packet buffer
 * @len: the packet length
 * Process an ingress ethernet packet and deliver it to the stack.
static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
	struct cpl_rx_pkt *p;
	struct adapter *adapter = sge->adapter;
	sge->stats.ethernet_pkts++;
	skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad,
			 sge->rx_pkt_pad, 2, SGE_RX_COPY_THRES,
		sge->port_stats[0].rx_drops++; /* charge only port 0 for now */
	p = (struct cpl_rx_pkt *)skb->data;
	skb_pull(skb, sizeof(*p));
	skb->dev = adapter->port[p->iff].dev;
	skb->dev->last_rx = jiffies;
	skb->protocol = eth_type_trans(skb, skb->dev);
	if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
	    skb->protocol == htons(ETH_P_IP) &&
	    (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
		sge->port_stats[p->iff].rx_cso_good++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->ip_summed = CHECKSUM_NONE;
	if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
		sge->port_stats[p->iff].vlan_xtract++;
		if (adapter->params.sge.polling)
			vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
			vlan_hwaccel_rx(skb, adapter->vlan_grp,
	} else if (adapter->params.sge.polling)
		netif_receive_skb(skb);
 * Returns true if a command queue has enough available descriptors that
 * we can resume Tx operation after temporarily disabling its packet queue.
static inline int enough_free_Tx_descs(const struct cmdQ *q)
	unsigned int r = q->processed - q->cleaned;
	return q->in_use - r < (q->size >> 1);
 * Called when sufficient space has become available in the SGE command queues
 * after the Tx packet schedulers have been suspended to restart the Tx path.
static void restart_tx_queues(struct sge *sge)
	struct adapter *adap = sge->adapter;
	if (enough_free_Tx_descs(&sge->cmdQ[0])) {
		for_each_port(adap, i) {
			struct net_device *nd = adap->port[i].dev;
			if (test_and_clear_bit(nd->if_port,
					       &sge->stopped_tx_queues) &&
			    netif_running(nd)) {
				sge->stats.cmdQ_restarted[2]++;
				netif_wake_queue(nd);
 * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
static unsigned int update_tx_info(struct adapter *adapter,
	struct sge *sge = adapter->sge;
	struct cmdQ *cmdq = &sge->cmdQ[0];
	cmdq->processed += pr0;
	if (flags & F_CMDQ0_ENABLE) {
		clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
		if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
		    !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
			set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		flags &= ~F_CMDQ0_ENABLE;
	if (unlikely(sge->stopped_tx_queues != 0))
		restart_tx_queues(sge);
 * Process SGE responses, up to the supplied budget. Returns the number of
 * responses processed. A negative budget is effectively unlimited.
static int process_responses(struct adapter *adapter, int budget)
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	struct respQ_e *e = &q->entries[q->cidx];
	int budget_left = budget;
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
	while (likely(budget_left && e->GenerationBit == q->genbit)) {
		flags |= e->Qsleeping;
		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;
		/* We batch updates to the TX side to avoid cacheline
		 * ping-pong of TX state information on MP where the sender
		 * might run on a different CPU than this function...
		if (unlikely(flags & F_CMDQ0_ENABLE || cmdq_processed[0] > 64)) {
			flags = update_tx_info(adapter, flags, cmdq_processed[0]);
			cmdq_processed[0] = 0;
		if (unlikely(cmdq_processed[1] > 16)) {
			sge->cmdQ[1].processed += cmdq_processed[1];
			cmdq_processed[1] = 0;
		if (likely(e->DataValid)) {
			struct freelQ *fl = &sge->freelQ[e->FreelistQid];
			BUG_ON(!e->Sop || !e->Eop);
			if (unlikely(e->Offload))
				unexpected_offload(adapter, fl);
				sge_rx(sge, fl, e->BufferLength);
			/*
			 * Note: this depends on each packet consuming a
			 * single free-list buffer; cf. the BUG above.
			if (++fl->cidx == fl->size)
			if (unlikely(--fl->credits <
				     fl->size - SGE_FREEL_REFILL_THRESH))
				refill_free_list(sge, fl);
			sge->stats.pure_rsps++;
		if (unlikely(++q->cidx == q->size)) {
		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];
	budget -= budget_left;
 * A simpler version of process_responses() that handles only pure (i.e.,
 * non data-carrying) responses. Such responses are too lightweight to justify
 * calling a softirq when using NAPI, so we handle them specially in hard
 * interrupt context. The function is called with a pointer to a response,
 * which the caller must ensure is a valid pure response. Returns 1 if it
 * encounters a valid data-carrying response, 0 otherwise.
static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
		flags |= e->Qsleeping;
		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;
		if (unlikely(++q->cidx == q->size)) {
		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
		sge->stats.pure_rsps++;
	} while (e->GenerationBit == q->genbit && !e->DataValid);
	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];
	return e->GenerationBit == q->genbit;
 * Handler for new data events when using NAPI. This does not need any locking
 * or protection from interrupts as data interrupts are off at this point and
 * other adapter interrupts do not interfere.
static int t1_poll(struct net_device *dev, int *budget)
	struct adapter *adapter = dev->priv;
	int effective_budget = min(*budget, dev->quota);
	int work_done = process_responses(adapter, effective_budget);
	*budget -= work_done;
	dev->quota -= work_done;
	if (work_done >= effective_budget)
	__netif_rx_complete(dev);
	 * Because we don't atomically flush the following write it is
	 * possible that in very rare cases it can reach the device in a way
	 * that races with a new response being written plus an error interrupt
	 * causing the NAPI interrupt handler below to return unhandled status
	 * to the OS. To protect against this would require flushing the write
	 * and doing both the write and the flush with interrupts off. Way too
	 * expensive and unjustifiable given the rarity of the race.
	writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
 * Returns true if the device is already scheduled for polling.
static inline int napi_is_scheduled(struct net_device *dev)
	return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
 * NAPI version of the main interrupt handler.
static irqreturn_t t1_interrupt_napi(int irq, void *data)
	struct adapter *adapter = data;
	struct sge *sge = adapter->sge;
	struct respQ *q = &adapter->sge->respQ;
	 * Clear the SGE_DATA interrupt first thing. Normally the NAPI
	 * handler has control of the response queue and the interrupt handler
	 * can look at the queue reliably only once it knows NAPI is off.
	 * We can't wait that long to clear the SGE_DATA interrupt because we
	 * could race with t1_poll rearming the SGE interrupt, so we need to
	 * clear the interrupt speculatively and really early on.
	writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
	spin_lock(&adapter->async_lock);
	if (!napi_is_scheduled(sge->netdev)) {
		struct respQ_e *e = &q->entries[q->cidx];
		if (e->GenerationBit == q->genbit) {
			    process_pure_responses(adapter, e)) {
				if (likely(__netif_rx_schedule_prep(sge->netdev)))
					__netif_rx_schedule(sge->netdev);
				else if (net_ratelimit())
						"NAPI schedule failure!\n");
			writel(q->cidx, adapter->regs + A_SG_SLEEPING);
		writel(q->cidx, adapter->regs + A_SG_SLEEPING);
	if (readl(adapter->regs + A_PL_CAUSE) & F_PL_INTR_SGE_DATA)
		printk(KERN_ERR "data interrupt while NAPI running\n");
	handled = t1_slow_intr_handler(adapter);
	sge->stats.unhandled_irqs++;
	spin_unlock(&adapter->async_lock);
	return IRQ_RETVAL(handled != 0);
 * Main interrupt handler, optimized assuming that we took a 'DATA'
 * 1. Clear the interrupt
 * 2. Loop while we find valid descriptors and process them; accumulate
 *    information that can be processed after the loop
 * 3. Tell the SGE at which index we stopped processing descriptors
 * 4. Bookkeeping; free TX buffers, ring doorbell if there are any
 *    outstanding TX buffers waiting, replenish RX buffers, potentially
 *    reenable upper layers if they were turned off due to lack of TX
 *    resources which are available again.
 * 5. If we took an interrupt but no valid respQ descriptors were found, we
 *    let the slow_intr_handler run and do error handling.
static irqreturn_t t1_interrupt(int irq, void *cookie)
	struct adapter *adapter = cookie;
	struct respQ *Q = &adapter->sge->respQ;
	spin_lock(&adapter->async_lock);
	e = &Q->entries[Q->cidx];
	writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
	if (likely(e->GenerationBit == Q->genbit))
		work_done = process_responses(adapter, -1);
		work_done = t1_slow_intr_handler(adapter);
	 * The unconditional clearing of the PL_CAUSE above may have raced
	 * with DMA completion and the corresponding generation of a response
	 * to cause us to miss the resulting data interrupt. The next write
	 * is also unconditional to recover the missed interrupt and render
	 * this race harmless.
	writel(Q->cidx, adapter->regs + A_SG_SLEEPING);
	adapter->sge->stats.unhandled_irqs++;
	spin_unlock(&adapter->async_lock);
	return IRQ_RETVAL(work_done != 0);
irq_handler_t t1_select_intr_handler(adapter_t *adapter)
	return adapter->params.sge.polling ? t1_interrupt_napi : t1_interrupt;
 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
 * The code figures out how many entries the sk_buff will require in the
 * cmdQ and updates the cmdQ data structure with the state once the enqueue
 * has completed. Then, it doesn't access the global structure anymore, but
 * uses the corresponding fields on the stack. In conjunction with a spinlock
 * around that code, we can make the function reentrant without holding the
 * lock when we actually enqueue (which might be expensive, especially on
 * architectures with IO MMUs).
 * This runs with softirqs disabled.
static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
		     unsigned int qid, struct net_device *dev)
	struct sge *sge = adapter->sge;
	struct cmdQ *q = &sge->cmdQ[qid];
	unsigned int credits, pidx, genbit, count;
	spin_lock(&q->lock);
	reclaim_completed_tx(sge, q);
	credits = q->size - q->in_use;
	count = 1 + skb_shinfo(skb)->nr_frags;
	{ /* Ethernet packet */
		if (unlikely(credits < count)) {
			netif_stop_queue(dev);
			set_bit(dev->if_port, &sge->stopped_tx_queues);
			sge->stats.cmdQ_full[2]++;
			spin_unlock(&q->lock);
			if (!netif_queue_stopped(dev))
				CH_ERR("%s: Tx ring full while queue awake!\n",
			return NETDEV_TX_BUSY;
		if (unlikely(credits - count < q->stop_thres)) {
			sge->stats.cmdQ_full[2]++;
			netif_stop_queue(dev);
			set_bit(dev->if_port, &sge->stopped_tx_queues);
	if (q->pidx >= q->size) {
	spin_unlock(&q->lock);
	write_tx_descs(adapter, skb, pidx, genbit, q);
	 * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring
	 * the doorbell if the Q is asleep. There is a natural race, where
	 * the hardware is going to sleep just after we checked, however,
	 * then the interrupt handler will detect the outstanding TX packet
	 * and ring the doorbell for us.
		doorbell_pio(adapter, F_CMDQ1_ENABLE);
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
	return NETDEV_TX_OK;
#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
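/*
 * Worked example (illustrative, not from the original file): for a plain
 * Ethernet II frame with an MSS of 1460, MK_ETH_TYPE_MSS(CPL_ETH_II, 1460)
 * packs the MSS into the low 14 bits and the frame type into the bits above
 * them, i.e. (1460 & 0x3FFF) | (CPL_ETH_II << 14).
 */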
 * eth_hdr_len - return the length of an Ethernet header
 * @data: pointer to the start of the Ethernet header
 * Returns the length of an Ethernet header, including optional VLAN tag.
static inline int eth_hdr_len(const void *data)
	const struct ethhdr *e = data;
	return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
	struct adapter *adapter = dev->priv;
	struct sge_port_stats *st = &adapter->sge->port_stats[dev->if_port];
	struct sge *sge = adapter->sge;
	struct cpl_tx_pkt *cpl;
	if (skb_is_gso(skb)) {
		struct cpl_tx_pkt_lso *hdr;
		eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
			CPL_ETH_II : CPL_ETH_II_VLAN;
		hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
		hdr->opcode = CPL_TX_PKT_LSO;
		hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
		hdr->ip_hdr_words = skb->nh.iph->ihl;
		hdr->tcp_hdr_words = skb->h.th->doff;
		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
					  skb_shinfo(skb)->gso_size));
		hdr->len = htonl(skb->len - sizeof(*hdr));
		cpl = (struct cpl_tx_pkt *)hdr;
		sge->stats.tx_lso_pkts++;
		 * Packets shorter than ETH_HLEN can break the MAC, drop them
		 * early. Also, we may get oversized packets because some
		 * parts of the kernel don't handle our unusual hard_header_len
		 * right, drop those too.
		if (unlikely(skb->len < ETH_HLEN ||
			     skb->len > dev->mtu + eth_hdr_len(skb->data))) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		 * We are using a non-standard hard_header_len and some kernel
		 * components, such as pktgen, do not handle it right.
		 * Complain when this happens but try to fix things up.
		if (unlikely(skb_headroom(skb) <
			     dev->hard_header_len - ETH_HLEN)) {
			struct sk_buff *orig_skb = skb;
			if (net_ratelimit())
				printk(KERN_ERR "%s: inadequate headroom in "
				       "Tx packet\n", dev->name);
			skb = skb_realloc_headroom(skb, sizeof(*cpl));
			dev_kfree_skb_any(orig_skb);
				return NETDEV_TX_OK;
		if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
		    skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb->nh.iph->protocol == IPPROTO_UDP)
			if (unlikely(skb_checksum_help(skb))) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
		/* Hmmm, assuming we catch the gratuitous ARP... and we'll use
		 * it to flush out stuck espi packets...
		if (unlikely(!adapter->sge->espibug_skb)) {
			if (skb->protocol == htons(ETH_P_ARP) &&
			    skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) {
				adapter->sge->espibug_skb = skb;
				/* We want to re-use this skb later. We
				 * simply bump the reference count and it
				 * will not be freed...
		cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
		cpl->opcode = CPL_TX_PKT;
		cpl->ip_csum_dis = 1; /* SW calculates IP csum */
		cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
		/* the length field isn't used so don't bother setting it */
		st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
		sge->stats.tx_do_cksum += (skb->ip_summed == CHECKSUM_PARTIAL);
		sge->stats.tx_reg_pkts++;
	cpl->iff = dev->if_port;
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		cpl->vlan_valid = 1;
		cpl->vlan = htons(vlan_tx_tag_get(skb));
		cpl->vlan_valid = 0;
	dev->trans_start = jiffies;
	return t1_sge_tx(skb, adapter, 0, dev);
 * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled.
static void sge_tx_reclaim_cb(unsigned long data)
	struct sge *sge = (struct sge *)data;
	for (i = 0; i < SGE_CMDQ_N; ++i) {
		struct cmdQ *q = &sge->cmdQ[i];
		if (!spin_trylock(&q->lock))
		reclaim_completed_tx(sge, q);
		if (i == 0 && q->in_use) /* flush pending credits */
			writel(F_CMDQ0_ENABLE,
			       sge->adapter->regs + A_SG_DOORBELL);
		spin_unlock(&q->lock);
	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
 * Propagate changes of the SGE coalescing parameters to the HW.
int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
	sge->netdev->poll = t1_poll;
	sge->fixed_intrtimer = p->rx_coalesce_usecs *
		core_ticks_per_usec(sge->adapter);
	writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
 * Allocates both RX and TX resources and configures the SGE. However,
 * the hardware is not enabled yet.
int t1_sge_configure(struct sge *sge, struct sge_params *p)
	if (alloc_rx_resources(sge, p))
	if (alloc_tx_resources(sge, p)) {
		free_rx_resources(sge);
	configure_sge(sge, p);
	 * Now that we have sized the free lists calculate the payload
	 * capacity of the large buffers. Other parts of the driver use
	 * this to set the max offload coalescing size so that RX packets
	 * do not overflow our large buffers.
	p->large_buf_capacity = jumbo_payload_capacity(sge);
 * Disables the DMA engine.
void t1_sge_stop(struct sge *sge)
	writel(0, sge->adapter->regs + A_SG_CONTROL);
	(void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
	if (is_T2(sge->adapter))
		del_timer_sync(&sge->espibug_timer);
	del_timer_sync(&sge->tx_reclaim_timer);
 * Enables the DMA engine.
void t1_sge_start(struct sge *sge)
	refill_free_list(sge, &sge->freelQ[0]);
	refill_free_list(sge, &sge->freelQ[1]);
	writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
	doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
	(void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
	if (is_T2(sge->adapter))
		mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
 * Callback for the T2 ESPI 'stuck packet feature' workaround
static void espibug_workaround(void *data)
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;
	if (netif_running(adapter->port[0].dev)) {
		struct sk_buff *skb = sge->espibug_skb;
		u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
		if ((seop & 0xfff0fff) == 0xfff && skb) {
			u8 ch_mac_addr[ETH_ALEN] =
				{0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
			memcpy(skb->data + sizeof(struct cpl_tx_pkt),
			       ch_mac_addr, ETH_ALEN);
			memcpy(skb->data + skb->len - 10, ch_mac_addr,
			/* bump the reference count to avoid freeing of the
			 * skb once the DMA has completed.
			t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
 * Creates a t1_sge structure and returns suggested resource parameters.
struct sge * __devinit t1_sge_create(struct adapter *adapter,
				     struct sge_params *p)
	struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
	sge->adapter = adapter;
	sge->netdev = adapter->port[0].dev;
	sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
	sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
	init_timer(&sge->tx_reclaim_timer);
	sge->tx_reclaim_timer.data = (unsigned long)sge;
	sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;
	if (is_T2(sge->adapter)) {
		init_timer(&sge->espibug_timer);
		sge->espibug_timer.function = (void *)&espibug_workaround;
		sge->espibug_timer.data = (unsigned long)sge->adapter;
		sge->espibug_timeout = 1;
	p->cmdQ_size[0] = SGE_CMDQ0_E_N;
	p->cmdQ_size[1] = SGE_CMDQ1_E_N;
	p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
	p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
	p->rx_coalesce_usecs = 50;
	p->coalesce_enable = 0;
	p->sample_interval_usecs = 0;