/*****************************************************************************
 *                                                                           *
 * File: cxgb2.c                                                             *
 * $Revision: 1.25 $                                                         *
 * $Date: 2005/06/22 00:43:25 $                                              *
 * Description:                                                              *
 *  Chelsio 10Gb Ethernet Driver.                                            *
 *                                                                           *
 * This program is free software; you can redistribute it and/or modify      *
 * it under the terms of the GNU General Public License, version 2, as       *
 * published by the Free Software Foundation.                                *
 *                                                                           *
 * You should have received a copy of the GNU General Public License along   *
 * with this program; if not, write to the Free Software Foundation, Inc.,   *
 * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
 *                                                                           *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
 *                                                                           *
 * http://www.chelsio.com                                                    *
 *                                                                           *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
 * All rights reserved.                                                      *
 *                                                                           *
 * Maintainers: maintainers@chelsio.com                                      *
 *                                                                           *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
 *          Tina Yang               <tainay@chelsio.com>                     *
 *          Felix Marti             <felix@chelsio.com>                      *
 *          Scott Bardone           <sbardone@chelsio.com>                   *
 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
 *          Frank DiMambro          <frank@chelsio.com>                      *
 *                                                                           *
 * History:                                                                  *
 *                                                                           *
 ****************************************************************************/

#include "common.h"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/dma-mapping.h>
#include <asm/uaccess.h>

#include "cpl5_cmd.h"
#include "regs.h"
#include "gmac.h"
#include "cphy.h"
#include "sge.h"
#include "espi.h"

#include <linux/workqueue.h>

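/*
 * Helpers to (re)arm and cancel the delayed work that periodically
 * refreshes the MAC statistics.
 */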
static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
{
        schedule_delayed_work(&ap->stats_update_task, secs * HZ);
}

static inline void cancel_mac_stats_update(struct adapter *ap)
{
        cancel_delayed_work(&ap->stats_update_task);
}

#define MAX_CMDQ_ENTRIES 16384
#define MAX_CMDQ1_ENTRIES 1024
#define MAX_RX_BUFFERS 16384
#define MAX_RX_JUMBO_BUFFERS 16384
#define MAX_TX_BUFFERS_HIGH     16384U
#define MAX_TX_BUFFERS_LOW      1536U
#define MIN_FL_ENTRIES 32

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/*
 * The EEPROM is actually bigger but only the first few bytes are used so we
 * only report those.
 */
#define EEPROM_SIZE 32

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("GPL");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 message enable bitmap");


static const char pci_speed[][4] = {
        "33", "66", "100", "133"
};

/*
 * Setup MAC to receive the types of packets we want.
 */
static void t1_set_rxmode(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct cmac *mac = adapter->port[dev->if_port].mac;
        struct t1_rx_mode rm;

        rm.dev = dev;
        rm.idx = 0;
        rm.list = dev->mc_list;
        mac->ops->set_rx_mode(mac, &rm);
}

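/* Log the current link state (up/down, speed, duplex) of a port. */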
static void link_report(struct port_info *p)
{
        if (!netif_carrier_ok(p->dev))
                printk(KERN_INFO "%s: link down\n", p->dev->name);
        else {
                const char *s = "10Mbps";

                switch (p->link_config.speed) {
                        case SPEED_10000: s = "10Gbps"; break;
                        case SPEED_1000:  s = "1000Mbps"; break;
                        case SPEED_100:   s = "100Mbps"; break;
                }

                printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
                       p->dev->name, s,
                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
        }
}

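/*
 * Update a port's carrier state and report the change when its link goes
 * up or down.
 */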
void t1_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct port_info *p = &adapter->port[port_id];

        if (link_stat != netif_carrier_ok(p->dev)) {
                if (link_stat)
                        netif_carrier_on(p->dev);
                else
                        netif_carrier_off(p->dev);
                link_report(p);
        }
}

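/*
 * Bring up a port's MAC: reset it, program the MAC address and Rx mode,
 * restart link negotiation, and enable both Rx and Tx.
 */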
static void link_start(struct port_info *p)
{
        struct cmac *mac = p->mac;

        mac->ops->reset(mac);
        if (mac->ops->macaddress_set)
                mac->ops->macaddress_set(mac, p->dev->dev_addr);
        t1_set_rxmode(p->dev);
        t1_link_start(p->phy, mac, &p->link_config);
        mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static void enable_hw_csum(struct adapter *adapter)
{
        if (adapter->flags & TSO_CAPABLE)
                t1_tp_set_ip_checksum_offload(adapter, 1); /* for TSO only */
        t1_tp_set_tcp_checksum_offload(adapter, 1);
}

/*
 * Things to do upon first use of a card.
 * This must run with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adapter)
{
        int err = 0;

        if (!(adapter->flags & FULL_INIT_DONE)) {
                err = t1_init_hw_modules(adapter);
                if (err)
                        goto out_err;

                enable_hw_csum(adapter);
                adapter->flags |= FULL_INIT_DONE;
        }

        t1_interrupts_clear(adapter);
        if ((err = request_irq(adapter->pdev->irq,
                               t1_select_intr_handler(adapter), IRQF_SHARED,
                               adapter->name, adapter))) {
                goto out_err;
        }
        t1_sge_start(adapter->sge);
        t1_interrupts_enable(adapter);
 out_err:
        return err;
}

/*
 * Release resources when all the ports have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
        t1_sge_stop(adapter->sge);
        t1_interrupts_disable(adapter);
        free_irq(adapter->pdev->irq, adapter);
}

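/*
 * net_device open handler: bring the adapter up on first use, mark the port
 * as open, start its Tx queue, and kick off the periodic MAC statistics
 * update if this is the first active port.
 */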
static int cxgb_open(struct net_device *dev)
{
        int err;
        struct adapter *adapter = dev->priv;
        int other_ports = adapter->open_device_map & PORT_MASK;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        __set_bit(dev->if_port, &adapter->open_device_map);
        link_start(&adapter->port[dev->if_port]);
        netif_start_queue(dev);
        if (!other_ports && adapter->params.stats_update_period)
                schedule_mac_stats_update(adapter,
                                          adapter->params.stats_update_period);
        return 0;
}

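/*
 * net_device stop handler: quiesce the port, stop the statistics update
 * task once the last port is closed, and release adapter resources when no
 * ports remain open.
 */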
static int cxgb_close(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct cmac *mac = p->mac;

        netif_stop_queue(dev);
        mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
        netif_carrier_off(dev);

        clear_bit(dev->if_port, &adapter->open_device_map);
        if (adapter->params.stats_update_period &&
            !(adapter->open_device_map & PORT_MASK)) {
                /* Stop statistics accumulation. */
                smp_mb__after_clear_bit();
                spin_lock(&adapter->work_lock);   /* sync with update task */
                spin_unlock(&adapter->work_lock);
                cancel_mac_stats_update(adapter);
        }

        if (!adapter->open_device_map)
                cxgb_down(adapter);
        return 0;
}

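/* Fill in net_device_stats from a full update of the MAC counters. */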
static struct net_device_stats *t1_get_stats(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct net_device_stats *ns = &p->netstats;
        const struct cmac_statistics *pstats;

        /* Do a full update of the MAC stats */
        pstats = p->mac->ops->statistics_update(p->mac,
                                                MAC_STATS_UPDATE_FULL);

        ns->tx_packets = pstats->TxUnicastFramesOK +
                pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;

        ns->rx_packets = pstats->RxUnicastFramesOK +
                pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;

        ns->tx_bytes = pstats->TxOctetsOK;
        ns->rx_bytes = pstats->RxOctetsOK;

        ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
                pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
        ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
                pstats->RxFCSErrors + pstats->RxAlignErrors +
                pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
                pstats->RxSymbolErrors + pstats->RxRuntErrors;

        ns->multicast  = pstats->RxMulticastFramesOK;
        ns->collisions = pstats->TxTotalCollisions;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->RxFrameTooLongErrors +
                pstats->RxJabberErrors;
        ns->rx_over_errors   = 0;
        ns->rx_crc_errors    = pstats->RxFCSErrors;
        ns->rx_frame_errors  = pstats->RxAlignErrors;
        ns->rx_fifo_errors   = 0;
        ns->rx_missed_errors = 0;

        /* detailed tx_errors */
        ns->tx_aborted_errors   = pstats->TxFramesAbortedDueToXSCollisions;
        ns->tx_carrier_errors   = 0;
        ns->tx_fifo_errors      = pstats->TxUnderrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors    = pstats->TxLateCollisions;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct adapter *adapter = dev->priv;

        adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK",
        "TxOctetsBad",
        "TxUnicastFramesOK",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames",
        "TxFramesWithDeferredXmissions",
        "TxLateCollisions",
        "TxTotalCollisions",
        "TxFramesAbortedDueToXSCollisions",
        "TxUnderrun",
        "TxLengthErrors",
        "TxInternalMACXmitError",
        "TxFramesWithExcessiveDeferral",
        "TxFCSErrors",

        "RxOctetsOK",
        "RxOctetsBad",
        "RxUnicastFramesOK",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames",
        "RxFCSErrors",
        "RxAlignErrors",
        "RxSymbolErrors",
        "RxDataErrors",
        "RxSequenceErrors",
        "RxRuntErrors",
        "RxJabberErrors",
        "RxInternalMACRcvError",
        "RxInRangeLengthErrors",
        "RxOutOfRangeLengthField",
        "RxFrameTooLongErrors",

        "TSO",
        "VLANextractions",
        "VLANinsertions",
        "RxCsumGood",
        "TxCsumOffload",
        "RxDrops",

        "respQ_empty",
        "respQ_overflow",
        "freelistQ_empty",
        "pkt_too_big",
        "pkt_mismatch",
        "cmdQ_full0",
        "cmdQ_full1",
        "tx_ipfrags",
        "tx_reg_pkts",
        "tx_lso_pkts",
        "tx_do_cksum",

        "espi_DIP2ParityErr",
        "espi_DIP4Err",
        "espi_RxDrops",
        "espi_TxDrops",
        "espi_RxOvfl",
        "espi_ParityErr"
};

#define T2_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T2_REGMAP_SIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct adapter *adapter = dev->priv;

        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->fw_version, "N/A");
        strcpy(info->bus_info, pci_name(adapter->pdev));
}

static int get_stats_count(struct net_device *dev)
{
        return ARRAY_SIZE(stats_strings);
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

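/*
 * ethtool statistics handler: report the MAC, per-port SGE, and SGE
 * interrupt counters.
 */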
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct adapter *adapter = dev->priv;
        struct cmac *mac = adapter->port[dev->if_port].mac;
        const struct cmac_statistics *s;
        const struct sge_port_stats *ss;
        const struct sge_intr_counts *t;

        s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
        ss = t1_sge_get_port_stats(adapter->sge, dev->if_port);
        t = t1_sge_get_intr_counts(adapter->sge);

        *data++ = s->TxOctetsOK;
        *data++ = s->TxOctetsBad;
        *data++ = s->TxUnicastFramesOK;
        *data++ = s->TxMulticastFramesOK;
        *data++ = s->TxBroadcastFramesOK;
        *data++ = s->TxPauseFrames;
        *data++ = s->TxFramesWithDeferredXmissions;
        *data++ = s->TxLateCollisions;
        *data++ = s->TxTotalCollisions;
        *data++ = s->TxFramesAbortedDueToXSCollisions;
        *data++ = s->TxUnderrun;
        *data++ = s->TxLengthErrors;
        *data++ = s->TxInternalMACXmitError;
        *data++ = s->TxFramesWithExcessiveDeferral;
        *data++ = s->TxFCSErrors;

        *data++ = s->RxOctetsOK;
        *data++ = s->RxOctetsBad;
        *data++ = s->RxUnicastFramesOK;
        *data++ = s->RxMulticastFramesOK;
        *data++ = s->RxBroadcastFramesOK;
        *data++ = s->RxPauseFrames;
        *data++ = s->RxFCSErrors;
        *data++ = s->RxAlignErrors;
        *data++ = s->RxSymbolErrors;
        *data++ = s->RxDataErrors;
        *data++ = s->RxSequenceErrors;
        *data++ = s->RxRuntErrors;
        *data++ = s->RxJabberErrors;
        *data++ = s->RxInternalMACRcvError;
        *data++ = s->RxInRangeLengthErrors;
        *data++ = s->RxOutOfRangeLengthField;
        *data++ = s->RxFrameTooLongErrors;

        *data++ = ss->tso;
        *data++ = ss->vlan_xtract;
        *data++ = ss->vlan_insert;
        *data++ = ss->rx_cso_good;
        *data++ = ss->tx_cso;
        *data++ = ss->rx_drops;

        *data++ = (u64)t->respQ_empty;
        *data++ = (u64)t->respQ_overflow;
        *data++ = (u64)t->freelistQ_empty;
        *data++ = (u64)t->pkt_too_big;
        *data++ = (u64)t->pkt_mismatch;
        *data++ = (u64)t->cmdQ_full[0];
        *data++ = (u64)t->cmdQ_full[1];
        *data++ = (u64)t->tx_ipfrags;
        *data++ = (u64)t->tx_reg_pkts;
        *data++ = (u64)t->tx_lso_pkts;
        *data++ = (u64)t->tx_do_cksum;
}

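/* Dump the registers in the inclusive range [start, end] into buf. */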
static inline void reg_block_dump(struct adapter *ap, void *buf,
                                  unsigned int start, unsigned int end)
{
        u32 *p = buf + start;

        for ( ; start <= end; start += sizeof(u32))
                *p++ = readl(ap->regs + start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        struct adapter *ap = dev->priv;

        /*
         * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
         */
        regs->version = 2;

        memset(buf, 0, T2_REGMAP_SIZE);
        reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = &adapter->port[dev->if_port];

        cmd->supported = p->link_config.supported;
        cmd->advertising = p->link_config.advertising;

        if (netif_carrier_ok(dev)) {
                cmd->speed = p->link_config.speed;
                cmd->duplex = p->link_config.duplex;
        } else {
                cmd->speed = -1;
                cmd->duplex = -1;
        }

        cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
        cmd->phy_address = p->phy->addr;
        cmd->transceiver = XCVR_EXTERNAL;
        cmd->autoneg = p->link_config.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}

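/* Convert a speed/duplex pair into the matching SUPPORTED_* capability bit. */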
static int speed_duplex_to_caps(int speed, int duplex)
{
        int cap = 0;

        switch (speed) {
        case SPEED_10:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10baseT_Full;
                else
                        cap = SUPPORTED_10baseT_Half;
                break;
        case SPEED_100:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_100baseT_Full;
                else
                        cap = SUPPORTED_100baseT_Half;
                break;
        case SPEED_1000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_1000baseT_Full;
                else
                        cap = SUPPORTED_1000baseT_Half;
                break;
        case SPEED_10000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10000baseT_Full;
        }
        return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
                      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
                      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
                      ADVERTISED_10000baseT_Full)

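/*
 * ethtool set_settings handler.  A forced speed/duplex must be a supported
 * capability of the port; with autonegotiation enabled the advertising mask
 * is sanitized against the port's capabilities before the link is restarted.
 */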
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct link_config *lc = &p->link_config;

        if (!(lc->supported & SUPPORTED_Autoneg))
                return -EOPNOTSUPP;             /* can't change speed/duplex */

        if (cmd->autoneg == AUTONEG_DISABLE) {
                int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

                if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
                        return -EINVAL;
                lc->requested_speed = cmd->speed;
                lc->requested_duplex = cmd->duplex;
                lc->advertising = 0;
        } else {
                cmd->advertising &= ADVERTISED_MASK;
                if (cmd->advertising & (cmd->advertising - 1))
                        cmd->advertising = lc->supported;
                cmd->advertising &= lc->supported;
                if (!cmd->advertising)
                        return -EINVAL;
                lc->requested_speed = SPEED_INVALID;
                lc->requested_duplex = DUPLEX_INVALID;
                lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
        }
        lc->autoneg = cmd->autoneg;
        if (netif_running(dev))
                t1_link_start(p->phy, p->mac, lc);
        return 0;
}

static void get_pauseparam(struct net_device *dev,
                           struct ethtool_pauseparam *epause)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = &adapter->port[dev->if_port];

        epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
        epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
        epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct link_config *lc = &p->link_config;

        if (epause->autoneg == AUTONEG_DISABLE)
                lc->requested_fc = 0;
        else if (lc->supported & SUPPORTED_Autoneg)
                lc->requested_fc = PAUSE_AUTONEG;
        else
                return -EINVAL;

        if (epause->rx_pause)
                lc->requested_fc |= PAUSE_RX;
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (lc->autoneg == AUTONEG_ENABLE) {
                if (netif_running(dev))
                        t1_link_start(p->phy, p->mac, lc);
        } else {
                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
                if (netif_running(dev))
                        p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
                                                         lc->fc);
        }
        return 0;
}

static u32 get_rx_csum(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;

        return (adapter->flags & RX_CSUM_ENABLED) != 0;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
        struct adapter *adapter = dev->priv;

        if (data)
                adapter->flags |= RX_CSUM_ENABLED;
        else
                adapter->flags &= ~RX_CSUM_ENABLED;
        return 0;
}

static int set_tso(struct net_device *dev, u32 value)
{
        struct adapter *adapter = dev->priv;

        if (!(adapter->flags & TSO_CAPABLE))
                return value ? -EOPNOTSUPP : 0;
        return ethtool_op_set_tso(dev, value);
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        struct adapter *adapter = dev->priv;
        int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

        e->rx_max_pending = MAX_RX_BUFFERS;
        e->rx_mini_max_pending = 0;
        e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
        e->tx_max_pending = MAX_CMDQ_ENTRIES;

        e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
        e->rx_mini_pending = 0;
        e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
        e->tx_pending = adapter->params.sge.cmdQ_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        struct adapter *adapter = dev->priv;
        int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

        if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
            e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
            e->tx_pending > MAX_CMDQ_ENTRIES ||
            e->rx_pending < MIN_FL_ENTRIES ||
            e->rx_jumbo_pending < MIN_FL_ENTRIES ||
            e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
                return -EINVAL;

        if (adapter->flags & FULL_INIT_DONE)
                return -EBUSY;

        adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
        adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
        adapter->params.sge.cmdQ_size[0] = e->tx_pending;
        adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
                MAX_CMDQ1_ENTRIES : e->tx_pending;
        return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct adapter *adapter = dev->priv;

        /*
         * If RX coalescing is requested we use NAPI, otherwise interrupts.
         * This choice can be made only when all ports and the TOE are off.
         */
        if (adapter->open_device_map == 0)
                adapter->params.sge.polling = c->use_adaptive_rx_coalesce;

        if (adapter->params.sge.polling) {
                adapter->params.sge.rx_coalesce_usecs = 0;
        } else {
                adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
        }
        adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
        adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
        t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
        return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct adapter *adapter = dev->priv;

        c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
        c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
        c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
        return 0;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROM_SIZE;
}

#define EEPROM_MAGIC(ap) \
        (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
                      u8 *data)
{
        int i;
        u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
        struct adapter *adapter = dev->priv;

        e->magic = EEPROM_MAGIC(adapter);
        for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
                t1_seeprom_read(adapter, i, (u32 *)&buf[i]);
        memcpy(data, buf + e->offset, e->len);
        return 0;
}

static const struct ethtool_ops t1_ethtool_ops = {
        .get_settings      = get_settings,
        .set_settings      = set_settings,
        .get_drvinfo       = get_drvinfo,
        .get_msglevel      = get_msglevel,
        .set_msglevel      = set_msglevel,
        .get_ringparam     = get_sge_param,
        .set_ringparam     = set_sge_param,
        .get_coalesce      = get_coalesce,
        .set_coalesce      = set_coalesce,
        .get_eeprom_len    = get_eeprom_len,
        .get_eeprom        = get_eeprom,
        .get_pauseparam    = get_pauseparam,
        .set_pauseparam    = set_pauseparam,
        .get_rx_csum       = get_rx_csum,
        .set_rx_csum       = set_rx_csum,
        .get_tx_csum       = ethtool_op_get_tx_csum,
        .set_tx_csum       = ethtool_op_set_tx_csum,
        .get_sg            = ethtool_op_get_sg,
        .set_sg            = ethtool_op_set_sg,
        .get_link          = ethtool_op_get_link,
        .get_strings       = get_strings,
        .get_stats_count   = get_stats_count,
        .get_ethtool_stats = get_stats,
        .get_regs_len      = get_regs_len,
        .get_regs          = get_regs,
        .get_tso           = ethtool_op_get_tso,
        .set_tso           = set_tso,
};

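/* ioctl handler: implements the standard MII register read/write requests. */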
static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
        struct adapter *adapter = dev->priv;
        struct mii_ioctl_data *data = if_mii(req);

        switch (cmd) {
        case SIOCGMIIPHY:
                data->phy_id = adapter->port[dev->if_port].phy->addr;
                /* FALLTHRU */
        case SIOCGMIIREG: {
                struct cphy *phy = adapter->port[dev->if_port].phy;
                u32 val;

                if (!phy->mdio_read)
                        return -EOPNOTSUPP;
                phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
                               &val);
                data->val_out = val;
                break;
        }
        case SIOCSMIIREG: {
                struct cphy *phy = adapter->port[dev->if_port].phy;

                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (!phy->mdio_write)
                        return -EOPNOTSUPP;
                phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
                                data->val_in);
                break;
        }

        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

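/* Change the MTU, propagating the new size to the MAC first. */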
static int t1_change_mtu(struct net_device *dev, int new_mtu)
{
        int ret;
        struct adapter *adapter = dev->priv;
        struct cmac *mac = adapter->port[dev->if_port].mac;

        if (!mac->ops->set_mtu)
                return -EOPNOTSUPP;
        if (new_mtu < 68)
                return -EINVAL;
        if ((ret = mac->ops->set_mtu(mac, new_mtu)))
                return ret;
        dev->mtu = new_mtu;
        return 0;
}

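/* Set a new MAC address and program it into the MAC. */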
static int t1_set_mac_addr(struct net_device *dev, void *p)
{
        struct adapter *adapter = dev->priv;
        struct cmac *mac = adapter->port[dev->if_port].mac;
        struct sockaddr *addr = p;

        if (!mac->ops->macaddress_set)
                return -EOPNOTSUPP;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        mac->ops->macaddress_set(mac, dev->dev_addr);
        return 0;
}

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
static void vlan_rx_register(struct net_device *dev,
                                   struct vlan_group *grp)
{
        struct adapter *adapter = dev->priv;

        spin_lock_irq(&adapter->async_lock);
        adapter->vlan_grp = grp;
        t1_set_vlan_accel(adapter, grp != NULL);
        spin_unlock_irq(&adapter->async_lock);
}

static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
        struct adapter *adapter = dev->priv;

        spin_lock_irq(&adapter->async_lock);
        if (adapter->vlan_grp)
                adapter->vlan_grp->vlan_devices[vid] = NULL;
        spin_unlock_irq(&adapter->async_lock);
}
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
static void t1_netpoll(struct net_device *dev)
{
        unsigned long flags;
        struct adapter *adapter = dev->priv;

        local_irq_save(flags);
        t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter);
        local_irq_restore(flags);
}
#endif

/*
 * Periodic accumulation of MAC statistics.  This is used only if the MAC
 * does not have any other way to prevent stats counter overflow.
 */
static void mac_stats_task(void *data)
{
        int i;
        struct adapter *adapter = data;

        for_each_port(adapter, i) {
                struct port_info *p = &adapter->port[i];

                if (netif_running(p->dev))
                        p->mac->ops->statistics_update(p->mac,
                                                       MAC_STATS_UPDATE_FAST);
        }

        /* Schedule the next statistics update if any port is active. */
        spin_lock(&adapter->work_lock);
        if (adapter->open_device_map & PORT_MASK)
                schedule_mac_stats_update(adapter,
                                          adapter->params.stats_update_period);
        spin_unlock(&adapter->work_lock);
}

/*
 * Processes elmer0 external interrupts in process context.
 */
static void ext_intr_task(void *data)
{
        struct adapter *adapter = data;

        elmer0_ext_intr_handler(adapter);

        /* Now reenable external interrupts */
        spin_lock_irq(&adapter->async_lock);
        adapter->slow_intr_mask |= F_PL_INTR_EXT;
        writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
        writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
               adapter->regs + A_PL_ENABLE);
        spin_unlock_irq(&adapter->async_lock);
}

/*
 * Interrupt-context handler for elmer0 external interrupts.
 */
void t1_elmer0_ext_intr(struct adapter *adapter)
{
        /*
         * Schedule a task to handle external interrupts as we require
         * a process context.  We disable EXT interrupts in the interim
         * and let the task reenable them when it's done.
         */
        adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
        writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
               adapter->regs + A_PL_ENABLE);
        schedule_work(&adapter->ext_intr_handler_task);
}

void t1_fatal_err(struct adapter *adapter)
{
        if (adapter->flags & FULL_INIT_DONE) {
                t1_sge_stop(adapter->sge);
                t1_interrupts_disable(adapter);
        }
        CH_ALERT("%s: encountered fatal error, operation suspended\n",
                 adapter->name);
}

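/*
 * PCI probe routine.  Maps the device, allocates one net_device per port
 * (the adapter state is carved out of the first one), initializes the
 * software modules, and registers whichever ports come up successfully.
 */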
static int __devinit init_one(struct pci_dev *pdev,
                              const struct pci_device_id *ent)
{
        static int version_printed;

        int i, err, pci_using_dac = 0;
        unsigned long mmio_start, mmio_len;
        const struct board_info *bi;
        struct adapter *adapter = NULL;
        struct port_info *pi;

        if (!version_printed) {
                printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
                       DRV_VERSION);
                ++version_printed;
        }

        err = pci_enable_device(pdev);
        if (err)
                return err;

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                CH_ERR("%s: cannot find PCI device memory base address\n",
                       pci_name(pdev));
                err = -ENODEV;
                goto out_disable_pdev;
        }

        if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
                pci_using_dac = 1;

                if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
                        CH_ERR("%s: unable to obtain 64-bit DMA for "
                               "consistent allocations\n", pci_name(pdev));
                        err = -ENODEV;
                        goto out_disable_pdev;
                }

        } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
                CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
                goto out_disable_pdev;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
                goto out_disable_pdev;
        }

        pci_set_master(pdev);

        mmio_start = pci_resource_start(pdev, 0);
        mmio_len = pci_resource_len(pdev, 0);
        bi = t1_get_board_info(ent->driver_data);

        for (i = 0; i < bi->port_number; ++i) {
                struct net_device *netdev;

                netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
                if (!netdev) {
                        err = -ENOMEM;
                        goto out_free_dev;
                }

                SET_MODULE_OWNER(netdev);
                SET_NETDEV_DEV(netdev, &pdev->dev);

                if (!adapter) {
                        adapter = netdev->priv;
                        adapter->pdev = pdev;
                        adapter->port[0].dev = netdev;  /* so we don't leak it */

                        adapter->regs = ioremap(mmio_start, mmio_len);
                        if (!adapter->regs) {
                                CH_ERR("%s: cannot map device registers\n",
                                       pci_name(pdev));
                                err = -ENOMEM;
                                goto out_free_dev;
                        }

                        if (t1_get_board_rev(adapter, bi, &adapter->params)) {
                                err = -ENODEV;    /* Can't handle this chip rev */
                                goto out_free_dev;
                        }

                        adapter->name = pci_name(pdev);
                        adapter->msg_enable = dflt_msg_enable;
                        adapter->mmio_len = mmio_len;

                        init_MUTEX(&adapter->mib_mutex);
                        spin_lock_init(&adapter->tpi_lock);
                        spin_lock_init(&adapter->work_lock);
                        spin_lock_init(&adapter->async_lock);

                        INIT_WORK(&adapter->ext_intr_handler_task,
                                  ext_intr_task, adapter);
                        INIT_WORK(&adapter->stats_update_task, mac_stats_task,
                                  adapter);

                        pci_set_drvdata(pdev, netdev);
                }

                pi = &adapter->port[i];
                pi->dev = netdev;
                netif_carrier_off(netdev);
                netdev->irq = pdev->irq;
                netdev->if_port = i;
                netdev->mem_start = mmio_start;
                netdev->mem_end = mmio_start + mmio_len - 1;
                netdev->priv = adapter;
                netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
                netdev->features |= NETIF_F_LLTX;

                adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
                if (pci_using_dac)
                        netdev->features |= NETIF_F_HIGHDMA;
                if (vlan_tso_capable(adapter)) {
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
                        adapter->flags |= VLAN_ACCEL_CAPABLE;
                        netdev->features |=
                                NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
                        netdev->vlan_rx_register = vlan_rx_register;
                        netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
#endif
                        adapter->flags |= TSO_CAPABLE;
                        netdev->features |= NETIF_F_TSO;
                }

                netdev->open = cxgb_open;
                netdev->stop = cxgb_close;
                netdev->hard_start_xmit = t1_start_xmit;
                netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
                        sizeof(struct cpl_tx_pkt_lso) :
                        sizeof(struct cpl_tx_pkt);
                netdev->get_stats = t1_get_stats;
                netdev->set_multicast_list = t1_set_rxmode;
                netdev->do_ioctl = t1_ioctl;
                netdev->change_mtu = t1_change_mtu;
                netdev->set_mac_address = t1_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
                netdev->poll_controller = t1_netpoll;
#endif
                netdev->weight = 64;

                SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
        }

        if (t1_init_sw_modules(adapter, bi) < 0) {
                err = -ENODEV;
                goto out_free_dev;
        }

        /*
         * The card is now ready to go.  If any errors occur during device
         * registration we do not fail the whole card but rather proceed only
         * with the ports we manage to register successfully.  However we must
         * register at least one net device.
         */
        for (i = 0; i < bi->port_number; ++i) {
                err = register_netdev(adapter->port[i].dev);
                if (err)
                        CH_WARN("%s: cannot register net device %s, skipping\n",
                                pci_name(pdev), adapter->port[i].dev->name);
                else {
                        /*
                         * Change the name we use for messages to the name of
                         * the first successfully registered interface.
                         */
                        if (!adapter->registered_device_map)
                                adapter->name = adapter->port[i].dev->name;

                        __set_bit(i, &adapter->registered_device_map);
                }
        }
        if (!adapter->registered_device_map) {
                CH_ERR("%s: could not register any net devices\n",
                       pci_name(pdev));
                goto out_release_adapter_res;
        }

        printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
               bi->desc, adapter->params.chip_revision,
               adapter->params.pci.is_pcix ? "PCIX" : "PCI",
               adapter->params.pci.speed, adapter->params.pci.width);
        return 0;

 out_release_adapter_res:
        t1_free_sw_modules(adapter);
 out_free_dev:
        if (adapter) {
                if (adapter->regs)
                        iounmap(adapter->regs);
                for (i = bi->port_number - 1; i >= 0; --i)
                        if (adapter->port[i].dev)
                                free_netdev(adapter->port[i].dev);
        }
        pci_release_regions(pdev);
 out_disable_pdev:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}

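/*
 * Reset the chip through the PCI power management CSR (presumably by
 * cycling it through D3hot and back to D0).
 */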
static inline void t1_sw_reset(struct pci_dev *pdev)
{
        pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
        pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}

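/*
 * PCI remove routine: unregister the net devices, release software and PCI
 * resources, and finally reset the chip.
 */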
static void __devexit remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        if (dev) {
                int i;
                struct adapter *adapter = dev->priv;

                for_each_port(adapter, i)
                        if (test_bit(i, &adapter->registered_device_map))
                                unregister_netdev(adapter->port[i].dev);

                t1_free_sw_modules(adapter);
                iounmap(adapter->regs);
                while (--i >= 0)
                        if (adapter->port[i].dev)
                                free_netdev(adapter->port[i].dev);

                pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
                t1_sw_reset(pdev);
        }
}

static struct pci_driver driver = {
        .name     = DRV_NAME,
        .id_table = t1_pci_tbl,
        .probe    = init_one,
        .remove   = __devexit_p(remove_one),
};

static int __init t1_init_module(void)
{
        return pci_register_driver(&driver);
}

static void __exit t1_cleanup_module(void)
{
        pci_unregister_driver(&driver);
}

module_init(t1_init_module);
module_exit(t1_cleanup_module);