Line 0
Link Here
|
|
|
1 |
/* |
2 |
* New driver for Marvell Yukon 2 chipset. |
3 |
* Based on earlier sk98lin, and skge driver. |
4 |
* |
5 |
* This driver intentionally does not support all the features |
6 |
* of the original driver such as link fail-over and link management because |
7 |
* those should be done at higher levels. |
8 |
* |
9 |
* Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org> |
10 |
* |
11 |
* This program is free software; you can redistribute it and/or modify |
12 |
* it under the terms of the GNU General Public License as published by |
13 |
* the Free Software Foundation; either version 2 of the License, or |
14 |
* (at your option) any later version. |
15 |
* |
16 |
* This program is distributed in the hope that it will be useful, |
17 |
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
18 |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
19 |
* GNU General Public License for more details. |
20 |
* |
21 |
* You should have received a copy of the GNU General Public License |
22 |
* along with this program; if not, write to the Free Software |
23 |
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
24 |
*/ |
25 |
|
26 |
/* |
27 |
* TOTEST |
28 |
* - speed setting |
29 |
* - suspend/resume |
30 |
*/ |
31 |
|
32 |
#include <linux/config.h> |
33 |
#include <linux/crc32.h> |
34 |
#include <linux/kernel.h> |
35 |
#include <linux/version.h> |
36 |
#include <linux/module.h> |
37 |
#include <linux/netdevice.h> |
38 |
#include <linux/dma-mapping.h> |
39 |
#include <linux/etherdevice.h> |
40 |
#include <linux/ethtool.h> |
41 |
#include <linux/pci.h> |
42 |
#include <linux/ip.h> |
43 |
#include <linux/tcp.h> |
44 |
#include <linux/in.h> |
45 |
#include <linux/delay.h> |
46 |
#include <linux/workqueue.h> |
47 |
#include <linux/if_vlan.h> |
48 |
#include <linux/prefetch.h> |
49 |
#include <linux/mii.h> |
50 |
|
51 |
#include <asm/irq.h> |
52 |
|
53 |
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) |
54 |
#define SKY2_VLAN_TAG_USED 1 |
55 |
#endif |
56 |
|
57 |
#include "sky2.h" |
58 |
|
59 |
#define DRV_NAME		"sky2"
#define DRV_VERSION		"0.13"
#define PFX			DRV_NAME " "	/* log message prefix */

/*
 * The Yukon II chipset takes 64 bit command blocks (called list elements)
 * that are organized into three (receive, transmit, status) different rings
 * similar to Tigon3. A transmit can require several elements;
 * a receive requires one (or two if using 64 bit dma).
 */

/* True only for the buggy Yukon EC rev A1, which needs the put-index
 * workaround in sky2_put_idx()
 */
#define is_ec_a1(hw) \
	unlikely((hw)->chip_id == CHIP_ID_YUKON_EC && \
		 (hw)->chip_rev == CHIP_REV_YU_EC_A1)

/* Receive ring: one list element per buffer, two when a 64 bit address
 * change needs an extra OP_ADDR64 element, hence pending <= LE_SIZE/2.
 */
#define RX_LE_SIZE		512
#define RX_LE_BYTES		(RX_LE_SIZE*sizeof(struct sky2_rx_le))
#define RX_MAX_PENDING		(RX_LE_SIZE/2 - 2)
#define RX_DEF_PENDING		RX_MAX_PENDING
#define RX_SKB_ALIGN		8	/* receive buffer alignment (FIFO bug WA) */

/* Transmit ring sizing */
#define TX_RING_SIZE		512
#define TX_DEF_PENDING		(TX_RING_SIZE - 1)
#define TX_MIN_PENDING		64
#define MAX_SKB_TX_LE		(4 + 2*MAX_SKB_FRAGS)

#define STATUS_RING_SIZE	2048	/* 2 ports * (TX + 2*RX) */
#define STATUS_LE_BYTES		(STATUS_RING_SIZE*sizeof(struct sky2_status_le))
#define ETH_JUMBO_MTU		9000
#define TX_WATCHDOG		(5 * HZ)
#define NAPI_WEIGHT		64
#define PHY_RETRIES		1000	/* max 1us polls waiting on SMI */
91 |
|
92 |
/* netif_msg categories enabled when the debug module param is -1 */
static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/* Frames this size or smaller are copied into a freshly allocated skb
 * on receive instead of handing up the full-size ring buffer.
 */
static int copybreak __read_mostly = 256;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");
104 |
|
105 |
/* PCI IDs of Yukon-2 based boards handled by this driver */
static const struct pci_device_id sky2_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4343) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4344) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4345) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4346) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4347) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) },
	{ 0 }			/* terminator */
};

MODULE_DEVICE_TABLE(pci, sky2_id_table);
129 |
|
130 |
/* Avoid conditionals by using array: queue address indexed by port number */
static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };
static const unsigned rxqaddr[] = { Q_R1, Q_R2 };

/* This driver supports yukon2 chipset only.
 * Name table indexed by (chip_id - CHIP_ID_YUKON_XL); id values in comments.
 */
static const char *yukon2_name[] = {
	"XL",		/* 0xb3 */
	"EC Ultra",	/* 0xb4 */
	"UNKNOWN",	/* 0xb5 */
	"EC",		/* 0xb6 */
	"FE",		/* 0xb7 */
};
142 |
|
143 |
/* Access to external PHY */

/* Write a 16 bit PHY register through the GMAC SMI interface.
 * Busy-polls (up to PHY_RETRIES microseconds) for the write to complete.
 * Returns 0 on success or -ETIMEDOUT if the SMI busy bit never clears.
 */
static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
{
	int i;

	gma_write16(hw, port, GM_SMI_DATA, val);
	/* kick off the write; hardware drops GM_SMI_CT_BUSY when done */
	gma_write16(hw, port, GM_SMI_CTRL,
		    GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg));

	for (i = 0; i < PHY_RETRIES; i++) {
		if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
			return 0;
		udelay(1);
	}

	printk(KERN_WARNING PFX "%s: phy write timeout\n", hw->dev[port]->name);
	return -ETIMEDOUT;
}
161 |
|
162 |
/* Start a PHY register read over SMI and poll for completion.
 * On success stores the register value in *val and returns 0; returns
 * -ETIMEDOUT (leaving *val untouched) if the read-valid bit never
 * appears within PHY_RETRIES microseconds.
 */
static int __gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg, u16 *val)
{
	int i;

	gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(PHY_ADDR_MARV)
		    | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

	for (i = 0; i < PHY_RETRIES; i++) {
		if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL) {
			*val = gma_read16(hw, port, GM_SMI_DATA);
			return 0;
		}

		udelay(1);
	}

	return -ETIMEDOUT;
}
180 |
|
181 |
/* Convenience wrapper around __gm_phy_read() that logs on timeout.
 * Returns the register value, or 0 if the read timed out.  (Previously
 * the timeout path returned an uninitialized stack value — undefined
 * behavior; __gm_phy_read leaves *val untouched on -ETIMEDOUT.)
 */
static u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
{
	u16 v = 0;

	if (__gm_phy_read(hw, port, reg, &v) != 0)
		printk(KERN_WARNING PFX "%s: phy read timeout\n", hw->dev[port]->name);
	return v;
}
189 |
|
190 |
/* Place the controller into the requested PCI power state.
 * Programs the chip's power control, clock gating (chip-rev dependent)
 * and PHY power-down bits, then writes the PCI PM control register.
 * Returns 0 on success, -1 for an unknown state.
 */
static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
{
	u16 power_control;
	u32 reg1;
	int vaux;
	int ret = 0;

	pr_debug("sky2_set_power_state %d\n", state);
	/* unlock the config-space-mirrored registers for writing */
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);

	pci_read_config_word(hw->pdev, hw->pm_cap + PCI_PM_PMC, &power_control);
	/* VAUX usable only if present and PME from D3cold is supported */
	vaux = (sky2_read8(hw, B0_CTST) & Y2_VAUX_AVAIL) &&
		(power_control & PCI_PM_CAP_PME_D3cold);

	pci_read_config_word(hw->pdev, hw->pm_cap + PCI_PM_CTRL, &power_control);

	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);

	switch (state) {
	case PCI_D0:
		/* switch power to VCC (WA for VAUX problem) */
		sky2_write8(hw, B0_POWER_CTRL,
			    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);

		/* disable Core Clock Division, */
		sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);

		if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
			/* enable bits are inverted */
			sky2_write8(hw, B2_Y2_CLK_GATE,
				    Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
				    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
				    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
		else
			sky2_write8(hw, B2_Y2_CLK_GATE, 0);

		/* Turn off phy power saving */
		pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg1);
		reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);

		/* looks like this XL is back asswards .. */
		if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) {
			reg1 |= PCI_Y2_PHY1_COMA;
			if (hw->ports > 1)
				reg1 |= PCI_Y2_PHY2_COMA;
		}
		pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg1);
		break;

	case PCI_D3hot:
	case PCI_D3cold:
		/* Turn on phy power saving */
		pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg1);
		if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
			reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
		else
			reg1 |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
		pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg1);

		if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
			sky2_write8(hw, B2_Y2_CLK_GATE, 0);
		else
			/* enable bits are inverted */
			sky2_write8(hw, B2_Y2_CLK_GATE,
				    Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
				    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
				    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);

		/* switch power to VAUX */
		if (vaux && state != PCI_D3cold)
			sky2_write8(hw, B0_POWER_CTRL,
				    (PC_VAUX_ENA | PC_VCC_ENA |
				     PC_VAUX_ON | PC_VCC_OFF));
		break;
	default:
		printk(KERN_ERR PFX "Unknown power state %d\n", state);
		ret = -1;
	}

	pci_write_config_byte(hw->pdev, hw->pm_cap + PCI_PM_CTRL, power_control);
	/* write-protect the config registers again */
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	return ret;
}
274 |
|
275 |
/* Quiesce one port's PHY: mask GMAC and PHY interrupts, clear the
 * multicast hash, and re-enable unicast/multicast reception.
 */
static void sky2_phy_reset(struct sky2_hw *hw, unsigned port)
{
	u16 reg;

	/* disable all GMAC IRQ's */
	sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
	/* disable PHY IRQs */
	gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);

	gma_write16(hw, port, GM_MC_ADDR_H1, 0);	/* clear MC hash */
	gma_write16(hw, port, GM_MC_ADDR_H2, 0);
	gma_write16(hw, port, GM_MC_ADDR_H3, 0);
	gma_write16(hw, port, GM_MC_ADDR_H4, 0);

	/* accept unicast and multicast frames */
	reg = gma_read16(hw, port, GM_RX_CTRL);
	reg |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
	gma_write16(hw, port, GM_RX_CTRL, reg);
}
293 |
|
294 |
/* Program the Marvell PHY for the port's current autoneg/speed/duplex/
 * pause settings and set up the PHY LEDs.  Chip-specific quirks for
 * XL, EC, FE and fiber variants are handled inline.
 * Callers in this file hold sky2->phy_sema around this function.
 */
static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
{
	struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
	u16 ctrl, ct1000, adv, pg, ledctrl, ledover;

	/* downshift / MAC-interface-speed tuning (not on XL) */
	if (sky2->autoneg == AUTONEG_ENABLE && hw->chip_id != CHIP_ID_YUKON_XL) {
		u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);

		ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
			   PHY_M_EC_MAC_S_MSK);
		ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);

		if (hw->chip_id == CHIP_ID_YUKON_EC)
			ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA;
		else
			ectrl |= PHY_M_EC_M_DSC(2) | PHY_M_EC_S_DSC(3);

		gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
	}

	ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
	if (hw->copper) {
		if (hw->chip_id == CHIP_ID_YUKON_FE) {
			/* enable automatic crossover */
			ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;
		} else {
			/* disable energy detect */
			ctrl &= ~PHY_M_PC_EN_DET_MSK;

			/* enable automatic crossover */
			ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);

			if (sky2->autoneg == AUTONEG_ENABLE &&
			    hw->chip_id == CHIP_ID_YUKON_XL) {
				ctrl &= ~PHY_M_PC_DSC_MSK;
				ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
			}
		}
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
	} else {
		/* workaround for deviation #4.88 (CRC errors) */
		/* disable Automatic Crossover */

		ctrl &= ~PHY_M_PC_MDIX_MSK;
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);

		if (hw->chip_id == CHIP_ID_YUKON_XL) {
			/* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */
			gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
			ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
			ctrl &= ~PHY_M_MAC_MD_MSK;
			ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
			gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);

			/* select page 1 to access Fiber registers */
			gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1);
		}
	}

	/* base control: autoneg on/off plus a PHY reset to latch it */
	ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
	if (sky2->autoneg == AUTONEG_DISABLE)
		ctrl &= ~PHY_CT_ANE;
	else
		ctrl |= PHY_CT_ANE;

	ctrl |= PHY_CT_RESET;
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	ctrl = 0;
	ct1000 = 0;
	adv = PHY_AN_CSMA;

	if (sky2->autoneg == AUTONEG_ENABLE) {
		/* translate ethtool advertising flags to PHY register bits */
		if (hw->copper) {
			if (sky2->advertising & ADVERTISED_1000baseT_Full)
				ct1000 |= PHY_M_1000C_AFD;
			if (sky2->advertising & ADVERTISED_1000baseT_Half)
				ct1000 |= PHY_M_1000C_AHD;
			if (sky2->advertising & ADVERTISED_100baseT_Full)
				adv |= PHY_M_AN_100_FD;
			if (sky2->advertising & ADVERTISED_100baseT_Half)
				adv |= PHY_M_AN_100_HD;
			if (sky2->advertising & ADVERTISED_10baseT_Full)
				adv |= PHY_M_AN_10_FD;
			if (sky2->advertising & ADVERTISED_10baseT_Half)
				adv |= PHY_M_AN_10_HD;
		} else		/* special defines for FIBER (88E1011S only) */
			adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD;

		/* Set Flow-control capabilities */
		if (sky2->tx_pause && sky2->rx_pause)
			adv |= PHY_AN_PAUSE_CAP;	/* symmetric */
		else if (sky2->rx_pause && !sky2->tx_pause)
			adv |= PHY_AN_PAUSE_ASYM | PHY_AN_PAUSE_CAP;
		else if (!sky2->rx_pause && sky2->tx_pause)
			adv |= PHY_AN_PAUSE_ASYM;	/* local */

		/* Restart Auto-negotiation */
		ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
	} else {
		/* forced speed/duplex settings */
		ct1000 = PHY_M_1000C_MSE;

		if (sky2->duplex == DUPLEX_FULL)
			ctrl |= PHY_CT_DUP_MD;

		switch (sky2->speed) {
		case SPEED_1000:
			ctrl |= PHY_CT_SP1000;
			break;
		case SPEED_100:
			ctrl |= PHY_CT_SP100;
			break;
		}

		ctrl |= PHY_CT_RESET;
	}

	/* FE (10/100 only) has no 1000T control register */
	if (hw->chip_id != CHIP_ID_YUKON_FE)
		gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);

	gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	/* Setup Phy LED's */
	ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS);
	ledover = 0;

	switch (hw->chip_id) {
	case CHIP_ID_YUKON_FE:
		/* on 88E3082 these bits are at 11..9 (shifted left) */
		ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1;

		ctrl = gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR);

		/* delete ACT LED control bits */
		ctrl &= ~PHY_M_FELP_LED1_MSK;
		/* change ACT LED control to blink mode */
		ctrl |= PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL);
		gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
		break;

	case CHIP_ID_YUKON_XL:
		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);

		/* select page 3 to access LED control register */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);

		/* set LED Function Control register */
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, (PHY_M_LEDC_LOS_CTRL(1) |	/* LINK/ACT */
							   PHY_M_LEDC_INIT_CTRL(7) |	/* 10 Mbps */
							   PHY_M_LEDC_STA1_CTRL(7) |	/* 100 Mbps */
							   PHY_M_LEDC_STA0_CTRL(7)));	/* 1000 Mbps */

		/* set Polarity Control register */
		gm_phy_write(hw, port, PHY_MARV_PHY_STAT,
			     (PHY_M_POLC_LS1_P_MIX(4) |
			      PHY_M_POLC_IS0_P_MIX(4) |
			      PHY_M_POLC_LOS_CTRL(2) |
			      PHY_M_POLC_INIT_CTRL(2) |
			      PHY_M_POLC_STA1_CTRL(2) |
			      PHY_M_POLC_STA0_CTRL(2)));

		/* restore page register */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
		break;

	default:
		/* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
		ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
		/* turn off the Rx LED (LED_RX) */
		ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
	}

	gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);

	if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
		/* turn on 100 Mbps LED (LED_LINK100) */
		ledover |= PHY_M_LED_MO_100(MO_LED_ON);
	}

	if (ledover)
		gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);

	/* Enable phy interrupt on auto-negotiation complete (or link up) */
	if (sky2->autoneg == AUTONEG_ENABLE)
		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
	else
		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
}
484 |
|
485 |
/* Force a renegotiation */
static void sky2_phy_reinit(struct sky2_port *sky2)
{
	/* phy_sema serializes PHY register access with the other users
	 * of sky2_phy_init/__gm_phy_read in this file
	 */
	down(&sky2->phy_sema);
	sky2_phy_init(sky2->hw, sky2->port);
	up(&sky2->phy_sema);
}
492 |
|
493 |
/* Bring up the MAC for one port: reset GMAC/PHY, program flow control
 * and forced speed/duplex (when autoneg is off), run sky2_phy_init()
 * under phy_sema, clear the MIB counters, and configure the Rx/Tx MAC
 * FIFOs (with jumbo and EC Ultra quirks).
 */
static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
{
	struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
	u16 reg;
	int i;
	const u8 *addr = hw->dev[port]->dev_addr;

	sky2_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
	sky2_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR|GPC_ENA_PAUSE);

	sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);

	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 && port == 1) {
		/* WA DEV_472 -- looks like crossed wires on port 2 */
		/* clear GMAC 1 Control reset */
		sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
		do {
			/* pulse reset until the port-2 PHY identifies itself */
			sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_SET);
			sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_CLR);
		} while (gm_phy_read(hw, 1, PHY_MARV_ID0) != PHY_MARV_ID0_VAL ||
			 gm_phy_read(hw, 1, PHY_MARV_ID1) != PHY_MARV_ID1_Y2 ||
			 gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0);
	}

	if (sky2->autoneg == AUTONEG_DISABLE) {
		reg = gma_read16(hw, port, GM_GP_CTRL);
		reg |= GM_GPCR_AU_ALL_DIS;
		gma_write16(hw, port, GM_GP_CTRL, reg);
		gma_read16(hw, port, GM_GP_CTRL);

		switch (sky2->speed) {
		case SPEED_1000:
			reg |= GM_GPCR_SPEED_1000;
			/* fallthru */
		case SPEED_100:
			reg |= GM_GPCR_SPEED_100;
		}

		if (sky2->duplex == DUPLEX_FULL)
			reg |= GM_GPCR_DUP_FULL;
	} else
		reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;

	if (!sky2->tx_pause && !sky2->rx_pause) {
		sky2_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
		reg |=
		    GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
	} else if (sky2->tx_pause && !sky2->rx_pause) {
		/* disable Rx flow-control */
		reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
	}

	gma_write16(hw, port, GM_GP_CTRL, reg);

	sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC));

	down(&sky2->phy_sema);
	sky2_phy_init(hw, port);
	up(&sky2->phy_sema);

	/* MIB clear */
	reg = gma_read16(hw, port, GM_PHY_ADDR);
	gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);

	/* read every MIB counter once to reset them, then restore reg */
	for (i = 0; i < GM_MIB_CNT_SIZE; i++)
		gma_read16(hw, port, GM_MIB_CNT_BASE + 8 * i);
	gma_write16(hw, port, GM_PHY_ADDR, reg);

	/* transmit control */
	gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));

	/* receive control reg: unicast + multicast + no FCS */
	gma_write16(hw, port, GM_RX_CTRL,
		    GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);

	/* transmit flow control */
	gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);

	/* transmit parameter */
	gma_write16(hw, port, GM_TX_PARAM,
		    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
		    TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
		    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) |
		    TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));

	/* serial mode register */
	reg = DATA_BLIND_VAL(DATA_BLIND_DEF) |
		GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);

	if (hw->dev[port]->mtu > ETH_DATA_LEN)
		reg |= GM_SMOD_JUMBO_ENA;

	gma_write16(hw, port, GM_SERIAL_MODE, reg);

	/* virtual address for data */
	gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);

	/* physical address: used for pause frames */
	gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);

	/* ignore counter overflows */
	gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
	gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
	gma_write16(hw, port, GM_TR_IRQ_MSK, 0);

	/* Configure Rx MAC FIFO */
	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
	sky2_write16(hw, SK_REG(port, RX_GMF_CTRL_T),
		     GMF_RX_CTRL_DEF);

	/* Flush Rx MAC FIFO on any flow control or error */
	sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);

	/* Set threshold to 0xa (64 bytes)
	 * ASF disabled so no need to do WA dev #4.30
	 */
	sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF);

	/* Configure Tx MAC FIFO */
	sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
	sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);

	if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
		sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
		sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
		if (hw->dev[port]->mtu > ETH_DATA_LEN) {
			/* set Tx GMAC FIFO Almost Empty Threshold */
			sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR), 0x180);
			/* Disable Store & Forward mode for TX */
			sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS);
		}
	}

}
627 |
|
628 |
/* Assign Ram Buffer allocation.
 * start and end are in units of 4k bytes
 * ram registers are in units of 64bit words
 */
static void sky2_ramset(struct sky2_hw *hw, u16 q, u8 startk, u8 endk)
{
	u32 start, end;

	/* convert 4K-byte units to 64-bit-word register units */
	start = startk * 4096/8;
	end = (endk * 4096/8) - 1;

	sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
	sky2_write32(hw, RB_ADDR(q, RB_START), start);
	sky2_write32(hw, RB_ADDR(q, RB_END), end);
	sky2_write32(hw, RB_ADDR(q, RB_WP), start);
	sky2_write32(hw, RB_ADDR(q, RB_RP), start);

	if (q == Q_R1 || q == Q_R2) {
		u32 space = (endk - startk) * 4096/8;
		u32 tp = space - space/4;

		/* On receive queue's set the thresholds
		 * give receiver priority when > 3/4 full
		 * send pause when down to 2K
		 */
		sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
		sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);

		tp = space - 2048/8;
		sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
		sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
	} else {
		/* Enable store & forward on Tx queue's because
		 * Tx FIFO is only 1K on Yukon
		 */
		sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
	}

	sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
	/* read back — NOTE(review): presumably flushes the posted write
	 * before the caller proceeds; confirm against chip docs
	 */
	sky2_read8(hw, RB_ADDR(q, RB_CTRL));
}
669 |
|
670 |
/* Setup Bus Memory Interface */
/* Take the BMU for queue q out of reset, initialize it, turn the FIFO
 * on and program the default watermark.  Write order matters.
 */
static void sky2_qset(struct sky2_hw *hw, u16 q)
{
	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET);
	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT);
	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON);
	sky2_write32(hw, Q_ADDR(q, Q_WM), BMU_WM_DEFAULT);
}
678 |
|
679 |
/* Setup prefetch unit registers. This is the interface between
 * hardware and driver list elements
 */
/* addr: bus address of the ring; last: index of the final list element. */
static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
			       u64 addr, u32 last)
{
	/* pulse reset, program the ring address (split hi/lo) and the
	 * last index, then switch the prefetch unit on
	 */
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR);
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), addr >> 32);
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), (u32) addr);
	sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last);
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON);

	/* read back — NOTE(review): presumably flushes the posted writes;
	 * confirm
	 */
	sky2_read32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL));
}
694 |
|
695 |
static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2) |
696 |
{ |
697 |
struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod; |
698 |
|
699 |
sky2->tx_prod = (sky2->tx_prod + 1) % TX_RING_SIZE; |
700 |
return le; |
701 |
} |
702 |
|
703 |
/*
 * This is a workaround code taken from SysKonnect sk98lin driver
 * to deal with chip bug on Yukon EC rev 0 in the wraparound case.
 */
/* Publish a new producer index for queue q to the prefetch unit.
 * idx: new index; *last: index previously published (updated on exit);
 * size: number of elements in the ring.
 */
static void sky2_put_idx(struct sky2_hw *hw, unsigned q,
			 u16 idx, u16 *last, u16 size)
{
	wmb();	/* list element writes must complete before the index write */
	if (is_ec_a1(hw) && idx < *last) {
		/* ring just wrapped on a buggy EC A1: walk the hardware to
		 * the end of the list instead of jumping straight to idx
		 */
		u16 hwget = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));

		if (hwget == 0) {
			/* Start prefetching again */
			sky2_write8(hw, Y2_QADDR(q, PREF_UNIT_FIFO_WM), 0xe0);
			goto setnew;
		}

		if (hwget == size - 1) {
			/* set watermark to one list element */
			sky2_write8(hw, Y2_QADDR(q, PREF_UNIT_FIFO_WM), 8);

			/* set put index to first list element */
			sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), 0);
		} else		/* have hardware go to end of list */
			sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX),
				     size - 1);
	} else {
setnew:
		sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
	}
	*last = idx;
	mmiowb();
}
736 |
|
737 |
|
738 |
static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2) |
739 |
{ |
740 |
struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put; |
741 |
sky2->rx_put = (sky2->rx_put + 1) % RX_LE_SIZE; |
742 |
return le; |
743 |
} |
744 |
|
745 |
/* Return high part of DMA address (could be 32 or 64 bit) */ |
746 |
static inline u32 high32(dma_addr_t a) |
747 |
{ |
748 |
return sizeof(a) > sizeof(u32) ? (a >> 16) >> 16 : 0; |
749 |
} |
750 |
|
751 |
/* Build description to hardware about buffer */
/* Post one receive buffer (DMA address map, length sky2->rx_bufsize)
 * to the receive ring, emitting an extra OP_ADDR64 element whenever
 * the upper 32 address bits differ from the previous buffer.
 */
static void sky2_rx_add(struct sky2_port *sky2, dma_addr_t map)
{
	struct sky2_rx_le *le;
	u32 hi = high32(map);
	u16 len = sky2->rx_bufsize;

	if (sky2->rx_addr64 != hi) {
		le = sky2_next_rx(sky2);
		le->addr = cpu_to_le32(hi);
		le->ctrl = 0;
		le->opcode = OP_ADDR64 | HW_OWNER;
		/* remember the high bits the chip will be left with */
		sky2->rx_addr64 = high32(map + len);
	}

	le = sky2_next_rx(sky2);
	le->addr = cpu_to_le32((u32) map);
	le->length = cpu_to_le16(len);
	le->ctrl = 0;
	le->opcode = OP_PACKET | HW_OWNER;
}
772 |
|
773 |
|
774 |
/* Tell chip where to start receive checksum. |
775 |
* Actually has two checksums, but set both same to avoid possible byte |
776 |
* order problems. |
777 |
*/ |
778 |
static void rx_set_checksum(struct sky2_port *sky2) |
779 |
{ |
780 |
struct sky2_rx_le *le; |
781 |
|
782 |
le = sky2_next_rx(sky2); |
783 |
le->addr = (ETH_HLEN << 16) | ETH_HLEN; |
784 |
le->ctrl = 0; |
785 |
le->opcode = OP_TCPSTART | HW_OWNER; |
786 |
|
787 |
sky2_write32(sky2->hw, |
788 |
Q_ADDR(rxqaddr[sky2->port], Q_CSR), |
789 |
sky2->rx_csum ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); |
790 |
|
791 |
} |
792 |
|
793 |
/*
 * The RX Stop command will not work for Yukon-2 if the BMU does not
 * reach the end of packet and since we can't make sure that we have
 * incoming data, we must reset the BMU while it is not doing a DMA
 * transfer. Since it is possible that the RX path is still active,
 * the RX RAM buffer will be stopped first, so any possible incoming
 * data will not trigger a DMA. After the RAM buffer is stopped, the
 * BMU is polled until any DMA in progress is ended and only then it
 * will be reset.
 */
static void sky2_rx_stop(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned rxq = rxqaddr[sky2->port];
	int i;

	/* disable the RAM Buffer receive queue */
	sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD);

	/* wait until the read level catches the write level (drained) */
	for (i = 0; i < 0xffff; i++)
		if (sky2_read8(hw, RB_ADDR(rxq, Q_RSL))
		    == sky2_read8(hw, RB_ADDR(rxq, Q_RL)))
			goto stopped;

	printk(KERN_WARNING PFX "%s: receiver stop failed\n",
	       sky2->netdev->name);
stopped:
	sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);

	/* reset the Rx prefetch unit */
	sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
}
825 |
|
826 |
/* Clean out receive buffer area, assumes receiver hardware stopped */
static void sky2_rx_clean(struct sky2_port *sky2)
{
	unsigned i;

	/* wipe the list elements, then release every posted buffer */
	memset(sky2->rx_le, 0, RX_LE_BYTES);
	for (i = 0; i < sky2->rx_pending; i++) {
		struct ring_info *re = sky2->rx_ring + i;

		if (re->skb) {
			/* undo the DMA mapping before freeing the skb */
			pci_unmap_single(sky2->hw->pdev,
					 re->mapaddr, sky2->rx_bufsize,
					 PCI_DMA_FROMDEVICE);
			kfree_skb(re->skb);
			re->skb = NULL;
		}
	}
}
844 |
|
845 |
/* Basic MII support */
/* Handle the SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG ioctls.
 * Returns -ENODEV while the interface is down (PHY still in reset),
 * -EPERM for unprivileged register writes, and -EOPNOTSUPP for any
 * other command.  PHY access is serialized by phy_sema.
 */
static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	int err = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -ENODEV;	/* Phy still in reset */

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR_MARV;

		/* fallthru */
	case SIOCGMIIREG: {
		u16 val = 0;

		down(&sky2->phy_sema);
		err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val);
		up(&sky2->phy_sema);

		data->val_out = val;
		break;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		down(&sky2->phy_sema);
		err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f,
				   data->val_in);
		up(&sky2->phy_sema);
		break;
	}
	return err;
}
884 |
|
885 |
#ifdef SKY2_VLAN_TAG_USED |
886 |
static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) |
887 |
{ |
888 |
struct sky2_port *sky2 = netdev_priv(dev); |
889 |
struct sky2_hw *hw = sky2->hw; |
890 |
u16 port = sky2->port; |
891 |
|
892 |
spin_lock_bh(&sky2->tx_lock); |
893 |
|
894 |
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_ON); |
895 |
sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_ON); |
896 |
sky2->vlgrp = grp; |
897 |
|
898 |
spin_unlock_bh(&sky2->tx_lock); |
899 |
} |
900 |
|
901 |
/* Remove a VLAN id and disable hardware VLAN offload for this port.
 * NOTE(review): this turns stripping/insertion off for the whole port,
 * not just for one vid — presumably intentional for this generation of
 * the vlan API; confirm against callers. */
static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	u16 port = sky2->port;

	spin_lock_bh(&sky2->tx_lock);

	sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF);
	sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF);
	if (sky2->vlgrp)
		sky2->vlgrp->vlan_devices[vid] = NULL;

	spin_unlock_bh(&sky2->tx_lock);
}
916 |
#endif |
917 |
|
918 |
/* |
919 |
* It appears the hardware has a bug in the FIFO logic that |
920 |
* cause it to hang if the FIFO gets overrun and the receive buffer |
921 |
* is not aligned. ALso alloc_skb() won't align properly if slab |
922 |
* debugging is enabled. |
923 |
*/ |
924 |
static inline struct sk_buff *sky2_alloc_skb(unsigned int size, gfp_t gfp_mask) |
925 |
{ |
926 |
struct sk_buff *skb; |
927 |
|
928 |
skb = alloc_skb(size + RX_SKB_ALIGN, gfp_mask); |
929 |
if (likely(skb)) { |
930 |
unsigned long p = (unsigned long) skb->data; |
931 |
skb_reserve(skb, |
932 |
((p + RX_SKB_ALIGN - 1) & ~(RX_SKB_ALIGN - 1)) - p); |
933 |
} |
934 |
|
935 |
return skb; |
936 |
} |
937 |
|
938 |
/*
 * Allocate and setup receiver buffer pool.
 * In case of 64 bit dma, there are 2X as many list elements
 * available as ring entries
 * and need to reserve one list element so we don't wrap around.
 */
static int sky2_rx_start(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned rxq = rxqaddr[sky2->port];
	int i;

	/* Reset ring indices and program the queue/prefetch unit */
	sky2->rx_put = sky2->rx_next = 0;
	sky2_qset(hw, rxq);
	sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);

	rx_set_checksum(sky2);
	/* Fill the ring with freshly allocated, DMA-mapped buffers */
	for (i = 0; i < sky2->rx_pending; i++) {
		struct ring_info *re = sky2->rx_ring + i;

		re->skb = sky2_alloc_skb(sky2->rx_bufsize, GFP_KERNEL);
		if (!re->skb)
			goto nomem;

		re->mapaddr = pci_map_single(hw->pdev, re->skb->data,
			     sky2->rx_bufsize, PCI_DMA_FROMDEVICE);
		sky2_rx_add(sky2, re->mapaddr);
	}

	/* Tell chip about available buffers */
	sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put);
	sky2->rx_last_put = sky2_read16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX));
	return 0;
nomem:
	/* Partial ring: release whatever was allocated so far */
	sky2_rx_clean(sky2);
	return -ENOMEM;
}
975 |
|
976 |
/* Bring up network interface. */
static int sky2_up(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u32 ramsize, rxspace;
	int err = -ENOMEM;	/* default error for the allocation failures below */

	if (netif_msg_ifup(sky2))
		printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);

	/* must be power of 2 */
	sky2->tx_le = pci_alloc_consistent(hw->pdev,
					   TX_RING_SIZE *
					   sizeof(struct sky2_tx_le),
					   &sky2->tx_le_map);
	if (!sky2->tx_le)
		goto err_out;

	/* Per-element bookkeeping (skb + dma unmap info) for the tx ring */
	sky2->tx_ring = kcalloc(TX_RING_SIZE, sizeof(struct tx_ring_info),
				GFP_KERNEL);
	if (!sky2->tx_ring)
		goto err_out;
	sky2->tx_prod = sky2->tx_cons = 0;

	sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES,
					   &sky2->rx_le_map);
	if (!sky2->rx_le)
		goto err_out;
	memset(sky2->rx_le, 0, RX_LE_BYTES);

	sky2->rx_ring = kcalloc(sky2->rx_pending, sizeof(struct ring_info),
				GFP_KERNEL);
	if (!sky2->rx_ring)
		goto err_out;

	sky2_mac_init(hw, port);

	/* Determine available ram buffer space (in 4K blocks).
	 * Note: not sure about the FE setting below yet
	 */
	if (hw->chip_id == CHIP_ID_YUKON_FE)
		ramsize = 4;
	else
		ramsize = sky2_read8(hw, B2_E_0);

	/* Give transmitter one third (rounded up) */
	rxspace = ramsize - (ramsize + 2) / 3;

	sky2_ramset(hw, rxqaddr[port], 0, rxspace);
	sky2_ramset(hw, txqaddr[port], rxspace, ramsize);

	/* Make sure SyncQ is disabled */
	sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
		    RB_RST_SET);

	sky2_qset(hw, txqaddr[port]);
	/* EC Ultra needs a different Q_AL threshold — value from vendor
	 * driver; meaning of 0x1a0 not documented here */
	if (hw->chip_id == CHIP_ID_YUKON_EC_U)
		sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0);


	sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
			   TX_RING_SIZE - 1);

	err = sky2_rx_start(sky2);
	if (err)
		goto err_out;

	/* Enable interrupts from phy/mac for port */
	hw->intr_mask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2;
	sky2_write32(hw, B0_IMSK, hw->intr_mask);
	return 0;

err_out:
	/* Unified cleanup: each pointer is checked/NULLed so this path
	 * is safe no matter which allocation failed */
	if (sky2->rx_le) {
		pci_free_consistent(hw->pdev, RX_LE_BYTES,
				    sky2->rx_le, sky2->rx_le_map);
		sky2->rx_le = NULL;
	}
	if (sky2->tx_le) {
		pci_free_consistent(hw->pdev,
				    TX_RING_SIZE * sizeof(struct sky2_tx_le),
				    sky2->tx_le, sky2->tx_le_map);
		sky2->tx_le = NULL;
	}
	kfree(sky2->tx_ring);
	kfree(sky2->rx_ring);

	sky2->tx_ring = NULL;
	sky2->rx_ring = NULL;
	return err;
}
1069 |
|
1070 |
/* Modular subtraction in ring */ |
1071 |
static inline int tx_dist(unsigned tail, unsigned head) |
1072 |
{ |
1073 |
return (head - tail) % TX_RING_SIZE; |
1074 |
} |
1075 |
|
1076 |
/* Number of list elements available for next tx */ |
1077 |
static inline int tx_avail(const struct sky2_port *sky2) |
1078 |
{ |
1079 |
return sky2->tx_pending - tx_dist(sky2->tx_cons, sky2->tx_prod); |
1080 |
} |
1081 |
|
1082 |
/* Estimate of number of transmit list elements required */ |
1083 |
static unsigned tx_le_req(const struct sk_buff *skb) |
1084 |
{ |
1085 |
unsigned count; |
1086 |
|
1087 |
count = sizeof(dma_addr_t) / sizeof(u32); |
1088 |
count += skb_shinfo(skb)->nr_frags * count; |
1089 |
|
1090 |
if (skb_shinfo(skb)->tso_size) |
1091 |
++count; |
1092 |
|
1093 |
if (skb->ip_summed == CHECKSUM_HW) |
1094 |
++count; |
1095 |
|
1096 |
return count; |
1097 |
} |
1098 |
|
1099 |
/*
 * Put one packet in ring for transmit.
 * A single packet can generate multiple list elements, and
 * the number of ring elements will probably be less than the number
 * of list elements used.
 *
 * No BH disabling for tx_lock here (like tg3)
 */
static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	struct sky2_tx_le *le = NULL;
	struct tx_ring_info *re;
	unsigned i, len;
	dma_addr_t mapping;
	u32 addr64;
	u16 mss;
	u8 ctrl;

	/* No BH disabling for tx_lock here. We are running in BH disabled
	 * context and TX reclaim runs via poll inside of a software
	 * interrupt, and no related locks in IRQ processing.
	 */
	if (!spin_trylock(&sky2->tx_lock))
		return NETDEV_TX_LOCKED;

	if (unlikely(tx_avail(sky2) < tx_le_req(skb))) {
		/* There is a known but harmless race with lockless tx
		 * and netif_stop_queue.
		 */
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			if (net_ratelimit())
				printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
				       dev->name);
		}
		spin_unlock(&sky2->tx_lock);

		return NETDEV_TX_BUSY;
	}

	if (unlikely(netif_msg_tx_queued(sky2)))
		printk(KERN_DEBUG "%s: tx queued, slot %u, len %d\n",
		       dev->name, sky2->tx_prod, skb->len);

	/* Map the linear (header) portion of the skb for DMA */
	len = skb_headlen(skb);
	mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
	addr64 = high32(mapping);

	/* Remember ring slot of the first element; re->idx below marks
	 * where the whole packet ends for tx completion. */
	re = sky2->tx_ring + sky2->tx_prod;

	/* Send high bits if changed or crosses boundary */
	if (addr64 != sky2->tx_addr64 || high32(mapping + len) != sky2->tx_addr64) {
		le = get_tx_le(sky2);
		le->tx.addr = cpu_to_le32(addr64);
		le->ctrl = 0;
		le->opcode = OP_ADDR64 | HW_OWNER;
		sky2->tx_addr64 = high32(mapping + len);
	}

	/* Check for TCP Segmentation Offload */
	mss = skb_shinfo(skb)->tso_size;
	if (mss != 0) {
		/* just drop the packet if non-linear expansion fails */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb_any(skb);
			goto out_unlock;
		}

		/* Hardware wants mss to include the full header length */
		mss += ((skb->h.th->doff - 5) * 4);	/* TCP options */
		mss += (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
		mss += ETH_HLEN;
	}

	/* Only emit a LRGLEN element when the mss actually changed */
	if (mss != sky2->tx_last_mss) {
		le = get_tx_le(sky2);
		le->tx.tso.size = cpu_to_le16(mss);
		le->tx.tso.rsvd = 0;
		le->opcode = OP_LRGLEN | HW_OWNER;
		le->ctrl = 0;
		sky2->tx_last_mss = mss;
	}

	ctrl = 0;
#ifdef SKY2_VLAN_TAG_USED
	/* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
	if (sky2->vlgrp && vlan_tx_tag_present(skb)) {
		if (!le) {
			le = get_tx_le(sky2);
			le->tx.addr = 0;
			le->opcode = OP_VLAN|HW_OWNER;
			le->ctrl = 0;
		} else
			le->opcode |= OP_VLAN;
		le->length = cpu_to_be16(vlan_tx_tag_get(skb));
		ctrl |= INS_VLAN;
	}
#endif

	/* Handle TCP checksum offload */
	if (skb->ip_summed == CHECKSUM_HW) {
		u16 hdr = skb->h.raw - skb->data;
		u16 offset = hdr + skb->csum;

		ctrl = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
		if (skb->nh.iph->protocol == IPPROTO_UDP)
			ctrl |= UDPTCP;

		le = get_tx_le(sky2);
		le->tx.csum.start = cpu_to_le16(hdr);
		le->tx.csum.offset = cpu_to_le16(offset);
		le->length = 0;	/* initial checksum value */
		le->ctrl = 1;	/* one packet */
		le->opcode = OP_TCPLISW | HW_OWNER;
	}

	/* The data element for the linear part of the packet */
	le = get_tx_le(sky2);
	le->tx.addr = cpu_to_le32((u32) mapping);
	le->length = cpu_to_le16(len);
	le->ctrl = ctrl;
	le->opcode = mss ? (OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER);

	/* Record the transmit mapping info */
	re->skb = skb;
	pci_unmap_addr_set(re, mapaddr, mapping);

	/* One (or two, with ADDR64) additional element per fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		struct tx_ring_info *fre;

		mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);
		addr64 = high32(mapping);
		if (addr64 != sky2->tx_addr64) {
			le = get_tx_le(sky2);
			le->tx.addr = cpu_to_le32(addr64);
			le->ctrl = 0;
			le->opcode = OP_ADDR64 | HW_OWNER;
			sky2->tx_addr64 = addr64;
		}

		le = get_tx_le(sky2);
		le->tx.addr = cpu_to_le32((u32) mapping);
		le->length = cpu_to_le16(frag->size);
		le->ctrl = ctrl;
		le->opcode = OP_BUFFER | HW_OWNER;

		/* Store unmap info in the slot following the previous ones */
		fre = sky2->tx_ring
		    + ((re - sky2->tx_ring) + i + 1) % TX_RING_SIZE;
		pci_unmap_addr_set(fre, mapaddr, mapping);
	}

	/* Mark end of packet: completion will free up to re->idx */
	re->idx = sky2->tx_prod;
	le->ctrl |= EOP;

	sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod,
		     &sky2->tx_last_put, TX_RING_SIZE);

	/* Stop the queue if the next worst-case packet would not fit */
	if (tx_avail(sky2) <= MAX_SKB_TX_LE)
		netif_stop_queue(dev);

out_unlock:
	spin_unlock(&sky2->tx_lock);

	dev->trans_start = jiffies;
	return NETDEV_TX_OK;
}
1268 |
|
1269 |
/* |
1270 |
* Free ring elements from starting at tx_cons until "done" |
1271 |
* |
1272 |
* NB: the hardware will tell us about partial completion of multi-part |
1273 |
* buffers; these are deferred until completion. |
1274 |
*/ |
1275 |
static void sky2_tx_complete(struct sky2_port *sky2, u16 done) |
1276 |
{ |
1277 |
struct net_device *dev = sky2->netdev; |
1278 |
struct pci_dev *pdev = sky2->hw->pdev; |
1279 |
u16 nxt, put; |
1280 |
unsigned i; |
1281 |
|
1282 |
BUG_ON(done >= TX_RING_SIZE); |
1283 |
|
1284 |
if (unlikely(netif_msg_tx_done(sky2))) |
1285 |
printk(KERN_DEBUG "%s: tx done, up to %u\n", |
1286 |
dev->name, done); |
1287 |
|
1288 |
for (put = sky2->tx_cons; put != done; put = nxt) { |
1289 |
struct tx_ring_info *re = sky2->tx_ring + put; |
1290 |
struct sk_buff *skb = re->skb; |
1291 |
|
1292 |
nxt = re->idx; |
1293 |
BUG_ON(nxt >= TX_RING_SIZE); |
1294 |
prefetch(sky2->tx_ring + nxt); |
1295 |
|
1296 |
/* Check for partial status */ |
1297 |
if (tx_dist(put, done) < tx_dist(put, nxt)) |
1298 |
break; |
1299 |
|
1300 |
skb = re->skb; |
1301 |
pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr), |
1302 |
skb_headlen(skb), PCI_DMA_TODEVICE); |
1303 |
|
1304 |
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
1305 |
struct tx_ring_info *fre; |
1306 |
fre = sky2->tx_ring + (put + i + 1) % TX_RING_SIZE; |
1307 |
pci_unmap_page(pdev, pci_unmap_addr(fre, mapaddr), |
1308 |
skb_shinfo(skb)->frags[i].size, |
1309 |
PCI_DMA_TODEVICE); |
1310 |
} |
1311 |
|
1312 |
dev_kfree_skb_any(skb); |
1313 |
} |
1314 |
|
1315 |
sky2->tx_cons = put; |
1316 |
if (netif_queue_stopped(dev) && tx_avail(sky2) > MAX_SKB_TX_LE) |
1317 |
netif_wake_queue(dev); |
1318 |
} |
1319 |
|
1320 |
/* Cleanup all untransmitted buffers, assume transmitter not running.
 * Passing tx_prod as "done" makes sky2_tx_complete() free every
 * outstanding packet in the ring. */
static void sky2_tx_clean(struct sky2_port *sky2)
{
	spin_lock_bh(&sky2->tx_lock);
	sky2_tx_complete(sky2, sky2->tx_prod);
	spin_unlock_bh(&sky2->tx_lock);
}
1327 |
|
1328 |
/* Network shutdown */
static int sky2_down(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 ctrl;

	/* Never really got started! */
	if (!sky2->tx_le)
		return 0;

	if (netif_msg_ifdown(sky2))
		printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);

	/* Stop more packets from being queued */
	netif_stop_queue(dev);

	/* Disable port IRQ */
	local_irq_disable();
	hw->intr_mask &= ~((sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2);
	sky2_write32(hw, B0_IMSK, hw->intr_mask);
	local_irq_enable();

	/* Let any pending phy task (sky2_phy_task) finish */
	flush_scheduled_work();

	sky2_phy_reset(hw, port);

	/* Stop transmitter */
	sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
	sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR));	/* read back to flush PCI write */

	sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
		     RB_RST_SET | RB_DIS_OP_MD);

	/* Disable MAC receive and transmit */
	ctrl = gma_read16(hw, port, GM_GP_CTRL);
	ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
	gma_write16(hw, port, GM_GP_CTRL, ctrl);

	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);

	/* Workaround shared GMAC reset */
	if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0
	      && port == 0 && hw->dev[1] && netif_running(hw->dev[1])))
		sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);

	/* Disable Force Sync bit and Enable Alloc bit */
	sky2_write8(hw, SK_REG(port, TXA_CTRL),
		    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);

	/* Stop Interval Timer and Limit Counter of Tx Arbiter */
	sky2_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
	sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);

	/* Reset the PCI FIFO of the async Tx queue */
	sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR),
		     BMU_RST_SET | BMU_FIFO_RST);

	/* Reset the Tx prefetch units */
	sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL),
		     PREF_UNIT_RST_SET);

	sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);

	sky2_rx_stop(sky2);

	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
	sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);

	/* turn off LED's */
	sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);

	/* Make sure no in-flight interrupt handler still touches this port */
	synchronize_irq(hw->pdev->irq);

	sky2_tx_clean(sky2);
	sky2_rx_clean(sky2);

	/* Free descriptor rings and bookkeeping arrays */
	pci_free_consistent(hw->pdev, RX_LE_BYTES,
			    sky2->rx_le, sky2->rx_le_map);
	kfree(sky2->rx_ring);

	pci_free_consistent(hw->pdev,
			    TX_RING_SIZE * sizeof(struct sky2_tx_le),
			    sky2->tx_le, sky2->tx_le_map);
	kfree(sky2->tx_ring);

	/* NULL pointers so a repeated sky2_down() exits early above */
	sky2->tx_le = NULL;
	sky2->rx_le = NULL;

	sky2->rx_ring = NULL;
	sky2->tx_ring = NULL;

	return 0;
}
1422 |
|
1423 |
/* Decode link speed from the PHY specific status register value. */
static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux)
{
	u16 speed_bits;

	if (!hw->copper)
		return SPEED_1000;	/* fiber links run at gigabit only */

	if (hw->chip_id == CHIP_ID_YUKON_FE)
		return (aux & PHY_M_PS_SPEED_100) ? SPEED_100 : SPEED_10;

	speed_bits = aux & PHY_M_PS_SPEED_MSK;
	if (speed_bits == PHY_M_PS_SPEED_1000)
		return SPEED_1000;
	if (speed_bits == PHY_M_PS_SPEED_100)
		return SPEED_100;
	return SPEED_10;
}
1440 |
|
1441 |
/* Handle link-up: enable MAC rx/tx, unmask PHY interrupts, start the
 * queue and set the link LEDs. */
static void sky2_link_up(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 reg;

	/* Enable Transmit FIFO Underrun */
	sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);

	reg = gma_read16(hw, port, GM_GP_CTRL);
	if (sky2->duplex == DUPLEX_FULL || sky2->autoneg == AUTONEG_ENABLE)
		reg |= GM_GPCR_DUP_FULL;

	/* enable Rx/Tx */
	reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
	gma_write16(hw, port, GM_GP_CTRL, reg);
	gma_read16(hw, port, GM_GP_CTRL);	/* read back to post the write */

	gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);

	netif_carrier_on(sky2->netdev);
	netif_wake_queue(sky2->netdev);

	/* Turn on link LED */
	sky2_write8(hw, SK_REG(port, LNK_LED_REG),
		    LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);

	/* XL uses PHY-driven LEDs: program per-speed LED control on page 3 */
	if (hw->chip_id == CHIP_ID_YUKON_XL) {
		u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);

		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, PHY_M_LEDC_LOS_CTRL(1) |	/* LINK/ACT */
			     PHY_M_LEDC_INIT_CTRL(sky2->speed ==
						  SPEED_10 ? 7 : 0) |
			     PHY_M_LEDC_STA1_CTRL(sky2->speed ==
						  SPEED_100 ? 7 : 0) |
			     PHY_M_LEDC_STA0_CTRL(sky2->speed ==
						  SPEED_1000 ? 7 : 0));
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);	/* restore page */
	}

	if (netif_msg_link(sky2))
		printk(KERN_INFO PFX
		       "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
		       sky2->netdev->name, sky2->speed,
		       sky2->duplex == DUPLEX_FULL ? "full" : "half",
		       (sky2->tx_pause && sky2->rx_pause) ? "both" :
		       sky2->tx_pause ? "tx" : sky2->rx_pause ? "rx" : "none");
}
1490 |
|
1491 |
/* Handle link-down: disable MAC rx/tx, stop the queue, turn off the
 * link LED and re-init the PHY for the next negotiation. */
static void sky2_link_down(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 reg;

	/* Mask all PHY interrupts while the link is down */
	gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);

	reg = gma_read16(hw, port, GM_GP_CTRL);
	reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	gma_write16(hw, port, GM_GP_CTRL, reg);
	gma_read16(hw, port, GM_GP_CTRL);	/* PCI post */

	if (sky2->rx_pause && !sky2->tx_pause) {
		/* restore Asymmetric Pause bit */
		gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
			     gm_phy_read(hw, port, PHY_MARV_AUNE_ADV)
			     | PHY_M_AN_ASP);
	}

	netif_carrier_off(sky2->netdev);
	netif_stop_queue(sky2->netdev);

	/* Turn off link LED */
	sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);

	if (netif_msg_link(sky2))
		printk(KERN_INFO PFX "%s: Link is down.\n", sky2->netdev->name);
	/* Restart PHY so autonegotiation can find the next link partner */
	sky2_phy_init(hw, port);
}
1521 |
|
1522 |
/* Process completed autonegotiation.
 * Fills in sky2->speed/duplex/rx_pause/tx_pause and programs the
 * MAC pause mode.  Returns 0 on success, -1 when the negotiated link
 * is unusable (remote fault, master/slave fault, or unresolved
 * speed/duplex).
 *
 * Fix: the error printks were missing the terminating newline, which
 * made the next kernel log line run together with the message.
 */
static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 lpa;

	lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP);

	if (lpa & PHY_M_AN_RF) {
		printk(KERN_ERR PFX "%s: remote fault\n", sky2->netdev->name);
		return -1;
	}

	if (hw->chip_id != CHIP_ID_YUKON_FE &&
	    gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) {
		printk(KERN_ERR PFX "%s: master/slave fault\n",
		       sky2->netdev->name);
		return -1;
	}

	if (!(aux & PHY_M_PS_SPDUP_RES)) {
		printk(KERN_ERR PFX "%s: speed/duplex mismatch\n",
		       sky2->netdev->name);
		return -1;
	}

	sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;

	sky2->speed = sky2_phy_speed(hw, aux);

	/* Pause bits are offset (9..8) */
	if (hw->chip_id == CHIP_ID_YUKON_XL)
		aux >>= 6;

	sky2->rx_pause = (aux & PHY_M_PS_RX_P_EN) != 0;
	sky2->tx_pause = (aux & PHY_M_PS_TX_P_EN) != 0;

	/* Enable MAC pause only when negotiated and not on a slow
	 * half-duplex link */
	if ((sky2->tx_pause || sky2->rx_pause)
	    && !(sky2->speed < SPEED_1000 && sky2->duplex == DUPLEX_HALF))
		sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
	else
		sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);

	return 0;
}
1567 |
|
1568 |
/*
 * Interrupt from PHY are handled outside of interrupt context
 * because accessing phy registers requires spin wait which might
 * cause excess interrupt latency.
 */
static void sky2_phy_task(void *arg)
{
	struct sky2_port *sky2 = arg;
	struct sky2_hw *hw = sky2->hw;
	u16 istatus, phystat;

	down(&sky2->phy_sema);
	/* Reading INT_STAT also acknowledges the PHY interrupt */
	istatus = gm_phy_read(hw, sky2->port, PHY_MARV_INT_STAT);
	phystat = gm_phy_read(hw, sky2->port, PHY_MARV_PHY_STAT);

	if (netif_msg_intr(sky2))
		printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n",
		       sky2->netdev->name, istatus, phystat);

	if (istatus & PHY_M_IS_AN_COMPL) {
		/* Autonegotiation finished: bring the link up only if the
		 * negotiated result is usable */
		if (sky2_autoneg_done(sky2, phystat) == 0)
			sky2_link_up(sky2);
		goto out;
	}

	if (istatus & PHY_M_IS_LSP_CHANGE)
		sky2->speed = sky2_phy_speed(hw, phystat);

	if (istatus & PHY_M_IS_DUP_CHANGE)
		sky2->duplex =
		    (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;

	if (istatus & PHY_M_IS_LST_CHANGE) {
		if (phystat & PHY_M_PS_LINK_UP)
			sky2_link_up(sky2);
		else
			sky2_link_down(sky2);
	}
out:
	up(&sky2->phy_sema);

	/* Re-enable this port's PHY interrupt, masked off by the IRQ
	 * handler when this task was scheduled */
	local_irq_disable();
	hw->intr_mask |= (sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2;
	sky2_write32(hw, B0_IMSK, hw->intr_mask);
	local_irq_enable();
}
1614 |
|
1615 |
|
1616 |
/* Transmit timeout is only called if we are running, carrier is up
 * and tx queue is full (stopped).
 */
static void sky2_tx_timeout(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned txq = txqaddr[sky2->port];
	u16 ridx;

	/* Maybe we just missed a status interrupt: reap completions
	 * up to the hardware's current report index */
	spin_lock(&sky2->tx_lock);
	ridx = sky2_read16(hw,
			   sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX);
	sky2_tx_complete(sky2, ridx);
	spin_unlock(&sky2->tx_lock);

	/* If the completion above made room, the queue was rewoken by
	 * sky2_tx_complete and no reset is needed */
	if (!netif_queue_stopped(dev)) {
		if (net_ratelimit())
			pr_info(PFX "transmit interrupt missed? recovered\n");
		return;
	}

	if (netif_msg_timer(sky2))
		printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);

	/* Hard recovery: stop the BMU, reset prefetch, drop everything
	 * still queued and re-init the transmit queue */
	sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP);
	sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);

	sky2_tx_clean(sky2);

	sky2_qset(hw, txq);
	sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1);
}
1650 |
|
1651 |
|
1652 |
#define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) |
1653 |
/* Want receive buffer size to be multiple of 64 bits, and incl room for vlan */ |
1654 |
static inline unsigned sky2_buf_size(int mtu) |
1655 |
{ |
1656 |
return roundup(mtu + ETH_HLEN + 4, 8); |
1657 |
} |
1658 |
|
1659 |
/* Change device MTU.  If the interface is running, the receiver must
 * be stopped, the ring reallocated with new-sized buffers, and the
 * MAC reprogrammed for jumbo mode before restarting. */
static int sky2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	int err;
	u16 ctl, mode;

	if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
		return -EINVAL;

	/* EC Ultra does not support jumbo frames here */
	if (hw->chip_id == CHIP_ID_YUKON_EC_U && new_mtu > ETH_DATA_LEN)
		return -EINVAL;

	/* Interface down: just record the new MTU */
	if (!netif_running(dev)) {
		dev->mtu = new_mtu;
		return 0;
	}

	/* Mask all interrupts while the receiver is reworked */
	sky2_write32(hw, B0_IMSK, 0);

	dev->trans_start = jiffies;	/* prevent tx timeout */
	netif_stop_queue(dev);
	netif_poll_disable(hw->dev[0]);	/* poll runs on port 0's device */

	/* Disable MAC receive, then drain and free the rx ring */
	ctl = gma_read16(hw, sky2->port, GM_GP_CTRL);
	gma_write16(hw, sky2->port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
	sky2_rx_stop(sky2);
	sky2_rx_clean(sky2);

	dev->mtu = new_mtu;
	sky2->rx_bufsize = sky2_buf_size(new_mtu);
	mode = DATA_BLIND_VAL(DATA_BLIND_DEF) |
		GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);

	if (dev->mtu > ETH_DATA_LEN)
		mode |= GM_SMOD_JUMBO_ENA;

	gma_write16(hw, sky2->port, GM_SERIAL_MODE, mode);

	sky2_write8(hw, RB_ADDR(rxqaddr[sky2->port], RB_CTRL), RB_ENA_OP_MD);

	/* Refill the ring with new-size buffers and restart */
	err = sky2_rx_start(sky2);
	sky2_write32(hw, B0_IMSK, hw->intr_mask);

	if (err)
		dev_close(dev);	/* allocation failed: take interface down */
	else {
		gma_write16(hw, sky2->port, GM_GP_CTRL, ctl);	/* re-enable rx */

		netif_poll_enable(hw->dev[0]);
		netif_wake_queue(dev);
	}

	return err;
}
1714 |
|
1715 |
/*
 * Receive one packet.
 * For small packets or errors, just reuse existing skb.
 * For larger packets, get new buffer.
 *
 * Returns the skb to hand up the stack, or NULL if the ring slot was
 * resubmitted without producing a packet (error, bad length, or
 * allocation failure).
 */
static struct sk_buff *sky2_receive(struct sky2_port *sky2,
				    u16 length, u32 status)
{
	struct ring_info *re = sky2->rx_ring + sky2->rx_next;
	struct sk_buff *skb = NULL;

	if (unlikely(netif_msg_rx_status(sky2)))
		printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n",
		       sky2->netdev->name, sky2->rx_next, status, length);

	sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
	prefetch(sky2->rx_ring + sky2->rx_next);

	if (status & GMR_FS_ANY_ERR)
		goto error;

	if (!(status & GMR_FS_RX_OK))
		goto resubmit;

	/* high half of status carries the frame length; it must match
	 * the status-ring length and fit in our buffer */
	if ((status >> 16) != length || length > sky2->rx_bufsize)
		goto oversize;

	if (length < copybreak) {
		/* Small frame: copy into a fresh small skb and recycle
		 * the mapped ring buffer in place */
		skb = alloc_skb(length + 2, GFP_ATOMIC);
		if (!skb)
			goto resubmit;

		skb_reserve(skb, 2);	/* align IP header */
		pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->mapaddr,
					    length, PCI_DMA_FROMDEVICE);
		memcpy(skb->data, re->skb->data, length);
		skb->ip_summed = re->skb->ip_summed;
		skb->csum = re->skb->csum;
		pci_dma_sync_single_for_device(sky2->hw->pdev, re->mapaddr,
					       length, PCI_DMA_FROMDEVICE);
	} else {
		/* Large frame: hand the ring skb up and replace it with
		 * a newly allocated, newly mapped buffer */
		struct sk_buff *nskb;

		nskb = sky2_alloc_skb(sky2->rx_bufsize, GFP_ATOMIC);
		if (!nskb)
			goto resubmit;

		skb = re->skb;
		re->skb = nskb;
		pci_unmap_single(sky2->hw->pdev, re->mapaddr,
				 sky2->rx_bufsize, PCI_DMA_FROMDEVICE);
		prefetch(skb->data);

		re->mapaddr = pci_map_single(sky2->hw->pdev, nskb->data,
					     sky2->rx_bufsize, PCI_DMA_FROMDEVICE);
	}

	skb_put(skb, length);
resubmit:
	re->skb->ip_summed = CHECKSUM_NONE;
	sky2_rx_add(sky2, re->mapaddr);

	/* Tell receiver about new buffers. */
	sky2_put_idx(sky2->hw, rxqaddr[sky2->port], sky2->rx_put,
		     &sky2->rx_last_put, RX_LE_SIZE);

	return skb;

oversize:
	++sky2->net_stats.rx_over_errors;
	goto resubmit;

error:
	++sky2->net_stats.rx_errors;

	if (netif_msg_rx_err(sky2) && net_ratelimit())
		printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
		       sky2->netdev->name, status, length);

	/* Classify the error for interface statistics */
	if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE))
		sky2->net_stats.rx_length_errors++;
	if (status & GMR_FS_FRAGMENT)
		sky2->net_stats.rx_frame_errors++;
	if (status & GMR_FS_CRC_ERR)
		sky2->net_stats.rx_crc_errors++;
	if (status & GMR_FS_RX_FF_OV)
		sky2->net_stats.rx_fifo_errors++;

	goto resubmit;
}
1805 |
|
1806 |
/* |
1807 |
* Check for transmit complete |
1808 |
*/ |
1809 |
#define TX_NO_STATUS 0xffff |
1810 |
|
1811 |
static void sky2_tx_check(struct sky2_hw *hw, int port, u16 last) |
1812 |
{ |
1813 |
if (last != TX_NO_STATUS) { |
1814 |
struct net_device *dev = hw->dev[port]; |
1815 |
if (dev && netif_running(dev)) { |
1816 |
struct sky2_port *sky2 = netdev_priv(dev); |
1817 |
|
1818 |
spin_lock(&sky2->tx_lock); |
1819 |
sky2_tx_complete(sky2, last); |
1820 |
spin_unlock(&sky2->tx_lock); |
1821 |
} |
1822 |
} |
1823 |
} |
1824 |
|
1825 |
/*
 * Both ports share the same status interrupt, therefore there is only
 * one poll routine.
 */
static int sky2_poll(struct net_device *dev0, int *budget)
{
	struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw;
	unsigned int to_do = min(dev0->quota, *budget);
	unsigned int work_done = 0;
	u16 hwidx;
	/* Last reported TX completion index per port; sentinel = none */
	u16 tx_done[2] = { TX_NO_STATUS, TX_NO_STATUS };

	hwidx = sky2_read16(hw, STAT_PUT_IDX);
	BUG_ON(hwidx >= STATUS_RING_SIZE);
	rmb();	/* read index before reading status ring contents */

	while (hwidx != hw->st_idx) {
		struct sky2_status_le *le = hw->st_le + hw->st_idx;
		struct net_device *dev;
		struct sky2_port *sky2;
		struct sk_buff *skb;
		u32 status;
		u16 length;

		/* NOTE(review): this re-assignment duplicates the
		 * initializer above; harmless but redundant */
		le = hw->st_le + hw->st_idx;
		hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE;
		prefetch(hw->st_le + hw->st_idx);

		BUG_ON(le->link >= 2);
		dev = hw->dev[le->link];
		if (dev == NULL || !netif_running(dev))
			continue;

		sky2 = netdev_priv(dev);
		status = le32_to_cpu(le->status);
		length = le16_to_cpu(le->length);

		switch (le->opcode & ~HW_OWNER) {
		case OP_RXSTAT:
			skb = sky2_receive(sky2, length, status);
			if (!skb)
				break;

			skb->dev = dev;
			skb->protocol = eth_type_trans(skb, dev);
			dev->last_rx = jiffies;

#ifdef SKY2_VLAN_TAG_USED
			/* Tag came in an earlier OP_RXVLAN/OP_RXCHKSVLAN */
			if (sky2->vlgrp && (status & GMR_FS_VLAN)) {
				vlan_hwaccel_receive_skb(skb,
							 sky2->vlgrp,
							 be16_to_cpu(sky2->rx_tag));
			} else
#endif
				netif_receive_skb(skb);

			if (++work_done >= to_do)
				goto exit_loop;
			break;

#ifdef SKY2_VLAN_TAG_USED
		case OP_RXVLAN:
			sky2->rx_tag = length;
			break;

		case OP_RXCHKSVLAN:
			sky2->rx_tag = length;
			/* fall through */
#endif
		case OP_RXCHKS:
			/* Checksum status arrives before the matching
			 * OP_RXSTAT, so it applies to rx_next's skb */
			skb = sky2->rx_ring[sky2->rx_next].skb;
			skb->ip_summed = CHECKSUM_HW;
			skb->csum = le16_to_cpu(status);
			break;

		case OP_TXINDEXLE:
			/* TX index reports status for both ports */
			tx_done[0] = status & 0xffff;
			tx_done[1] = ((status >> 24) & 0xff)
				| (u16)(length & 0xf) << 8;
			break;

		default:
			if (net_ratelimit())
				printk(KERN_WARNING PFX
				       "unknown status opcode 0x%x\n", le->opcode);
			break;
		}
	}

exit_loop:
	sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);

	/* Reap TX completions reported during this poll */
	sky2_tx_check(hw, 0, tx_done[0]);
	sky2_tx_check(hw, 1, tx_done[1]);

	if (sky2_read16(hw, STAT_PUT_IDX) == hw->st_idx) {
		/* Status ring fully drained: need to restart TX timer */
		if (is_ec_a1(hw)) {
			sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
			sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
		}

		netif_rx_complete(dev0);
		hw->intr_mask |= Y2_IS_STAT_BMU;
		sky2_write32(hw, B0_IMSK, hw->intr_mask);
		return 0;	/* done: leave polling mode */
	} else {
		/* More work pending: stay in polling mode */
		*budget -= work_done;
		dev0->quota -= work_done;
		return 1;
	}
}
1938 |
|
1939 |
/* Handle per-port hardware error bits. The caller shifts the status so
 * both ports are tested against the same Y2_IS_*1 masks; each detected
 * condition is logged (rate-limited) and its interrupt cleared.
 */
static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
{
	struct net_device *dev = hw->dev[port];

	if (net_ratelimit())
		printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n",
		       dev->name, status);

	if (status & Y2_IS_PAR_RD1) {
		if (net_ratelimit())
			printk(KERN_ERR PFX "%s: ram data read parity error\n",
			       dev->name);
		/* Clear IRQ */
		sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
	}

	if (status & Y2_IS_PAR_WR1) {
		if (net_ratelimit())
			printk(KERN_ERR PFX "%s: ram data write parity error\n",
			       dev->name);

		sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
	}

	if (status & Y2_IS_PAR_MAC1) {
		if (net_ratelimit())
			printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name);
		sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
	}

	if (status & Y2_IS_PAR_RX1) {
		if (net_ratelimit())
			printk(KERN_ERR PFX "%s: RX parity error\n", dev->name);
		sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
	}

	if (status & Y2_IS_TCP_TXA1) {
		if (net_ratelimit())
			printk(KERN_ERR PFX "%s: TCP segmentation error\n",
			       dev->name);
		sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
	}
}
1982 |
|
1983 |
/* Top-level hardware error interrupt: handle chip-global conditions
 * (timestamp overflow, PCI/PCI-Express errors), then dispatch the
 * per-port error bits to sky2_hw_error() for each port.
 */
static void sky2_hw_intr(struct sky2_hw *hw)
{
	u32 status = sky2_read32(hw, B0_HWE_ISRC);

	if (status & Y2_IS_TIST_OV)
		sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
		u16 pci_err;

		pci_read_config_word(hw->pdev, PCI_STATUS, &pci_err);
		if (net_ratelimit())
			printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n",
			       pci_name(hw->pdev), pci_err);

		/* config writes are gated by TST_CTRL1 on this chip */
		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_write_config_word(hw->pdev, PCI_STATUS,
				      pci_err | PCI_STATUS_ERROR_BITS);
		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	if (status & Y2_IS_PCI_EXP) {
		/* PCI-Express uncorrectable Error occurred */
		u32 pex_err;

		pci_read_config_dword(hw->pdev, PEX_UNC_ERR_STAT, &pex_err);

		if (net_ratelimit())
			printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
			       pci_name(hw->pdev), pex_err);

		/* clear the interrupt */
		sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_write_config_dword(hw->pdev, PEX_UNC_ERR_STAT,
				       0xffffffffUL);
		sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

		/* a fatal PEX error cannot be cleared; mask it so it does
		 * not storm */
		if (pex_err & PEX_FATAL_ERRORS) {
			u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK);
			hwmsk &= ~Y2_IS_PCI_EXP;
			sky2_write32(hw, B0_HWE_IMSK, hwmsk);
		}
	}

	/* port 0 bits are in the low byte, port 1 in the next byte */
	if (status & Y2_HWE_L1_MASK)
		sky2_hw_error(hw, 0, status);
	status >>= 8;
	if (status & Y2_HWE_L1_MASK)
		sky2_hw_error(hw, 1, status);
}
2033 |
|
2034 |
/* GMAC interrupt for one port: count RX/TX FIFO overrun/underrun in
 * the netdev statistics and clear the FIFO condition.
 */
static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
{
	struct net_device *dev = hw->dev[port];
	struct sky2_port *sky2 = netdev_priv(dev);
	u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC));

	if (netif_msg_intr(sky2))
		printk(KERN_INFO PFX "%s: mac interrupt status 0x%x\n",
		       dev->name, status);

	if (status & GM_IS_RX_FF_OR) {
		++sky2->net_stats.rx_fifo_errors;
		sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
	}

	if (status & GM_IS_TX_FF_UR) {
		++sky2->net_stats.tx_fifo_errors;
		sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
	}
}
2054 |
|
2055 |
/* PHY interrupt: mask further PHY interrupts for this port and defer
 * the (sleeping) PHY register work to process context via phy_task.
 */
static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
{
	struct net_device *dev = hw->dev[port];
	struct sky2_port *sky2 = netdev_priv(dev);

	hw->intr_mask &= ~(port == 0 ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2);
	sky2_write32(hw, B0_IMSK, hw->intr_mask);
	schedule_work(&sky2->phy_task);
}
2064 |
|
2065 |
/* Shared interrupt handler for the whole chip (both ports).
 * Reads the interrupt source, dispatches hardware/MAC/PHY conditions,
 * and schedules NAPI polling (on the port-0 netdev, which polls for
 * both ports) for status BMU work.
 */
static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct sky2_hw *hw = dev_id;
	struct net_device *dev0 = hw->dev[0];
	u32 status;

	status = sky2_read32(hw, B0_Y2_SP_ISRC2);
	/* 0 = not ours (shared line); ~0 = hardware gone/removed */
	if (status == 0 || status == ~0)
		return IRQ_NONE;

	if (status & Y2_IS_HW_ERR)
		sky2_hw_intr(hw);

	/* Do NAPI for Rx and Tx status */
	if (status & Y2_IS_STAT_BMU) {
		/* mask the status interrupt until the poll routine
		 * finishes and re-enables it */
		hw->intr_mask &= ~Y2_IS_STAT_BMU;
		sky2_write32(hw, B0_IMSK, hw->intr_mask);

		if (likely(__netif_rx_schedule_prep(dev0))) {
			prefetch(&hw->st_le[hw->st_idx]);
			__netif_rx_schedule(dev0);
		}
	}

	if (status & Y2_IS_IRQ_PHY1)
		sky2_phy_intr(hw, 0);

	if (status & Y2_IS_IRQ_PHY2)
		sky2_phy_intr(hw, 1);

	if (status & Y2_IS_IRQ_MAC1)
		sky2_mac_intr(hw, 0);

	if (status & Y2_IS_IRQ_MAC2)
		sky2_mac_intr(hw, 1);

	sky2_write32(hw, B0_Y2_SP_ICR, 2);

	/* read flushes posted writes before returning from the handler */
	sky2_read32(hw, B0_IMSK);

	return IRQ_HANDLED;
}
2107 |
|
2108 |
#ifdef CONFIG_NET_POLL_CONTROLLER |
2109 |
static void sky2_netpoll(struct net_device *dev) |
2110 |
{ |
2111 |
struct sky2_port *sky2 = netdev_priv(dev); |
2112 |
|
2113 |
sky2_intr(sky2->hw->pdev->irq, sky2->hw, NULL); |
2114 |
} |
2115 |
#endif |
2116 |
|
2117 |
/* Chip internal frequency for clock calculations */ |
2118 |
static inline u32 sky2_mhz(const struct sky2_hw *hw) |
2119 |
{ |
2120 |
switch (hw->chip_id) { |
2121 |
case CHIP_ID_YUKON_EC: |
2122 |
case CHIP_ID_YUKON_EC_U: |
2123 |
return 125; /* 125 Mhz */ |
2124 |
case CHIP_ID_YUKON_FE: |
2125 |
return 100; /* 100 Mhz */ |
2126 |
default: /* YUKON_XL */ |
2127 |
return 156; /* 156 Mhz */ |
2128 |
} |
2129 |
} |
2130 |
|
2131 |
/* Convert microseconds to chip clock ticks. */
static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us)
{
	u32 mhz = sky2_mhz(hw);

	return us * mhz;
}
2135 |
|
2136 |
/* Convert chip clock ticks to microseconds (integer division). */
static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk)
{
	u32 mhz = sky2_mhz(hw);

	return clk / mhz;
}
2140 |
|
2141 |
|
2142 |
/* Full chip reset and basic initialization: identify the chip,
 * allocate the status ring, clear PCI/PEX errors, probe the number of
 * ports and media type, program the RAM interface and the status unit
 * timers. Returns 0 on success or a negative errno.
 *
 * The register write order below follows the vendor (SysKonnect)
 * initialization sequence — do not reorder.
 */
static int sky2_reset(struct sky2_hw *hw)
{
	u32 ctst;
	u16 status;
	u8 t8, pmd_type;
	int i;

	/* NOTE(review): ctst is read but never used afterwards */
	ctst = sky2_read32(hw, B0_CTST);

	sky2_write8(hw, B0_CTST, CS_RST_CLR);
	hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
	if (hw->chip_id < CHIP_ID_YUKON_XL || hw->chip_id > CHIP_ID_YUKON_FE) {
		printk(KERN_ERR PFX "%s: unsupported chip type 0x%x\n",
		       pci_name(hw->pdev), hw->chip_id);
		return -EOPNOTSUPP;
	}

	/* ring for status responses */
	hw->st_le = pci_alloc_consistent(hw->pdev, STATUS_LE_BYTES,
					 &hw->st_dma);
	if (!hw->st_le)
		return -ENOMEM;

	/* disable ASF */
	if (hw->chip_id <= CHIP_ID_YUKON_EC) {
		sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
		sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
	}

	/* do a SW reset */
	sky2_write8(hw, B0_CTST, CS_RST_SET);
	sky2_write8(hw, B0_CTST, CS_RST_CLR);

	/* clear PCI errors, if any */
	pci_read_config_word(hw->pdev, PCI_STATUS, &status);
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
	pci_write_config_word(hw->pdev, PCI_STATUS,
			      status | PCI_STATUS_ERROR_BITS);

	sky2_write8(hw, B0_CTST, CS_MRST_CLR);

	/* clear any PEX errors */
	if (is_pciex(hw)) {
		u16 lstat;
		pci_write_config_dword(hw->pdev, PEX_UNC_ERR_STAT,
				       0xffffffffUL);
		/* read clears the link status — value intentionally unused */
		pci_read_config_word(hw->pdev, PEX_LNK_STAT, &lstat);
	}

	/* 'L' and 'S' PMD types are fiber; everything else is copper */
	pmd_type = sky2_read8(hw, B2_PMD_TYP);
	hw->copper = !(pmd_type == 'L' || pmd_type == 'S');

	hw->ports = 1;
	t8 = sky2_read8(hw, B2_Y2_HW_RES);
	if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
		if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
			++hw->ports;
	}
	hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;

	sky2_set_power_state(hw, PCI_D0);

	for (i = 0; i < hw->ports; i++) {
		sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
		sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
	}

	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	/* Clear I2C IRQ noise */
	sky2_write32(hw, B2_I2C_IRQ, 1);

	/* turn off hardware timer (unused) */
	sky2_write8(hw, B2_TI_CTRL, TIM_STOP);
	sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);

	sky2_write8(hw, B0_Y2LED, LED_STAT_ON);

	/* Turn off descriptor polling */
	sky2_write32(hw, B28_DPT_CTRL, DPT_STOP);

	/* Turn off receive timestamp */
	sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP);
	sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	/* enable the Tx Arbiters */
	for (i = 0; i < hw->ports; i++)
		sky2_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);

	/* Initialize ram interface */
	for (i = 0; i < hw->ports; i++) {
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);

		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53);
	}

	sky2_write32(hw, B0_HWE_IMSK, Y2_HWE_ALL_MASK);

	for (i = 0; i < hw->ports; i++)
		sky2_phy_reset(hw, i);

	memset(hw->st_le, 0, STATUS_LE_BYTES);
	hw->st_idx = 0;

	sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET);
	sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR);

	sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma);
	sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32);

	/* Set the list last index */
	sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1);

	/* These status setup values are copied from SysKonnect's driver */
	if (is_ec_a1(hw)) {
		/* WA for dev. #4.3 */
		sky2_write16(hw, STAT_TX_IDX_TH, 0xfff);	/* Tx Threshold */

		/* set Status-FIFO watermark */
		sky2_write8(hw, STAT_FIFO_WM, 0x21);	/* WA for dev. #4.18 */

		/* set Status-FIFO ISR watermark */
		sky2_write8(hw, STAT_FIFO_ISR_WM, 0x07);	/* WA for dev. #4.18 */
		sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 10000));
	} else {
		sky2_write16(hw, STAT_TX_IDX_TH, 10);
		sky2_write8(hw, STAT_FIFO_WM, 16);

		/* set Status-FIFO ISR watermark */
		if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
			sky2_write8(hw, STAT_FIFO_ISR_WM, 4);
		else
			sky2_write8(hw, STAT_FIFO_ISR_WM, 16);

		sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
		sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100));
		sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
	}

	/* enable status unit */
	sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON);

	sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
	sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
	sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);

	return 0;
}
2301 |
|
2302 |
static u32 sky2_supported_modes(const struct sky2_hw *hw) |
2303 |
{ |
2304 |
u32 modes; |
2305 |
if (hw->copper) { |
2306 |
modes = SUPPORTED_10baseT_Half |
2307 |
| SUPPORTED_10baseT_Full |
2308 |
| SUPPORTED_100baseT_Half |
2309 |
| SUPPORTED_100baseT_Full |
2310 |
| SUPPORTED_Autoneg | SUPPORTED_TP; |
2311 |
|
2312 |
if (hw->chip_id != CHIP_ID_YUKON_FE) |
2313 |
modes |= SUPPORTED_1000baseT_Half |
2314 |
| SUPPORTED_1000baseT_Full; |
2315 |
} else |
2316 |
modes = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE |
2317 |
| SUPPORTED_Autoneg; |
2318 |
return modes; |
2319 |
} |
2320 |
|
2321 |
static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) |
2322 |
{ |
2323 |
struct sky2_port *sky2 = netdev_priv(dev); |
2324 |
struct sky2_hw *hw = sky2->hw; |
2325 |
|
2326 |
ecmd->transceiver = XCVR_INTERNAL; |
2327 |
ecmd->supported = sky2_supported_modes(hw); |
2328 |
ecmd->phy_address = PHY_ADDR_MARV; |
2329 |
if (hw->copper) { |
2330 |
ecmd->supported = SUPPORTED_10baseT_Half |
2331 |
| SUPPORTED_10baseT_Full |
2332 |
| SUPPORTED_100baseT_Half |
2333 |
| SUPPORTED_100baseT_Full |
2334 |
| SUPPORTED_1000baseT_Half |
2335 |
| SUPPORTED_1000baseT_Full |
2336 |
| SUPPORTED_Autoneg | SUPPORTED_TP; |
2337 |
ecmd->port = PORT_TP; |
2338 |
} else |
2339 |
ecmd->port = PORT_FIBRE; |
2340 |
|
2341 |
ecmd->advertising = sky2->advertising; |
2342 |
ecmd->autoneg = sky2->autoneg; |
2343 |
ecmd->speed = sky2->speed; |
2344 |
ecmd->duplex = sky2->duplex; |
2345 |
return 0; |
2346 |
} |
2347 |
|
2348 |
/* ethtool set_settings: validate and store the requested link
 * parameters, then reinitialize the PHY if the interface is up.
 * With autoneg enabled the advertised set is forced to everything the
 * hardware supports; otherwise the exact speed/duplex pair must be one
 * of the supported modes. Returns 0 or -EINVAL.
 */
static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	const struct sky2_hw *hw = sky2->hw;
	u32 supported = sky2_supported_modes(hw);

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		ecmd->advertising = supported;
		/* -1 means "auto" for both fields */
		sky2->duplex = -1;
		sky2->speed = -1;
	} else {
		u32 setting;

		switch (ecmd->speed) {
		case SPEED_1000:
			if (ecmd->duplex == DUPLEX_FULL)
				setting = SUPPORTED_1000baseT_Full;
			else if (ecmd->duplex == DUPLEX_HALF)
				setting = SUPPORTED_1000baseT_Half;
			else
				return -EINVAL;
			break;
		case SPEED_100:
			if (ecmd->duplex == DUPLEX_FULL)
				setting = SUPPORTED_100baseT_Full;
			else if (ecmd->duplex == DUPLEX_HALF)
				setting = SUPPORTED_100baseT_Half;
			else
				return -EINVAL;
			break;

		case SPEED_10:
			if (ecmd->duplex == DUPLEX_FULL)
				setting = SUPPORTED_10baseT_Full;
			else if (ecmd->duplex == DUPLEX_HALF)
				setting = SUPPORTED_10baseT_Half;
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		/* requested fixed mode must be supported by the hardware */
		if ((setting & supported) == 0)
			return -EINVAL;

		sky2->speed = ecmd->speed;
		sky2->duplex = ecmd->duplex;
	}

	sky2->autoneg = ecmd->autoneg;
	sky2->advertising = ecmd->advertising;

	if (netif_running(dev))
		sky2_phy_reinit(sky2);

	return 0;
}
2406 |
|
2407 |
/* ethtool get_drvinfo: fill in driver name/version and PCI bus id. */
static void sky2_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->fw_version, "N/A");
	strcpy(info->bus_info, pci_name(sky2->hw->pdev));
}
2417 |
|
2418 |
/* Table mapping ethtool statistic names to GMAC MIB register offsets.
 * Order matters: sky2_get_stats() indexes into it by fixed position.
 */
static const struct sky2_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;		/* GMAC MIB register offset */
} sky2_stats[] = {
	{ "tx_bytes", GM_TXO_OK_HI },
	{ "rx_bytes", GM_RXO_OK_HI },
	{ "tx_broadcast", GM_TXF_BC_OK },
	{ "rx_broadcast", GM_RXF_BC_OK },
	{ "tx_multicast", GM_TXF_MC_OK },
	{ "rx_multicast", GM_RXF_MC_OK },
	{ "tx_unicast", GM_TXF_UC_OK },
	{ "rx_unicast", GM_RXF_UC_OK },
	{ "tx_mac_pause", GM_TXF_MPAUSE },
	{ "rx_mac_pause", GM_RXF_MPAUSE },
	{ "collisions", GM_TXF_SNG_COL },
	{ "late_collision",GM_TXF_LAT_COL },
	{ "aborted", GM_TXF_ABO_COL },
	{ "multi_collisions", GM_TXF_MUL_COL },
	{ "fifo_underrun", GM_TXE_FIFO_UR },
	{ "fifo_overflow", GM_RXE_FIFO_OV },
	/* NOTE(review): "rx_toolong" and "rx_too_long" below both read
	 * GM_RXF_LNG_ERR — likely a duplicate entry, verify intent */
	{ "rx_toolong", GM_RXF_LNG_ERR },
	{ "rx_jabber", GM_RXF_JAB_PKT },
	{ "rx_runt", GM_RXE_FRAG },
	{ "rx_too_long", GM_RXF_LNG_ERR },
	{ "rx_fcs_error", GM_RXF_FCS_ERR },
};
2444 |
|
2445 |
/* ethtool: report whether receive checksum offload is enabled. */
static u32 sky2_get_rx_csum(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	return sky2->rx_csum;
}
2451 |
|
2452 |
/* ethtool: enable/disable receive checksum offload in the BMU for
 * this port and remember the setting.
 */
static int sky2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	sky2->rx_csum = data;

	sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
		     data ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);

	return 0;
}
2463 |
|
2464 |
/* ethtool: return the driver message-level bitmap. */
static u32 sky2_get_msglevel(struct net_device *netdev)
{
	struct sky2_port *sky2 = netdev_priv(netdev);
	return sky2->msg_enable;
}
2469 |
|
2470 |
/* ethtool: restart autonegotiation by reinitializing the PHY.
 * Only valid when autoneg is enabled.
 */
static int sky2_nway_reset(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	if (sky2->autoneg != AUTONEG_ENABLE)
		return -EINVAL;

	sky2_phy_reinit(sky2);

	return 0;
}
2481 |
|
2482 |
/* Read hardware MIB counters into data[]. The first two entries
 * (tx/rx byte counts) are 64-bit values assembled from HI/LO register
 * pairs; the rest map 1:1 onto sky2_stats[] offsets.
 */
static void sky2_phy_stats(struct sky2_port *sky2, u64 * data, unsigned count)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	int i;

	data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
	    | (u64) gma_read32(hw, port, GM_TXO_OK_LO);
	data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
	    | (u64) gma_read32(hw, port, GM_RXO_OK_LO);

	for (i = 2; i < count; i++)
		data[i] = (u64) gma_read32(hw, port, sky2_stats[i].offset);
}
2496 |
|
2497 |
/* ethtool: set the driver message-level bitmap. */
static void sky2_set_msglevel(struct net_device *netdev, u32 value)
{
	struct sky2_port *sky2 = netdev_priv(netdev);
	sky2->msg_enable = value;
}
2502 |
|
2503 |
/* ethtool: number of statistics exported (size of sky2_stats[]). */
static int sky2_get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(sky2_stats);
}
2507 |
|
2508 |
/* ethtool: dump all hardware MIB counters into the caller's buffer. */
static void sky2_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 * data)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	sky2_phy_stats(sky2, data, ARRAY_SIZE(sky2_stats));
}
2515 |
|
2516 |
/* ethtool: copy out the statistic names for ETH_SS_STATS, one
 * fixed-width ETH_GSTRING_LEN slot per entry.
 */
static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(sky2_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       sky2_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}
2528 |
|
2529 |
/* Use hardware MIB variables for critical path statistics and
 * transmit feedback not reported at interrupt.
 * Other errors are accounted for in interrupt handler.
 *
 * The data[] indices below correspond to the fixed order of entries
 * in sky2_stats[] (bytes, bcast, mcast, ucast, pause, collisions...).
 */
static struct net_device_stats *sky2_get_stats(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	u64 data[13];

	sky2_phy_stats(sky2, data, ARRAY_SIZE(data));

	sky2->net_stats.tx_bytes = data[0];
	sky2->net_stats.rx_bytes = data[1];
	/* packet totals = broadcast + multicast + unicast counters */
	sky2->net_stats.tx_packets = data[2] + data[4] + data[6];
	sky2->net_stats.rx_packets = data[3] + data[5] + data[7];
	sky2->net_stats.multicast = data[5] + data[7];
	sky2->net_stats.collisions = data[10];
	sky2->net_stats.tx_aborted_errors = data[12];

	return &sky2->net_stats;
}
2550 |
|
2551 |
/* Set the station MAC address: validate it, store it in the netdev,
 * program both hardware address registers for this port, and
 * reinitialize the PHY if the interface is running.
 */
static int sky2_set_mac_address(struct net_device *dev, void *p)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	/* each port has two MAC address registers, 8 bytes apart */
	memcpy_toio(sky2->hw->regs + B2_MAC_1 + sky2->port * 8,
		    dev->dev_addr, ETH_ALEN);
	memcpy_toio(sky2->hw->regs + B2_MAC_2 + sky2->port * 8,
		    dev->dev_addr, ETH_ALEN);

	if (netif_running(dev))
		sky2_phy_reinit(sky2);

	return 0;
}
2570 |
|
2571 |
/* Program the receive filter for this port: promiscuous, all-multicast
 * (also used when the list exceeds 16 entries), no multicast, or a
 * 64-bit CRC-based multicast hash filter.
 */
static void sky2_set_multicast(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	struct dev_mc_list *list = dev->mc_list;
	u16 reg;
	u8 filter[8];		/* 64-bit multicast hash filter */

	memset(filter, 0, sizeof(filter));

	reg = gma_read16(hw, port, GM_RX_CTRL);
	reg |= GM_RXCR_UCF_ENA;

	if (dev->flags & IFF_PROMISC)	/* promiscuous */
		reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > 16)	/* all multicast */
		memset(filter, 0xff, sizeof(filter));
	else if (dev->mc_count == 0)	/* no multicast */
		reg &= ~GM_RXCR_MCF_ENA;
	else {
		int i;
		reg |= GM_RXCR_MCF_ENA;

		/* hash each address into one of 64 filter bits */
		for (i = 0; list && i < dev->mc_count; i++, list = list->next) {
			u32 bit = ether_crc(ETH_ALEN, list->dmi_addr) & 0x3f;
			filter[bit / 8] |= 1 << (bit % 8);
		}
	}

	/* load the 64-bit filter as four 16-bit register writes */
	gma_write16(hw, port, GM_MC_ADDR_H1,
		    (u16) filter[0] | ((u16) filter[1] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H2,
		    (u16) filter[2] | ((u16) filter[3] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H3,
		    (u16) filter[4] | ((u16) filter[5] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H4,
		    (u16) filter[6] | ((u16) filter[7] << 8));

	gma_write16(hw, port, GM_RX_CTRL, reg);
}
2612 |
|
2613 |
/* Can have one global because blinking is controlled by
 * ethtool and that is always under RTNL mutex.
 *
 * Turn the port LEDs fully on or off; Yukon XL uses a different PHY
 * register page for LED control than the other chips.
 */
static void sky2_led(struct sky2_hw *hw, unsigned port, int on)
{
	u16 pg;

	switch (hw->chip_id) {
	case CHIP_ID_YUKON_XL:
		/* LED control lives on PHY register page 3; save and
		 * restore the page select around the write */
		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
			     on ? (PHY_M_LEDC_LOS_CTRL(1) |
				   PHY_M_LEDC_INIT_CTRL(7) |
				   PHY_M_LEDC_STA1_CTRL(7) |
				   PHY_M_LEDC_STA0_CTRL(7))
			     : 0);

		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
		break;

	default:
		gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
		gm_phy_write(hw, port, PHY_MARV_LED_OVER,
			     on ? PHY_M_LED_MO_DUP(MO_LED_ON) |
			     PHY_M_LED_MO_10(MO_LED_ON) |
			     PHY_M_LED_MO_100(MO_LED_ON) |
			     PHY_M_LED_MO_1000(MO_LED_ON) |
			     PHY_M_LED_MO_RX(MO_LED_ON)
			     : PHY_M_LED_MO_DUP(MO_LED_OFF) |
			     PHY_M_LED_MO_10(MO_LED_OFF) |
			     PHY_M_LED_MO_100(MO_LED_OFF) |
			     PHY_M_LED_MO_1000(MO_LED_OFF) |
			     PHY_M_LED_MO_RX(MO_LED_OFF));

	}
}
2650 |
|
2651 |
/* blink LED's for finding board (ethtool phys_id).
 * Toggles the LEDs every 250ms for `data` seconds (or indefinitely if
 * data is 0/out of range), then restores the saved LED registers.
 * phy_sema is dropped around the sleep so PHY work can proceed.
 */
static int sky2_phys_id(struct net_device *dev, u32 data)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 ledctrl, ledover = 0;
	long ms;
	int interrupted;
	int onoff = 1;

	if (!data || data > (u32) (MAX_SCHEDULE_TIMEOUT / HZ))
		ms = jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT);
	else
		ms = data * 1000;

	/* save initial values */
	down(&sky2->phy_sema);
	if (hw->chip_id == CHIP_ID_YUKON_XL) {
		/* XL keeps LED control on PHY register page 3 */
		u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
		ledctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
	} else {
		ledctrl = gm_phy_read(hw, port, PHY_MARV_LED_CTRL);
		ledover = gm_phy_read(hw, port, PHY_MARV_LED_OVER);
	}

	interrupted = 0;
	while (!interrupted && ms > 0) {
		sky2_led(hw, port, onoff);
		onoff = !onoff;

		/* sleep without holding the PHY semaphore; non-zero
		 * return from msleep_interruptible means a signal */
		up(&sky2->phy_sema);
		interrupted = msleep_interruptible(250);
		down(&sky2->phy_sema);

		ms -= 250;
	}

	/* resume regularly scheduled programming */
	if (hw->chip_id == CHIP_ID_YUKON_XL) {
		u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ledctrl);
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
	} else {
		gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
		gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
	}
	up(&sky2->phy_sema);

	return 0;
}
2705 |
|
2706 |
/* ethtool: report the current flow-control (pause) configuration. */
static void sky2_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *ecmd)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	ecmd->tx_pause = sky2->tx_pause;
	ecmd->rx_pause = sky2->rx_pause;
	ecmd->autoneg = sky2->autoneg;
}
2715 |
|
2716 |
static int sky2_set_pauseparam(struct net_device *dev, |
2717 |
struct ethtool_pauseparam *ecmd) |
2718 |
{ |
2719 |
struct sky2_port *sky2 = netdev_priv(dev); |
2720 |
int err = 0; |
2721 |
|
2722 |
sky2->autoneg = ecmd->autoneg; |
2723 |
sky2->tx_pause = ecmd->tx_pause != 0; |
2724 |
sky2->rx_pause = ecmd->rx_pause != 0; |
2725 |
|
2726 |
sky2_phy_reinit(sky2); |
2727 |
|
2728 |
return err; |
2729 |
} |
2730 |
|
2731 |
#ifdef CONFIG_PM
/* ethtool: report Wake-on-LAN capability (magic packet only). */
static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	wol->supported = WAKE_MAGIC;
	wol->wolopts = sky2->wol ? WAKE_MAGIC : 0;
}
2739 |
|
2740 |
/* ethtool: enable or disable magic-packet Wake-on-LAN. Any option
 * other than WAKE_MAGIC (or none) is rejected.
 */
static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;

	if (wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
		return -EOPNOTSUPP;

	sky2->wol = wol->wolopts == WAKE_MAGIC;

	if (sky2->wol) {
		/* hardware matches magic packets against this address */
		memcpy_toio(hw->regs + WOL_MAC_ADDR, dev->dev_addr, ETH_ALEN);

		sky2_write16(hw, WOL_CTRL_STAT,
			     WOL_CTL_ENA_PME_ON_MAGIC_PKT |
			     WOL_CTL_ENA_MAGIC_PKT_UNIT);
	} else
		sky2_write16(hw, WOL_CTRL_STAT, WOL_CTL_DEFAULT);

	return 0;
}
#endif
2762 |
|
2763 |
/* ethtool get_coalesce: read the three status-unit timers (TX, level,
 * ISR) and their frame-count watermarks back from hardware. A stopped
 * timer is reported as 0 usecs.
 */
static int sky2_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *ecmd)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;

	if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_STOP)
		ecmd->tx_coalesce_usecs = 0;
	else {
		u32 clks = sky2_read32(hw, STAT_TX_TIMER_INI);
		ecmd->tx_coalesce_usecs = sky2_clk2us(hw, clks);
	}
	ecmd->tx_max_coalesced_frames = sky2_read16(hw, STAT_TX_IDX_TH);

	if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_STOP)
		ecmd->rx_coalesce_usecs = 0;
	else {
		u32 clks = sky2_read32(hw, STAT_LEV_TIMER_INI);
		ecmd->rx_coalesce_usecs = sky2_clk2us(hw, clks);
	}
	ecmd->rx_max_coalesced_frames = sky2_read8(hw, STAT_FIFO_WM);

	if (sky2_read8(hw, STAT_ISR_TIMER_CTRL) == TIM_STOP)
		ecmd->rx_coalesce_usecs_irq = 0;
	else {
		u32 clks = sky2_read32(hw, STAT_ISR_TIMER_INI);
		ecmd->rx_coalesce_usecs_irq = sky2_clk2us(hw, clks);
	}

	ecmd->rx_max_coalesced_frames_irq = sky2_read8(hw, STAT_FIFO_ISR_WM);

	return 0;
}
2796 |
|
2797 |
/* Note: this affect both ports */ |
2798 |
static int sky2_set_coalesce(struct net_device *dev, |
2799 |
struct ethtool_coalesce *ecmd) |
2800 |
{ |
2801 |
struct sky2_port *sky2 = netdev_priv(dev); |
2802 |
struct sky2_hw *hw = sky2->hw; |
2803 |
const u32 tmin = sky2_clk2us(hw, 1); |
2804 |
const u32 tmax = 5000; |
2805 |
|
2806 |
if (ecmd->tx_coalesce_usecs != 0 && |
2807 |
(ecmd->tx_coalesce_usecs < tmin || ecmd->tx_coalesce_usecs > tmax)) |
2808 |
return -EINVAL; |
2809 |
|
2810 |
if (ecmd->rx_coalesce_usecs != 0 && |
2811 |
(ecmd->rx_coalesce_usecs < tmin || ecmd->rx_coalesce_usecs > tmax)) |
2812 |
return -EINVAL; |
2813 |
|
2814 |
if (ecmd->rx_coalesce_usecs_irq != 0 && |
2815 |
(ecmd->rx_coalesce_usecs_irq < tmin || ecmd->rx_coalesce_usecs_irq > tmax)) |
2816 |
return -EINVAL; |
2817 |
|
2818 |
if (ecmd->tx_max_coalesced_frames > 0xffff) |
2819 |
return -EINVAL; |
2820 |
if (ecmd->rx_max_coalesced_frames > 0xff) |
2821 |
return -EINVAL; |
2822 |
if (ecmd->rx_max_coalesced_frames_irq > 0xff) |
2823 |
return -EINVAL; |
2824 |
|
2825 |
if (ecmd->tx_coalesce_usecs == 0) |
2826 |
sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP); |
2827 |
else { |
2828 |
sky2_write32(hw, STAT_TX_TIMER_INI, |
2829 |
sky2_us2clk(hw, ecmd->tx_coalesce_usecs)); |
2830 |
sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START); |
2831 |
} |
2832 |
sky2_write16(hw, STAT_TX_IDX_TH, ecmd->tx_max_coalesced_frames); |
2833 |
|
2834 |
if (ecmd->rx_coalesce_usecs == 0) |
2835 |
sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP); |
2836 |
else { |
2837 |
sky2_write32(hw, STAT_LEV_TIMER_INI, |
2838 |
sky2_us2clk(hw, ecmd->rx_coalesce_usecs)); |
2839 |
sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START); |
2840 |
} |
2841 |
sky2_write8(hw, STAT_FIFO_WM, ecmd->rx_max_coalesced_frames); |
2842 |
|
2843 |
if (ecmd->rx_coalesce_usecs_irq == 0) |
2844 |
sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_STOP); |
2845 |
else { |
2846 |
sky2_write32(hw, STAT_TX_TIMER_INI, |
2847 |
sky2_us2clk(hw, ecmd->rx_coalesce_usecs_irq)); |
2848 |
sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START); |
2849 |
} |
2850 |
sky2_write8(hw, STAT_FIFO_ISR_WM, ecmd->rx_max_coalesced_frames_irq); |
2851 |
return 0; |
2852 |
} |
2853 |
|
2854 |
static void sky2_get_ringparam(struct net_device *dev, |
2855 |
struct ethtool_ringparam *ering) |
2856 |
{ |
2857 |
struct sky2_port *sky2 = netdev_priv(dev); |
2858 |
|
2859 |
ering->rx_max_pending = RX_MAX_PENDING; |
2860 |
ering->rx_mini_max_pending = 0; |
2861 |
ering->rx_jumbo_max_pending = 0; |
2862 |
ering->tx_max_pending = TX_RING_SIZE - 1; |
2863 |
|
2864 |
ering->rx_pending = sky2->rx_pending; |
2865 |
ering->rx_mini_pending = 0; |
2866 |
ering->rx_jumbo_pending = 0; |
2867 |
ering->tx_pending = sky2->tx_pending; |
2868 |
} |
2869 |
|
2870 |
static int sky2_set_ringparam(struct net_device *dev, |
2871 |
struct ethtool_ringparam *ering) |
2872 |
{ |
2873 |
struct sky2_port *sky2 = netdev_priv(dev); |
2874 |
int err = 0; |
2875 |
|
2876 |
if (ering->rx_pending > RX_MAX_PENDING || |
2877 |
ering->rx_pending < 8 || |
2878 |
ering->tx_pending < MAX_SKB_TX_LE || |
2879 |
ering->tx_pending > TX_RING_SIZE - 1) |
2880 |
return -EINVAL; |
2881 |
|
2882 |
if (netif_running(dev)) |
2883 |
sky2_down(dev); |
2884 |
|
2885 |
sky2->rx_pending = ering->rx_pending; |
2886 |
sky2->tx_pending = ering->tx_pending; |
2887 |
|
2888 |
if (netif_running(dev)) { |
2889 |
err = sky2_up(dev); |
2890 |
if (err) |
2891 |
dev_close(dev); |
2892 |
else |
2893 |
sky2_set_multicast(dev); |
2894 |
} |
2895 |
|
2896 |
return err; |
2897 |
} |
2898 |
|
2899 |
/* Size of the register dump returned by sky2_get_regs().
 * 0x4000 matches the size of the BAR 0 mapping done in sky2_probe(). */
static int sky2_get_regs_len(struct net_device *dev)
{
	return 0x4000;
}
2903 |
|
2904 |
/*
 * Returns copy of control register region (ethtool .get_regs).
 * Note: access to the RAM address register set will cause timeouts,
 * so the window [B3_RAM_ADDR, B3_RI_WTO_R1) is skipped and left zeroed.
 */
static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *p)
{
	const struct sky2_port *sky2 = netdev_priv(dev);
	const void __iomem *io = sky2->hw->regs;

	/* the caller-provided buffer must at least reach past the hole */
	BUG_ON(regs->len < B3_RI_WTO_R1);
	regs->version = 1;
	memset(p, 0, regs->len);

	/* everything below the RAM address registers... */
	memcpy_fromio(p, io, B3_RAM_ADDR);

	/* ...and everything after them; the gap stays zero */
	memcpy_fromio(p + B3_RI_WTO_R1,
		      io + B3_RI_WTO_R1,
		      regs->len - B3_RI_WTO_R1);
}
2924 |
|
2925 |
/* ethtool operations table; WoL hooks are only offered when power
 * management support is compiled in. */
static struct ethtool_ops sky2_ethtool_ops = {
	.get_settings = sky2_get_settings,
	.set_settings = sky2_set_settings,
	.get_drvinfo = sky2_get_drvinfo,
	.get_msglevel = sky2_get_msglevel,
	.set_msglevel = sky2_set_msglevel,
	.nway_reset = sky2_nway_reset,
	.get_regs_len = sky2_get_regs_len,
	.get_regs = sky2_get_regs,
	.get_link = ethtool_op_get_link,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_tso = ethtool_op_get_tso,
	.set_tso = ethtool_op_set_tso,
	.get_rx_csum = sky2_get_rx_csum,
	.set_rx_csum = sky2_set_rx_csum,
	.get_strings = sky2_get_strings,
	.get_coalesce = sky2_get_coalesce,
	.set_coalesce = sky2_set_coalesce,
	.get_ringparam = sky2_get_ringparam,
	.set_ringparam = sky2_set_ringparam,
	.get_pauseparam = sky2_get_pauseparam,
	.set_pauseparam = sky2_set_pauseparam,
#ifdef CONFIG_PM
	.get_wol = sky2_get_wol,
	.set_wol = sky2_set_wol,
#endif
	.phys_id = sky2_phys_id,
	.get_stats_count = sky2_get_stats_count,
	.get_ethtool_stats = sky2_get_ethtool_stats,
	.get_perm_addr = ethtool_op_get_perm_addr,
};
2959 |
|
2960 |
/* Initialize network device.
 *
 * Allocates a net_device with sky2_port private data for the given port,
 * wires up all netdev operations, installs driver defaults (autoneg on,
 * pause on, default ring sizes), reads the permanent MAC address from
 * chip registers, and stores the device in hw->dev[port].
 *
 * @hw:      shared per-chip state
 * @port:    port index (0 or 1)
 * @highmem: non-zero when 64-bit DMA is in use; sets NETIF_F_HIGHDMA
 *
 * Returns the new device (not yet registered) or NULL on allocation
 * failure.  The device starts with carrier off and queue stopped until
 * link detection.
 */
static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
						     unsigned port, int highmem)
{
	struct sky2_port *sky2;
	struct net_device *dev = alloc_etherdev(sizeof(*sky2));

	if (!dev) {
		printk(KERN_ERR "sky2 etherdev alloc failed");
		return NULL;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &hw->pdev->dev);
	dev->irq = hw->pdev->irq;
	dev->open = sky2_up;
	dev->stop = sky2_down;
	dev->do_ioctl = sky2_ioctl;
	dev->hard_start_xmit = sky2_xmit_frame;
	dev->get_stats = sky2_get_stats;
	dev->set_multicast_list = sky2_set_multicast;
	dev->set_mac_address = sky2_set_mac_address;
	dev->change_mtu = sky2_change_mtu;
	SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops);
	dev->tx_timeout = sky2_tx_timeout;
	dev->watchdog_timeo = TX_WATCHDOG;
	/* NAPI poll handler is installed on port 0 only — presumably it
	 * services the chip-wide status ring; confirm against sky2_poll */
	if (port == 0)
		dev->poll = sky2_poll;
	dev->weight = NAPI_WEIGHT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = sky2_netpoll;
#endif

	sky2 = netdev_priv(dev);
	sky2->netdev = dev;
	sky2->hw = hw;
	sky2->msg_enable = netif_msg_init(debug, default_msg);

	spin_lock_init(&sky2->tx_lock);
	/* Auto speed and flow control */
	sky2->autoneg = AUTONEG_ENABLE;
	sky2->tx_pause = 1;
	sky2->rx_pause = 1;
	sky2->duplex = -1;
	sky2->speed = -1;
	sky2->advertising = sky2_supported_modes(hw);

	/* Receive checksum disabled for Yukon XL
	 * because of observed problems with incorrect
	 * values when multiple packets are received in one interrupt
	 */
	sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL);

	INIT_WORK(&sky2->phy_task, sky2_phy_task, sky2);
	init_MUTEX(&sky2->phy_sema);
	sky2->tx_pending = TX_DEF_PENDING;
	/* early EC rev A1 silicon gets a smaller default Rx ring */
	sky2->rx_pending = is_ec_a1(hw) ? 8 : RX_DEF_PENDING;
	sky2->rx_bufsize = sky2_buf_size(ETH_DATA_LEN);

	hw->dev[port] = dev;

	sky2->port = port;

	dev->features |= NETIF_F_LLTX;
	/* TSO is disabled on Yukon EC Ultra */
	if (hw->chip_id != CHIP_ID_YUKON_EC_U)
		dev->features |= NETIF_F_TSO;
	if (highmem)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;

#ifdef SKY2_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = sky2_vlan_rx_register;
	dev->vlan_rx_kill_vid = sky2_vlan_rx_kill_vid;
#endif

	/* read the mac address from chip registers (8 bytes per port) */
	memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	/* device is off until link detection */
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	return dev;
}
3046 |
|
3047 |
/* Log the device's MAC address at probe time, gated on the
 * netif_msg_probe message-level flag. */
static void __devinit sky2_show_addr(struct net_device *dev)
{
	const struct sky2_port *sky2 = netdev_priv(dev);
	const u8 *addr = dev->dev_addr;

	if (!netif_msg_probe(sky2))
		return;

	printk(KERN_INFO PFX "%s: addr %02x:%02x:%02x:%02x:%02x:%02x\n",
	       dev->name,
	       addr[0], addr[1], addr[2],
	       addr[3], addr[4], addr[5]);
}
3057 |
|
3058 |
/* PCI probe: bring up one Yukon 2 chip and register one net_device per
 * port (second-port registration failure is non-fatal).
 *
 * Fixes over the previous version:
 *  - sky2_init_netdev() failure after a successful sky2_reset() left
 *    err == 0, so probe reported success while registering nothing;
 *    now returns -ENOMEM.
 *  - when register_netdev(dev1) failed, dev1 was freed but the local
 *    pointer kept its stale value, so a later request_irq() failure
 *    unregistered/freed it again (use-after-free); now NULLed.
 *  - pci_request_regions() failure jumped past pci_disable_device();
 *    a new err_out_disable label keeps enable/disable balanced.
 */
static int __devinit sky2_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct net_device *dev, *dev1 = NULL;
	struct sky2_hw *hw;
	int err, pm_cap, using_dac = 0;

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "%s cannot enable PCI device\n",
		       pci_name(pdev));
		goto err_out;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
		       pci_name(pdev));
		goto err_out_disable;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_regions;
	}

	/* prefer 64-bit DMA where the platform supports it */
	if (sizeof(dma_addr_t) > sizeof(u32) &&
	    !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
		using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (err < 0) {
			printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA "
			       "for consistent allocations\n", pci_name(pdev));
			goto err_out_free_regions;
		}

	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "%s no usable DMA configuration\n",
			       pci_name(pdev));
			goto err_out_free_regions;
		}
	}

#ifdef __BIG_ENDIAN
	/* byte swap descriptors in hardware */
	{
		u32 reg;

		pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
		reg |= PCI_REV_DESC;
		pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
	}
#endif

	err = -ENOMEM;
	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw) {
		printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n",
		       pci_name(pdev));
		goto err_out_free_regions;
	}

	hw->pdev = pdev;

	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
	if (!hw->regs) {
		printk(KERN_ERR PFX "%s: cannot map device registers\n",
		       pci_name(pdev));
		goto err_out_free_hw;
	}
	hw->pm_cap = pm_cap;

	err = sky2_reset(hw);
	if (err)
		goto err_out_iounmap;

	printk(KERN_INFO PFX "v%s addr 0x%lx irq %d Yukon-%s (0x%x) rev %d\n",
	       DRV_VERSION, pci_resource_start(pdev, 0), pdev->irq,
	       yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL],
	       hw->chip_id, hw->chip_rev);

	dev = sky2_init_netdev(hw, 0, using_dac);
	if (!dev) {
		/* err is 0 here from sky2_reset(); don't report success */
		err = -ENOMEM;
		goto err_out_free_pci;
	}

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "%s: cannot register net device\n",
		       pci_name(pdev));
		goto err_out_free_netdev;
	}

	sky2_show_addr(dev);

	if (hw->ports > 1 && (dev1 = sky2_init_netdev(hw, 1, using_dac))) {
		if (register_netdev(dev1) == 0)
			sky2_show_addr(dev1);
		else {
			/* Failure to register second port need not be fatal */
			printk(KERN_WARNING PFX
			       "register of second port failed\n");
			hw->dev[1] = NULL;
			free_netdev(dev1);
			dev1 = NULL;	/* prevent double free in error path */
		}
	}

	err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ, DRV_NAME, hw);
	if (err) {
		printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
		       pci_name(pdev), pdev->irq);
		goto err_out_unregister;
	}

	hw->intr_mask = Y2_IS_BASE;
	sky2_write32(hw, B0_IMSK, hw->intr_mask);

	pci_set_drvdata(pdev, hw);

	return 0;

err_out_unregister:
	if (dev1) {
		unregister_netdev(dev1);
		free_netdev(dev1);
	}
	unregister_netdev(dev);
err_out_free_netdev:
	free_netdev(dev);
err_out_free_pci:
	sky2_write8(hw, B0_CTST, CS_RST_SET);
	pci_free_consistent(hw->pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
err_out_iounmap:
	iounmap(hw->regs);
err_out_free_hw:
	kfree(hw);
err_out_free_regions:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out:
	return err;
}
3207 |
|
3208 |
/* PCI remove: unregister both netdevs, quiesce and reset the chip,
 * then release IRQ, DMA memory, PCI resources, the register mapping,
 * and the hw struct.  Unregister happens before hardware shutdown so
 * no traffic paths remain; free_netdev is deferred until after the
 * hardware no longer references the devices. */
static void __devexit sky2_remove(struct pci_dev *pdev)
{
	struct sky2_hw *hw = pci_get_drvdata(pdev);
	struct net_device *dev0, *dev1;

	if (!hw)
		return;

	dev0 = hw->dev[0];
	dev1 = hw->dev[1];
	if (dev1)
		unregister_netdev(dev1);
	unregister_netdev(dev0);

	/* mask all interrupts and put the chip in reset/low power */
	sky2_write32(hw, B0_IMSK, 0);
	sky2_set_power_state(hw, PCI_D3hot);
	sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
	sky2_write8(hw, B0_CTST, CS_RST_SET);
	sky2_read8(hw, B0_CTST);	/* flush the reset write */

	free_irq(pdev->irq, hw);
	pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	if (dev1)
		free_netdev(dev1);
	free_netdev(dev0);
	iounmap(hw->regs);
	kfree(hw);

	pci_set_drvdata(pdev, NULL);
}
3241 |
|
3242 |
#ifdef CONFIG_PM |
3243 |
/* Suspend: take down any running ports, detach them from the stack,
 * then enter the PCI power state chosen for this suspend message.
 * Returns the result of sky2_set_power_state(). */
static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct sky2_hw *hw = pci_get_drvdata(pdev);
	int i;

	for (i = 0; i < 2; i++) {
		struct net_device *dev = hw->dev[i];

		if (dev) {
			/* ports that were not up need no teardown */
			if (!netif_running(dev))
				continue;

			sky2_down(dev);
			netif_device_detach(dev);
		}
	}

	return sky2_set_power_state(hw, pci_choose_state(pdev, state));
}
3262 |
|
3263 |
/* Resume: restore PCI state, return to D0, re-run the chip reset, then
 * reattach and reopen any ports that were running before suspend.
 * If a port fails to come up it is closed.  Always returns 0.
 * NOTE(review): the sky2_reset() return value is ignored here — a failed
 * reset still proceeds to reopen the ports; confirm this is intended. */
static int sky2_resume(struct pci_dev *pdev)
{
	struct sky2_hw *hw = pci_get_drvdata(pdev);
	int i;

	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);
	sky2_set_power_state(hw, PCI_D0);

	sky2_reset(hw);

	for (i = 0; i < 2; i++) {
		struct net_device *dev = hw->dev[i];
		if (dev) {
			if (netif_running(dev)) {
				netif_device_attach(dev);
				if (sky2_up(dev))
					dev_close(dev);
			}
		}
	}
	return 0;
}
3286 |
#endif |
3287 |
|
3288 |
/* PCI driver registration table; suspend/resume only when CONFIG_PM. */
static struct pci_driver sky2_driver = {
	.name = DRV_NAME,
	.id_table = sky2_id_table,
	.probe = sky2_probe,
	.remove = __devexit_p(sky2_remove),
#ifdef CONFIG_PM
	.suspend = sky2_suspend,
	.resume = sky2_resume,
#endif
};
3298 |
|
3299 |
/* Module entry point: register the PCI driver. */
static int __init sky2_init_module(void)
{
	return pci_register_driver(&sky2_driver);
}
3303 |
|
3304 |
/* Module exit point: unregister the PCI driver. */
static void __exit sky2_cleanup_module(void)
{
	pci_unregister_driver(&sky2_driver);
}
3308 |
|
3309 |
module_init(sky2_init_module);
module_exit(sky2_cleanup_module);

/* Module metadata */
MODULE_DESCRIPTION("Marvell Yukon 2 Gigabit Ethernet driver");
MODULE_AUTHOR("Stephen Hemminger <shemminger@osdl.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);