drivers/net/forcedeth.c (-306 / +716 lines)
--- a/drivers/net/forcedeth.c	2004-11-17 13:03:34 -08:00
+++ b/drivers/net/forcedeth.c	2004-11-17 13:03:34 -08:00
@@ -10,8 +10,11 @@
  * trademarks of NVIDIA Corporation in the United States and other
  * countries.
  *
- * Copyright (C) 2003 Manfred Spraul
+ * Copyright (C) 2003,4 Manfred Spraul
  * Copyright (C) 2004 Andrew de Quincey (wol support)
+ * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
+ *             IRQ rate fixes, bigendian fixes, cleanups, verification)
+ * Copyright (c) 2004 NVIDIA Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -60,15 +63,22 @@
  *     0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
  *                        addresses, really stop rx if already running
  *                        in nv_start_rx, clean up a bit.
- *                             (C) Carl-Daniel Hailfinger
  *     0.20: 07 Dec 2003: alloc fixes
  *     0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
  *     0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
  *                        on close.
- *                             (C) Carl-Daniel Hailfinger, Manfred Spraul
  *     0.23: 26 Jan 2004: various small cleanups
  *     0.24: 27 Feb 2004: make driver even less anonymous in backtraces
  *     0.25: 09 Mar 2004: wol support
+ *     0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
+ *     0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
+ *                        added CK804/MCP04 device IDs, code fixes
+ *                        for registers, link status and other minor fixes.
+ *     0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
+ *     0.29: 31 Aug 2004: Add backup timer for link change notification.
+ *     0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
+ *                        into nv_close, otherwise reenabling for wol can
+ *                        cause DMA to kfree'd memory.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
80
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
90
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
81
 * superfluous timer interrupts from the nic.
91
 * superfluous timer interrupts from the nic.
82
 */
92
 */
83
#define FORCEDETH_VERSION              "0.25"
93
#define FORCEDETH_VERSION              "0.30"
94
#define DRV_NAME                       "forcedeth"
84
95
85
#include <linux/module.h>
96
#include <linux/module.h>
97
#include <linux/moduleparam.h>
86
#include <linux/types.h>
98
#include <linux/types.h>
87
#include <linux/pci.h>
99
#include <linux/pci.h>
88
#include <linux/interrupt.h>
100
#include <linux/interrupt.h>
Lines 113-128 Link Here
113
 * Hardware access:
125
 * Hardware access:
114
 */
126
 */
115
127
116
#define DEV_NEED_LASTPACKET1   0x0001
128
#define DEV_NEED_LASTPACKET1   0x0001  /* set LASTPACKET1 in tx flags */
117
#define DEV_IRQMASK_1          0x0002
129
#define DEV_IRQMASK_1          0x0002  /* use NVREG_IRQMASK_WANTED_1 for irq mask */
118
#define DEV_IRQMASK_2          0x0004
130
#define DEV_IRQMASK_2          0x0004  /* use NVREG_IRQMASK_WANTED_2 for irq mask */
119
#define DEV_NEED_TIMERIRQ      0x0008
131
#define DEV_NEED_TIMERIRQ      0x0008  /* set the timer irq flag in the irq mask */
132
#define DEV_NEED_LINKTIMER     0x0010  /* poll link settings. Relies on the timer irq */
120
133
121
enum {
134
enum {
122
       NvRegIrqStatus = 0x000,
135
       NvRegIrqStatus = 0x000,
123
#define NVREG_IRQSTAT_MIIEVENT 0x040
136
#define NVREG_IRQSTAT_MIIEVENT 0x040
124
#define NVREG_IRQSTAT_MASK             0x1ff
137
#define NVREG_IRQSTAT_MASK             0x1ff
125
       NvRegIrqMask = 0x004,
138
       NvRegIrqMask = 0x004,
139
#define NVREG_IRQ_RX_ERROR             0x0001
126
#define NVREG_IRQ_RX                   0x0002
140
#define NVREG_IRQ_RX                   0x0002
127
#define NVREG_IRQ_RX_NOBUF             0x0004
141
#define NVREG_IRQ_RX_NOBUF             0x0004
128
#define NVREG_IRQ_TX_ERR               0x0008
142
#define NVREG_IRQ_TX_ERR               0x0008
Lines 132-138 Link Here
132
#define NVREG_IRQ_TX1                  0x0100
146
#define NVREG_IRQ_TX1                  0x0100
133
#define NVREG_IRQMASK_WANTED_1         0x005f
147
#define NVREG_IRQMASK_WANTED_1         0x005f
134
#define NVREG_IRQMASK_WANTED_2         0x0147
148
#define NVREG_IRQMASK_WANTED_2         0x0147
135
#define NVREG_IRQ_UNKNOWN              (~(NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR|NVREG_IRQ_TX2|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX1))
149
#define NVREG_IRQ_UNKNOWN              (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR|NVREG_IRQ_TX2|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX1))
136
150
137
       NvRegUnknownSetupReg6 = 0x008,
151
       NvRegUnknownSetupReg6 = 0x008,
138
#define NVREG_UNKSETUP6_VAL            3
152
#define NVREG_UNKSETUP6_VAL            3
Lines 159-165 Link Here
159
173
160
       NvRegOffloadConfig = 0x90,
174
       NvRegOffloadConfig = 0x90,
161
#define NVREG_OFFLOAD_HOMEPHY  0x601
175
#define NVREG_OFFLOAD_HOMEPHY  0x601
162
#define NVREG_OFFLOAD_NORMAL   0x5ee
176
#define NVREG_OFFLOAD_NORMAL   RX_NIC_BUFSIZE
163
       NvRegReceiverControl = 0x094,
177
       NvRegReceiverControl = 0x094,
164
#define NVREG_RCVCTL_START     0x01
178
#define NVREG_RCVCTL_START     0x01
165
       NvRegReceiverStatus = 0x98,
179
       NvRegReceiverStatus = 0x98,
Lines 168-173 Link Here
168
       NvRegRandomSeed = 0x9c,
182
       NvRegRandomSeed = 0x9c,
169
#define NVREG_RNDSEED_MASK     0x00ff
183
#define NVREG_RNDSEED_MASK     0x00ff
170
#define NVREG_RNDSEED_FORCE    0x7f00
184
#define NVREG_RNDSEED_FORCE    0x7f00
185
#define NVREG_RNDSEED_FORCE2   0x2d00
186
#define NVREG_RNDSEED_FORCE3   0x7400
171
187
172
       NvRegUnknownSetupReg1 = 0xA0,
188
       NvRegUnknownSetupReg1 = 0xA0,
173
#define NVREG_UNKSETUP1_VAL    0x16070f
189
#define NVREG_UNKSETUP1_VAL    0x16070f
Lines 181-186 Link Here
181
       NvRegMulticastMaskA = 0xB8,
197
       NvRegMulticastMaskA = 0xB8,
182
       NvRegMulticastMaskB = 0xBC,
198
       NvRegMulticastMaskB = 0xBC,
183
199
200
       NvRegPhyInterface = 0xC0,
201
#define PHY_RGMII              0x10000000
202
184
       NvRegTxRingPhysAddr = 0x100,
203
       NvRegTxRingPhysAddr = 0x100,
185
       NvRegRxRingPhysAddr = 0x104,
204
       NvRegRxRingPhysAddr = 0x104,
186
       NvRegRingSizes = 0x108,
205
       NvRegRingSizes = 0x108,
Lines 189-200 Link Here
189
       NvRegUnknownTransmitterReg = 0x10c,
208
       NvRegUnknownTransmitterReg = 0x10c,
190
       NvRegLinkSpeed = 0x110,
209
       NvRegLinkSpeed = 0x110,
191
#define NVREG_LINKSPEED_FORCE 0x10000
210
#define NVREG_LINKSPEED_FORCE 0x10000
192
#define NVREG_LINKSPEED_10     10
211
#define NVREG_LINKSPEED_10     1000
193
#define NVREG_LINKSPEED_100    100
212
#define NVREG_LINKSPEED_100    100
194
#define NVREG_LINKSPEED_1000   1000
213
#define NVREG_LINKSPEED_1000   50
195
       NvRegUnknownSetupReg5 = 0x130,
214
       NvRegUnknownSetupReg5 = 0x130,
196
#define NVREG_UNKSETUP5_BIT31  (1<<31)
215
#define NVREG_UNKSETUP5_BIT31  (1<<31)
197
       NvRegUnknownSetupReg3 = 0x134,
216
       NvRegUnknownSetupReg3 = 0x13c,
198
#define NVREG_UNKSETUP3_VAL1   0x200010
217
#define NVREG_UNKSETUP3_VAL1   0x200010
199
       NvRegTxRxControl = 0x144,
218
       NvRegTxRxControl = 0x144,
200
#define NVREG_TXRXCTL_KICK     0x0001
219
#define NVREG_TXRXCTL_KICK     0x0001
Lines 202-207 Link Here
202
#define NVREG_TXRXCTL_BIT2     0x0004
221
#define NVREG_TXRXCTL_BIT2     0x0004
203
#define NVREG_TXRXCTL_IDLE     0x0008
222
#define NVREG_TXRXCTL_IDLE     0x0008
204
#define NVREG_TXRXCTL_RESET    0x0010
223
#define NVREG_TXRXCTL_RESET    0x0010
224
#define NVREG_TXRXCTL_RXCHECK  0x0400
205
       NvRegMIIStatus = 0x180,
225
       NvRegMIIStatus = 0x180,
206
#define NVREG_MIISTAT_ERROR            0x0001
226
#define NVREG_MIISTAT_ERROR            0x0001
207
#define NVREG_MIISTAT_LINKCHANGE       0x0008
227
#define NVREG_MIISTAT_LINKCHANGE       0x0008
Lines 213-227 Link Here
213
       NvRegAdapterControl = 0x188,
233
       NvRegAdapterControl = 0x188,
214
#define NVREG_ADAPTCTL_START   0x02
234
#define NVREG_ADAPTCTL_START   0x02
215
#define NVREG_ADAPTCTL_LINKUP  0x04
235
#define NVREG_ADAPTCTL_LINKUP  0x04
216
#define NVREG_ADAPTCTL_PHYVALID        0x4000
236
#define NVREG_ADAPTCTL_PHYVALID        0x40000
217
#define NVREG_ADAPTCTL_RUNNING 0x100000
237
#define NVREG_ADAPTCTL_RUNNING 0x100000
218
#define NVREG_ADAPTCTL_PHYSHIFT        24
238
#define NVREG_ADAPTCTL_PHYSHIFT        24
219
       NvRegMIISpeed = 0x18c,
239
       NvRegMIISpeed = 0x18c,
220
#define NVREG_MIISPEED_BIT8    (1<<8)
240
#define NVREG_MIISPEED_BIT8    (1<<8)
221
#define NVREG_MIIDELAY 5
241
#define NVREG_MIIDELAY 5
222
       NvRegMIIControl = 0x190,
242
       NvRegMIIControl = 0x190,
223
#define NVREG_MIICTL_INUSE     0x10000
243
#define NVREG_MIICTL_INUSE     0x08000
224
#define NVREG_MIICTL_WRITE     0x08000
244
#define NVREG_MIICTL_WRITE     0x00400
225
#define NVREG_MIICTL_ADDRSHIFT 5
245
#define NVREG_MIICTL_ADDRSHIFT 5
226
       NvRegMIIData = 0x194,
246
       NvRegMIIData = 0x194,
227
       NvRegWakeUpFlags = 0x200,
247
       NvRegWakeUpFlags = 0x200,
Lines 253-286 Link Here
253
#define NVREG_POWERSTATE_D3            0x0003
273
#define NVREG_POWERSTATE_D3            0x0003
254
};
274
};
255
275
276
/* Big endian: should work, but is untested */
256
struct ring_desc {
277
struct ring_desc {
257
       u32 PacketBuffer;
278
       u32 PacketBuffer;
258
       u16 Length;
279
       u32 FlagLen;
259
       u16 Flags;
260
};
280
};
261
281
262
#define NV_TX_LASTPACKET       (1<<0)
282
#define FLAG_MASK_V1 0xffff0000
263
#define NV_TX_RETRYERROR       (1<<3)
283
#define FLAG_MASK_V2 0xffffc000
264
#define NV_TX_LASTPACKET1      (1<<8)
284
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
265
#define NV_TX_DEFERRED         (1<<10)
285
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
266
#define NV_TX_CARRIERLOST      (1<<11)
286
267
#define NV_TX_LATECOLLISION    (1<<12)
287
#define NV_TX_LASTPACKET       (1<<16)
268
#define NV_TX_UNDERFLOW                (1<<13)
288
#define NV_TX_RETRYERROR       (1<<19)
269
#define NV_TX_ERROR            (1<<14)
289
#define NV_TX_LASTPACKET1      (1<<24)
270
#define NV_TX_VALID            (1<<15)
290
#define NV_TX_DEFERRED         (1<<26)
271
291
#define NV_TX_CARRIERLOST      (1<<27)
272
#define NV_RX_DESCRIPTORVALID  (1<<0)
292
#define NV_TX_LATECOLLISION    (1<<28)
273
#define NV_RX_MISSEDFRAME      (1<<1)
293
#define NV_TX_UNDERFLOW                (1<<29)
274
#define NV_RX_SUBSTRACT1       (1<<3)
294
#define NV_TX_ERROR            (1<<30)
275
#define NV_RX_ERROR1           (1<<7)
295
#define NV_TX_VALID            (1<<31)
276
#define NV_RX_ERROR2           (1<<8)
296
277
#define NV_RX_ERROR3           (1<<9)
297
#define NV_TX2_LASTPACKET      (1<<29)
278
#define NV_RX_ERROR4           (1<<10)
298
#define NV_TX2_RETRYERROR      (1<<18)
279
#define NV_RX_CRCERR           (1<<11)
299
#define NV_TX2_LASTPACKET1     (1<<23)
280
#define NV_RX_OVERFLOW         (1<<12)
300
#define NV_TX2_DEFERRED                (1<<25)
281
#define NV_RX_FRAMINGERR       (1<<13)
301
#define NV_TX2_CARRIERLOST     (1<<26)
282
#define NV_RX_ERROR            (1<<14)
302
#define NV_TX2_LATECOLLISION   (1<<27)
283
#define NV_RX_AVAIL            (1<<15)
303
#define NV_TX2_UNDERFLOW       (1<<28)
304
/* error and valid are the same for both */
305
#define NV_TX2_ERROR           (1<<30)
306
#define NV_TX2_VALID           (1<<31)
307
308
#define NV_RX_DESCRIPTORVALID  (1<<16)
309
#define NV_RX_MISSEDFRAME      (1<<17)
310
#define NV_RX_SUBSTRACT1       (1<<18)
311
#define NV_RX_ERROR1           (1<<23)
312
#define NV_RX_ERROR2           (1<<24)
313
#define NV_RX_ERROR3           (1<<25)
314
#define NV_RX_ERROR4           (1<<26)
315
#define NV_RX_CRCERR           (1<<27)
316
#define NV_RX_OVERFLOW         (1<<28)
317
#define NV_RX_FRAMINGERR       (1<<29)
318
#define NV_RX_ERROR            (1<<30)
319
#define NV_RX_AVAIL            (1<<31)
320
321
#define NV_RX2_CHECKSUMMASK    (0x1C000000)
322
#define NV_RX2_CHECKSUMOK1     (0x10000000)
323
#define NV_RX2_CHECKSUMOK2     (0x14000000)
324
#define NV_RX2_CHECKSUMOK3     (0x18000000)
325
#define NV_RX2_DESCRIPTORVALID (1<<29)
326
#define NV_RX2_SUBSTRACT1      (1<<25)
327
#define NV_RX2_ERROR1          (1<<18)
328
#define NV_RX2_ERROR2          (1<<19)
329
#define NV_RX2_ERROR3          (1<<20)
330
#define NV_RX2_ERROR4          (1<<21)
331
#define NV_RX2_CRCERR          (1<<22)
332
#define NV_RX2_OVERFLOW                (1<<23)
333
#define NV_RX2_FRAMINGERR      (1<<24)
334
/* error and avail are the same for both */
335
#define NV_RX2_ERROR           (1<<30)
336
#define NV_RX2_AVAIL           (1<<31)
284
337
285
/* Miscelaneous hardware related defines: */
338
/* Miscelaneous hardware related defines: */
286
#define NV_PCI_REGSZ           0x270
339
#define NV_PCI_REGSZ           0x270
Lines 306-333 Link Here
306
359
307
/* General driver defaults */
360
/* General driver defaults */
308
#define NV_WATCHDOG_TIMEO      (5*HZ)
361
#define NV_WATCHDOG_TIMEO      (5*HZ)
309
#define DEFAULT_MTU            1500    /* also maximum supported, at least for now */
310
362
311
#define RX_RING                128
363
#define RX_RING                128
312
#define TX_RING                16
364
#define TX_RING                64
313
/* limited to 1 packet until we understand NV_TX_LASTPACKET */
365
/*
314
#define TX_LIMIT_STOP  10
366
 * If your nic mysteriously hangs then try to reduce the limits
315
#define TX_LIMIT_START 5
367
 * to 1/0: It might be required to set NV_TX_LASTPACKET in the
368
 * last valid ring entry. But this would be impossible to
369
 * implement - probably a disassembly error.
370
 */
371
#define TX_LIMIT_STOP  63
372
#define TX_LIMIT_START 62
316
373
317
/* rx/tx mac addr + type + vlan + align + slack*/
374
/* rx/tx mac addr + type + vlan + align + slack*/
318
#define RX_NIC_BUFSIZE         (DEFAULT_MTU + 64)
375
#define RX_NIC_BUFSIZE         (ETH_DATA_LEN + 64)
319
/* even more slack */
376
/* even more slack */
320
#define RX_ALLOC_BUFSIZE       (DEFAULT_MTU + 128)
377
#define RX_ALLOC_BUFSIZE       (ETH_DATA_LEN + 128)
321
378
322
#define OOM_REFILL     (1+HZ/20)
379
#define OOM_REFILL     (1+HZ/20)
323
#define POLL_WAIT      (1+HZ/100)
380
#define POLL_WAIT      (1+HZ/100)
381
#define LINK_TIMEOUT   (3*HZ)
382
383
/*
384
 * desc_ver values:
385
 * This field has two purposes:
386
 * - Newer nics uses a different ring layout. The layout is selected by
387
 *   comparing np->desc_ver with DESC_VER_xy.
388
 * - It contains bits that are forced on when writing to NvRegTxRxControl.
389
 */
390
#define DESC_VER_1     0x0
391
#define DESC_VER_2     (0x02100|NVREG_TXRXCTL_RXCHECK)
392
393
/* PHY defines */
394
#define PHY_OUI_MARVELL        0x5043
395
#define PHY_OUI_CICADA 0x03f1
396
#define PHYID1_OUI_MASK        0x03ff
397
#define PHYID1_OUI_SHFT        6
398
#define PHYID2_OUI_MASK        0xfc00
399
#define PHYID2_OUI_SHFT        10
400
#define PHY_INIT1      0x0f000
401
#define PHY_INIT2      0x0e00
402
#define PHY_INIT3      0x01000
403
#define PHY_INIT4      0x0200
404
#define PHY_INIT5      0x0004
405
#define PHY_INIT6      0x02000
406
#define PHY_GIGABIT    0x0100
407
408
#define PHY_TIMEOUT    0x1
409
#define PHY_ERROR      0x2
410
411
#define PHY_100        0x1
412
#define PHY_1000       0x2
413
#define PHY_HALF       0x100
414
415
/* FIXME: MII defines that should be added to <linux/mii.h> */
416
#define MII_1000BT_CR  0x09
417
#define MII_1000BT_SR  0x0a
418
#define ADVERTISE_1000FULL     0x0200
419
#define ADVERTISE_1000HALF     0x0100
420
#define LPA_1000FULL   0x0800
421
#define LPA_1000HALF   0x0400
422
324
423
325
/*
424
/*
326
 * SMP locking:
425
 * SMP locking:
327
 * All hardware access under dev->priv->lock, except the performance
426
 * All hardware access under dev->priv->lock, except the performance
328
 * critical parts:
427
 * critical parts:
329
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
428
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
330
 *     by the arch code for interrupts.
429
 *     by the arch code for interrupts.
331
 * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
430
 * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
332
 *     needs dev->priv->lock :-(
431
 *     needs dev->priv->lock :-(
333
 * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
432
 * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
Lines 345-356 Link Here
345
       int duplex;
444
       int duplex;
346
       int phyaddr;
445
       int phyaddr;
347
       int wolenabled;
446
       int wolenabled;
447
       unsigned int phy_oui;
448
       u16 gigabit;
348
449
349
       /* General data: RO fields */
450
       /* General data: RO fields */
350
       dma_addr_t ring_addr;
451
       dma_addr_t ring_addr;
351
       struct pci_dev *pci_dev;
452
       struct pci_dev *pci_dev;
352
       u32 orig_mac[2];
453
       u32 orig_mac[2];
353
       u32 irqmask;
454
       u32 irqmask;
455
       u32 desc_ver;
354
456
355
       /* rx specific fields.
457
       /* rx specific fields.
356
        * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
458
        * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
Lines 363-368 Link Here
363
       struct timer_list oom_kick;
465
       struct timer_list oom_kick;
364
       struct timer_list nic_poll;
466
       struct timer_list nic_poll;
365
467
468
       /* media detection workaround.
469
        * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
470
        */
471
       int need_linktimer;
472
       unsigned long link_timeout;
366
       /*
473
       /*
367
        * tx specific fields.
474
        * tx specific fields.
368
        */
475
        */
Lines 370-376 Link Here
370
       unsigned int next_tx, nic_tx;
477
       unsigned int next_tx, nic_tx;
371
       struct sk_buff *tx_skbuff[TX_RING];
478
       struct sk_buff *tx_skbuff[TX_RING];
372
       dma_addr_t tx_dma[TX_RING];
479
       dma_addr_t tx_dma[TX_RING];
373
       u16 tx_flags;
480
       u32 tx_flags;
374
};
481
};
375
482
376
/*
483
/*
Lines 395-400 Link Here
395
       readl(base);
502
       readl(base);
396
}
503
}
397
504
505
static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
506
{
507
       return le32_to_cpu(prd->FlagLen)
508
               & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
509
}
510
398
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
511
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
399
                               int delay, int delaymax, const char *msg)
512
                               int delay, int delaymax, const char *msg)
400
{
513
{
Lines 421-444 Link Here
421
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
534
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
422
{
535
{
423
       u8 *base = get_hwbase(dev);
536
       u8 *base = get_hwbase(dev);
424
       int was_running;
425
       u32 reg;
537
       u32 reg;
426
       int retval;
538
       int retval;
427
539
428
       writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
540
       writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
429
       was_running = 0;
541
430
       reg = readl(base + NvRegAdapterControl);
431
       if (reg & NVREG_ADAPTCTL_RUNNING) {
432
               was_running = 1;
433
               writel(reg & ~NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
434
       }
435
       reg = readl(base + NvRegMIIControl);
542
       reg = readl(base + NvRegMIIControl);
436
       if (reg & NVREG_MIICTL_INUSE) {
543
       if (reg & NVREG_MIICTL_INUSE) {
437
               writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
544
               writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
438
               udelay(NV_MIIBUSY_DELAY);
545
               udelay(NV_MIIBUSY_DELAY);
439
       }
546
       }
440
547
441
       reg = NVREG_MIICTL_INUSE | (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
548
       reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
442
       if (value != MII_READ) {
549
       if (value != MII_READ) {
443
               writel(value, base + NvRegMIIData);
550
               writel(value, base + NvRegMIIData);
444
               reg |= NVREG_MIICTL_WRITE;
551
               reg |= NVREG_MIICTL_WRITE;
Lines 460-478 Link Here
460
                               dev->name, miireg, addr);
567
                               dev->name, miireg, addr);
461
               retval = -1;
568
               retval = -1;
462
       } else {
569
       } else {
463
               /* FIXME: why is that required? */
464
               udelay(50);
465
               retval = readl(base + NvRegMIIData);
570
               retval = readl(base + NvRegMIIData);
466
               dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
571
               dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
467
                               dev->name, miireg, addr, retval);
572
                               dev->name, miireg, addr, retval);
468
       }
573
       }
469
       if (was_running) {
574
470
               reg = readl(base + NvRegAdapterControl);
471
               writel(reg | NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
472
       }
473
       return retval;
575
       return retval;
474
}
576
}
475
577
578
static void msleep(unsigned long msecs)
579
{
580
       set_current_state(TASK_UNINTERRUPTIBLE);
581
       schedule_timeout((HZ * msecs + 999) / 1000);
582
}
583
584
static int phy_reset(struct net_device *dev)
585
{
586
       struct fe_priv *np = get_nvpriv(dev);
587
       u32 miicontrol;
588
       unsigned int tries = 0;
589
590
       miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
591
       miicontrol |= BMCR_RESET;
592
       if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
593
               return -1;
594
       }
595
596
       /* wait for 500ms */
597
       msleep(500);
598
599
       /* must wait till reset is deasserted */
600
       while (miicontrol & BMCR_RESET) {
601
               msleep(10);
602
               miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
603
               /* FIXME: 100 tries seem excessive */
604
               if (tries++ > 100)
605
                       return -1;
606
       }
607
       return 0;
608
}
609
610
static int phy_init(struct net_device *dev)
611
{
612
       struct fe_priv *np = get_nvpriv(dev);
613
       u8 *base = get_hwbase(dev);
614
       u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg;
615
616
       /* set advertise register */
617
       reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
618
       reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|0x800|0x400);
619
       if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
620
               printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
621
               return PHY_ERROR;
622
       }
623
624
       /* get phy interface type */
625
       phyinterface = readl(base + NvRegPhyInterface);
626
627
       /* see if gigabit phy */
628
       mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
629
       if (mii_status & PHY_GIGABIT) {
630
               np->gigabit = PHY_GIGABIT;
631
               mii_control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
632
               mii_control_1000 &= ~ADVERTISE_1000HALF;
633
               if (phyinterface & PHY_RGMII)
634
                       mii_control_1000 |= ADVERTISE_1000FULL;
635
               else
636
                       mii_control_1000 &= ~ADVERTISE_1000FULL;
637
638
               if (mii_rw(dev, np->phyaddr, MII_1000BT_CR, mii_control_1000)) {
639
                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
640
                       return PHY_ERROR;
641
               }
642
       }
643
       else
644
               np->gigabit = 0;
645
646
       /* reset the phy */
647
       if (phy_reset(dev)) {
648
               printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
649
               return PHY_ERROR;
650
       }
651
652
       /* phy vendor specific configuration */
653
       if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
654
               phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
655
               phy_reserved &= ~(PHY_INIT1 | PHY_INIT2);
656
               phy_reserved |= (PHY_INIT3 | PHY_INIT4);
657
               if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
658
                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
659
                       return PHY_ERROR;
660
               }
661
               phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
662
               phy_reserved |= PHY_INIT5;
663
               if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
664
                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
665
                       return PHY_ERROR;
666
               }
667
       }
668
       if (np->phy_oui == PHY_OUI_CICADA) {
669
               phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
670
               phy_reserved |= PHY_INIT6;
671
               if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
672
                       printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
673
                       return PHY_ERROR;
674
               }
675
       }
676
677
       /* restart auto negotiation */
678
       mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
679
       mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
680
       if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
681
               return PHY_ERROR;
682
       }
683
684
       return 0;
685
}
686
476
static void nv_start_rx(struct net_device *dev)
687
static void nv_start_rx(struct net_device *dev)
477
{
688
{
478
       struct fe_priv *np = get_nvpriv(dev);
689
       struct fe_priv *np = get_nvpriv(dev);
Lines 487-492 Link Here
487
       writel(np->linkspeed, base + NvRegLinkSpeed);
698
       writel(np->linkspeed, base + NvRegLinkSpeed);
488
       pci_push(base);
699
       pci_push(base);
489
       writel(NVREG_RCVCTL_START, base + NvRegReceiverControl);
700
       writel(NVREG_RCVCTL_START, base + NvRegReceiverControl);
701
       dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
702
                               dev->name, np->duplex, np->linkspeed);
490
       pci_push(base);
703
       pci_push(base);
491
}
704
}
492
705
Lines 497-504 Link Here
497
       dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
710
       dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
498
       writel(0, base + NvRegReceiverControl);
711
       writel(0, base + NvRegReceiverControl);
499
       reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
712
       reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
500
                      NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
713
                       NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
501
                      KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");
714
                       KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");
502
715
503
       udelay(NV_RXSTOP_DELAY2);
716
       udelay(NV_RXSTOP_DELAY2);
504
       writel(0, base + NvRegLinkSpeed);
717
       writel(0, base + NvRegLinkSpeed);
Lines 520-527 Link Here
520
       dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
733
       dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
521
       writel(0, base + NvRegTransmitterControl);
734
       writel(0, base + NvRegTransmitterControl);
522
       reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
735
       reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
523
                      NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
736
                       NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
524
                      KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
737
                       KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
525
738
526
       udelay(NV_TXSTOP_DELAY2);
739
       udelay(NV_TXSTOP_DELAY2);
527
       writel(0, base + NvRegUnknownTransmitterReg);
740
       writel(0, base + NvRegUnknownTransmitterReg);
Lines 529-541 Link Here
529
742
530
static void nv_txrx_reset(struct net_device *dev)
743
static void nv_txrx_reset(struct net_device *dev)
531
{
744
{
745
       struct fe_priv *np = get_nvpriv(dev);
532
       u8 *base = get_hwbase(dev);
746
       u8 *base = get_hwbase(dev);
533
747
534
       dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
748
       dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
535
       writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET, base + NvRegTxRxControl);
749
       writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->desc_ver, base + NvRegTxRxControl);
536
       pci_push(base);
750
       pci_push(base);
537
       udelay(NV_TXRX_RESET_DELAY);
751
       udelay(NV_TXRX_RESET_DELAY);
538
       writel(NVREG_TXRXCTL_BIT2, base + NvRegTxRxControl);
752
       writel(NVREG_TXRXCTL_BIT2 | np->desc_ver, base + NvRegTxRxControl);
539
       pci_push(base);
753
       pci_push(base);
540
}
754
}
541
755
Lines 556-646 Link Here
556
       return &np->stats;
770
       return &np->stats;
557
}
771
}
558
772
559
static int nv_ethtool_ioctl(struct net_device *dev, void *useraddr)
773
static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
560
{
774
{
561
       struct fe_priv *np = get_nvpriv(dev);
775
       struct fe_priv *np = get_nvpriv(dev);
562
       u8 *base = get_hwbase(dev);
776
       strcpy(info->driver, "forcedeth");
563
       u32 ethcmd;
777
       strcpy(info->version, FORCEDETH_VERSION);
564
778
       strcpy(info->bus_info, pci_name(np->pci_dev));
565
       if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd)))
779
}
566
               return -EFAULT;
567
568
       switch (ethcmd) {
569
       case ETHTOOL_GDRVINFO:
570
       {
571
               struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
572
               strcpy(info.driver, "forcedeth");
573
               strcpy(info.version, FORCEDETH_VERSION);
574
               strcpy(info.bus_info, pci_name(np->pci_dev));
575
               if (copy_to_user(useraddr, &info, sizeof (info)))
576
                       return -EFAULT;
577
               return 0;
578
       }
579
       case ETHTOOL_GLINK:
580
       {
581
               struct ethtool_value edata = { ETHTOOL_GLINK };
582
583
               edata.data = !!netif_carrier_ok(dev);
584
585
               if (copy_to_user(useraddr, &edata, sizeof(edata)))
586
                       return -EFAULT;
587
               return 0;
588
       }
589
       case ETHTOOL_GWOL:
590
       {
591
               struct ethtool_wolinfo wolinfo;
592
               memset(&wolinfo, 0, sizeof(wolinfo));
593
               wolinfo.supported = WAKE_MAGIC;
594
595
               spin_lock_irq(&np->lock);
596
               if (np->wolenabled)
597
                       wolinfo.wolopts = WAKE_MAGIC;
598
               spin_unlock_irq(&np->lock);
599
600
               if (copy_to_user(useraddr, &wolinfo, sizeof(wolinfo)))
601
                       return -EFAULT;
602
               return 0;
603
       }
604
       case ETHTOOL_SWOL:
605
       {
606
               struct ethtool_wolinfo wolinfo;
607
               if (copy_from_user(&wolinfo, useraddr, sizeof(wolinfo)))
608
                       return -EFAULT;
609
610
               spin_lock_irq(&np->lock);
611
               if (wolinfo.wolopts == 0) {
612
                       writel(0, base + NvRegWakeUpFlags);
613
                       np->wolenabled = 0;
614
               }
615
               if (wolinfo.wolopts & WAKE_MAGIC) {
616
                       writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
617
                       np->wolenabled = 1;
618
               }
619
               spin_unlock_irq(&np->lock);
620
               return 0;
621
       }
622
780
623
       default:
781
static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
624
               break;
782
{
625
       }
783
       struct fe_priv *np = get_nvpriv(dev);
784
       wolinfo->supported = WAKE_MAGIC;
626
785
627
       return -EOPNOTSUPP;
786
       spin_lock_irq(&np->lock);
787
       if (np->wolenabled)
788
               wolinfo->wolopts = WAKE_MAGIC;
789
       spin_unlock_irq(&np->lock);
628
}
790
}
629
/*
791
630
 * nv_ioctl: dev->do_ioctl function
792
static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
631
 * Called with rtnl_lock held.
632
 */
633
static int nv_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
634
{
793
{
635
       switch(cmd) {
794
       struct fe_priv *np = get_nvpriv(dev);
636
       case SIOCETHTOOL:
795
       u8 *base = get_hwbase(dev);
637
               return nv_ethtool_ioctl(dev, (void *) rq->ifr_data);
638
796
639
       default:
797
       spin_lock_irq(&np->lock);
640
               return -EOPNOTSUPP;
798
       if (wolinfo->wolopts == 0) {
799
               writel(0, base + NvRegWakeUpFlags);
800
               np->wolenabled = 0;
801
       }
802
       if (wolinfo->wolopts & WAKE_MAGIC) {
803
               writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
804
               np->wolenabled = 1;
641
       }
805
       }
806
       spin_unlock_irq(&np->lock);
807
       return 0;
642
}
808
}
643
809
810
static struct ethtool_ops ops = {
811
       .get_drvinfo = nv_get_drvinfo,
812
       .get_link = ethtool_op_get_link,
813
       .get_wol = nv_get_wol,
814
       .set_wol = nv_set_wol,
815
};
816
644
/*
817
/*
645
 * nv_alloc_rx: fill rx ring entries.
818
 * nv_alloc_rx: fill rx ring entries.
646
 * Return 1 if the allocations for the skbs failed and the
819
 * Return 1 if the allocations for the skbs failed and the
Lines 650-660 Link Here
650
{
823
{
651
       struct fe_priv *np = get_nvpriv(dev);
824
       struct fe_priv *np = get_nvpriv(dev);
652
       unsigned int refill_rx = np->refill_rx;
825
       unsigned int refill_rx = np->refill_rx;
826
       int nr;
653
827
654
       while (np->cur_rx != refill_rx) {
828
       while (np->cur_rx != refill_rx) {
655
               int nr = refill_rx % RX_RING;
656
               struct sk_buff *skb;
829
               struct sk_buff *skb;
657
830
831
               nr = refill_rx % RX_RING;
658
               if (np->rx_skbuff[nr] == NULL) {
832
               if (np->rx_skbuff[nr] == NULL) {
659
833
660
                       skb = dev_alloc_skb(RX_ALLOC_BUFSIZE);
834
                       skb = dev_alloc_skb(RX_ALLOC_BUFSIZE);
Lines 669-678 Link Here
669
               np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len,
843
               np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len,
670
                                               PCI_DMA_FROMDEVICE);
844
                                               PCI_DMA_FROMDEVICE);
671
               np->rx_ring[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
845
               np->rx_ring[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
672
               np->rx_ring[nr].Length = cpu_to_le16(RX_NIC_BUFSIZE);
673
               wmb();
846
               wmb();
674
               np->rx_ring[nr].Flags = cpu_to_le16(NV_RX_AVAIL);
847
               np->rx_ring[nr].FlagLen = cpu_to_le32(RX_NIC_BUFSIZE | NV_RX_AVAIL);
675
               dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet  %d marked as Available\n",
848
               dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
676
                                       dev->name, refill_rx);
849
                                       dev->name, refill_rx);
677
               refill_rx++;
850
               refill_rx++;
678
       }
851
       }
Lines 703-717 Link Here
703
       int i;
876
       int i;
704
877
705
       np->next_tx = np->nic_tx = 0;
878
       np->next_tx = np->nic_tx = 0;
706
       for (i = 0; i < TX_RING; i++) {
879
       for (i = 0; i < TX_RING; i++)
707
               np->tx_ring[i].Flags = 0;
880
               np->tx_ring[i].FlagLen = 0;
708
       }
709
881
710
       np->cur_rx = RX_RING;
882
       np->cur_rx = RX_RING;
711
       np->refill_rx = 0;
883
       np->refill_rx = 0;
712
       for (i = 0; i < RX_RING; i++) {
884
       for (i = 0; i < RX_RING; i++)
713
               np->rx_ring[i].Flags = 0;
885
               np->rx_ring[i].FlagLen = 0;
714
       }
715
       return nv_alloc_rx(dev);
886
       return nv_alloc_rx(dev);
716
}
887
}
717
888
Lines 720-726 Link Here
720
       struct fe_priv *np = get_nvpriv(dev);
891
       struct fe_priv *np = get_nvpriv(dev);
721
       int i;
892
       int i;
722
       for (i = 0; i < TX_RING; i++) {
893
       for (i = 0; i < TX_RING; i++) {
723
               np->tx_ring[i].Flags = 0;
894
               np->tx_ring[i].FlagLen = 0;
724
               if (np->tx_skbuff[i]) {
895
               if (np->tx_skbuff[i]) {
725
                       pci_unmap_single(np->pci_dev, np->tx_dma[i],
896
                       pci_unmap_single(np->pci_dev, np->tx_dma[i],
726
                                               np->tx_skbuff[i]->len,
897
                                               np->tx_skbuff[i]->len,
Lines 737-743 Link Here
737
       struct fe_priv *np = get_nvpriv(dev);
908
       struct fe_priv *np = get_nvpriv(dev);
738
       int i;
909
       int i;
739
       for (i = 0; i < RX_RING; i++) {
910
       for (i = 0; i < RX_RING; i++) {
740
               np->rx_ring[i].Flags = 0;
911
               np->rx_ring[i].FlagLen = 0;
741
               wmb();
912
               wmb();
742
               if (np->rx_skbuff[i]) {
913
               if (np->rx_skbuff[i]) {
743
                       pci_unmap_single(np->pci_dev, np->rx_dma[i],
914
                       pci_unmap_single(np->pci_dev, np->rx_dma[i],
Lines 769-779 Link Here
769
                                       PCI_DMA_TODEVICE);
940
                                       PCI_DMA_TODEVICE);
770
941
771
       np->tx_ring[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
942
       np->tx_ring[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
772
       np->tx_ring[nr].Length = cpu_to_le16(skb->len-1);
773
943
774
       spin_lock_irq(&np->lock);
944
       spin_lock_irq(&np->lock);
775
       wmb();
945
       wmb();
776
       np->tx_ring[nr].Flags = np->tx_flags;
946
       np->tx_ring[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
777
       dprintk(KERN_DEBUG "%s: nv_start_xmit: packet packet %d queued for transmission.\n",
947
       dprintk(KERN_DEBUG "%s: nv_start_xmit: packet packet %d queued for transmission.\n",
778
                               dev->name, np->next_tx);
948
                               dev->name, np->next_tx);
779
       {
949
       {
Lines 792-798 Link Here
792
       if (np->next_tx - np->nic_tx >= TX_LIMIT_STOP)
962
       if (np->next_tx - np->nic_tx >= TX_LIMIT_STOP)
793
               netif_stop_queue(dev);
963
               netif_stop_queue(dev);
794
       spin_unlock_irq(&np->lock);
964
       spin_unlock_irq(&np->lock);
795
       writel(NVREG_TXRXCTL_KICK, get_hwbase(dev) + NvRegTxRxControl);
965
       writel(NVREG_TXRXCTL_KICK|np->desc_ver, get_hwbase(dev) + NvRegTxRxControl);
796
       pci_push(get_hwbase(dev));
966
       pci_push(get_hwbase(dev));
797
       return 0;
967
       return 0;
798
}
968
}
Lines 805-831 Link Here
805
static void nv_tx_done(struct net_device *dev)
975
static void nv_tx_done(struct net_device *dev)
806
{
976
{
807
       struct fe_priv *np = get_nvpriv(dev);
977
       struct fe_priv *np = get_nvpriv(dev);
978
       u32 Flags;
979
       int i;
808
980
809
       while (np->nic_tx < np->next_tx) {
981
       while (np->nic_tx != np->next_tx) {
810
               struct ring_desc *prd;
982
               i = np->nic_tx % TX_RING;
811
               int i = np->nic_tx % TX_RING;
812
983
813
               prd = &np->tx_ring[i];
984
               Flags = le32_to_cpu(np->tx_ring[i].FlagLen);
814
985
815
               dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
986
               dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
816
                                       dev->name, np->nic_tx, prd->Flags);
987
                                       dev->name, np->nic_tx, Flags);
817
               if (prd->Flags & cpu_to_le16(NV_TX_VALID))
988
               if (Flags & NV_TX_VALID)
818
                       break;
989
                       break;
819
               if (prd->Flags & cpu_to_le16(NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
990
               if (np->desc_ver == DESC_VER_1) {
820
                                               NV_TX_UNDERFLOW|NV_TX_ERROR)) {
991
                       if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
821
                       if (prd->Flags & cpu_to_le16(NV_TX_UNDERFLOW))
992
                                                       NV_TX_UNDERFLOW|NV_TX_ERROR)) {
822
                               np->stats.tx_fifo_errors++;
993
                               if (Flags & NV_TX_UNDERFLOW)
823
                       if (prd->Flags & cpu_to_le16(NV_TX_CARRIERLOST))
994
                                       np->stats.tx_fifo_errors++;
824
                               np->stats.tx_carrier_errors++;
995
                               if (Flags & NV_TX_CARRIERLOST)
825
                       np->stats.tx_errors++;
996
                                       np->stats.tx_carrier_errors++;
997
                               np->stats.tx_errors++;
998
                       } else {
999
                               np->stats.tx_packets++;
1000
                               np->stats.tx_bytes += np->tx_skbuff[i]->len;
1001
                       }
826
               } else {
1002
               } else {
827
                       np->stats.tx_packets++;
1003
                       if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
828
                       np->stats.tx_bytes += np->tx_skbuff[i]->len;
1004
                                                       NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
1005
                               if (Flags & NV_TX2_UNDERFLOW)
1006
                                       np->stats.tx_fifo_errors++;
1007
                               if (Flags & NV_TX2_CARRIERLOST)
1008
                                       np->stats.tx_carrier_errors++;
1009
                               np->stats.tx_errors++;
1010
                       } else {
1011
                               np->stats.tx_packets++;
1012
                               np->stats.tx_bytes += np->tx_skbuff[i]->len;
1013
                       }
829
               }
1014
               }
830
               pci_unmap_single(np->pci_dev, np->tx_dma[i],
1015
               pci_unmap_single(np->pci_dev, np->tx_dma[i],
831
                                       np->tx_skbuff[i]->len,
1016
                                       np->tx_skbuff[i]->len,
Lines 875-883 Link Here
875
static void nv_rx_process(struct net_device *dev)
1060
static void nv_rx_process(struct net_device *dev)
876
{
1061
{
877
       struct fe_priv *np = get_nvpriv(dev);
1062
       struct fe_priv *np = get_nvpriv(dev);
1063
       u32 Flags;
878
1064
879
       for (;;) {
1065
       for (;;) {
880
               struct ring_desc *prd;
881
               struct sk_buff *skb;
1066
               struct sk_buff *skb;
882
               int len;
1067
               int len;
883
               int i;
1068
               int i;
Lines 885-895 Link Here
885
                       break;  /* we scanned the whole ring - do not continue */
1070
                       break;  /* we scanned the whole ring - do not continue */
886
1071
887
               i = np->cur_rx % RX_RING;
1072
               i = np->cur_rx % RX_RING;
888
               prd = &np->rx_ring[i];
1073
               Flags = le32_to_cpu(np->rx_ring[i].FlagLen);
1074
               len = nv_descr_getlength(&np->rx_ring[i], np->desc_ver);
1075
889
               dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
1076
               dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
890
                                       dev->name, np->cur_rx, prd->Flags);
1077
                                       dev->name, np->cur_rx, Flags);
891
1078
892
               if (prd->Flags & cpu_to_le16(NV_RX_AVAIL))
1079
               if (Flags & NV_RX_AVAIL)
893
                       break;  /* still owned by hardware, */
1080
                       break;  /* still owned by hardware, */
894
1081
895
               /*
1082
               /*
Lines 903-909 Link Here
903
1090
904
               {
1091
               {
905
                       int j;
1092
                       int j;
906
                       dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",prd->Flags);
1093
                       dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",Flags);
907
                       for (j=0; j<64; j++) {
1094
                       for (j=0; j<64; j++) {
908
                               if ((j%16) == 0)
1095
                               if ((j%16) == 0)
909
                                       dprintk("\n%03x:", j);
1096
                                       dprintk("\n%03x:", j);
Lines 912-952 Link Here
912
                       dprintk("\n");
1099
                       dprintk("\n");
913
               }
1100
               }
914
               /* look at what we actually got: */
1101
               /* look at what we actually got: */
915
               if (!(prd->Flags & cpu_to_le16(NV_RX_DESCRIPTORVALID)))
1102
               if (np->desc_ver == DESC_VER_1) {
916
                       goto next_pkt;
1103
                       if (!(Flags & NV_RX_DESCRIPTORVALID))
917
1104
                               goto next_pkt;
918
919
               len = le16_to_cpu(prd->Length);
920
1105
921
               if (prd->Flags & cpu_to_le16(NV_RX_MISSEDFRAME)) {
1106
                       if (Flags & NV_RX_MISSEDFRAME) {
922
                       np->stats.rx_missed_errors++;
1107
                               np->stats.rx_missed_errors++;
923
                       np->stats.rx_errors++;
1108
                               np->stats.rx_errors++;
924
                       goto next_pkt;
1109
                               goto next_pkt;
925
               }
1110
                       }
926
               if (prd->Flags & cpu_to_le16(NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4)) {
1111
                       if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4)) {
927
                       np->stats.rx_errors++;
1112
                               np->stats.rx_errors++;
928
                       goto next_pkt;
1113
                               goto next_pkt;
929
               }
1114
                       }
930
               if (prd->Flags & cpu_to_le16(NV_RX_CRCERR)) {
1115
                       if (Flags & NV_RX_CRCERR) {
931
                       np->stats.rx_crc_errors++;
1116
                               np->stats.rx_crc_errors++;
932
                       np->stats.rx_errors++;
1117
                               np->stats.rx_errors++;
933
                       goto next_pkt;
1118
                               goto next_pkt;
934
               }
1119
                       }
935
               if (prd->Flags & cpu_to_le16(NV_RX_OVERFLOW)) {
1120
                       if (Flags & NV_RX_OVERFLOW) {
936
                       np->stats.rx_over_errors++;
1121
                               np->stats.rx_over_errors++;
937
                       np->stats.rx_errors++;
1122
                               np->stats.rx_errors++;
938
                       goto next_pkt;
1123
                               goto next_pkt;
939
               }
1124
                       }
940
               if (prd->Flags & cpu_to_le16(NV_RX_ERROR)) {
1125
                       if (Flags & NV_RX_ERROR) {
941
                       /* framing errors are soft errors, the rest is fatal. */
1126
                               /* framing errors are soft errors, the rest is fatal. */
942
                       if (prd->Flags & cpu_to_le16(NV_RX_FRAMINGERR)) {
1127
                               if (Flags & NV_RX_FRAMINGERR) {
943
                               if (prd->Flags & cpu_to_le16(NV_RX_SUBSTRACT1)) {
1128
                                       if (Flags & NV_RX_SUBSTRACT1) {
944
                                       len--;
1129
                                               len--;
1130
                                       }
1131
                               } else {
1132
                                       np->stats.rx_errors++;
1133
                                       goto next_pkt;
945
                               }
1134
                               }
946
                       } else {
1135
                       }
1136
               } else {
1137
                       if (!(Flags & NV_RX2_DESCRIPTORVALID))
1138
                               goto next_pkt;
1139
1140
                       if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4)) {
947
                               np->stats.rx_errors++;
1141
                               np->stats.rx_errors++;
948
                               goto next_pkt;
1142
                               goto next_pkt;
949
                       }
1143
                       }
1144
                       if (Flags & NV_RX2_CRCERR) {
1145
                               np->stats.rx_crc_errors++;
1146
                               np->stats.rx_errors++;
1147
                               goto next_pkt;
1148
                       }
1149
                       if (Flags & NV_RX2_OVERFLOW) {
1150
                               np->stats.rx_over_errors++;
1151
                               np->stats.rx_errors++;
1152
                               goto next_pkt;
1153
                       }
1154
                       if (Flags & NV_RX2_ERROR) {
1155
                               /* framing errors are soft errors, the rest is fatal. */
1156
                               if (Flags & NV_RX2_FRAMINGERR) {
1157
                                       if (Flags & NV_RX2_SUBSTRACT1) {
1158
                                               len--;
1159
                                       }
1160
                               } else {
1161
                                       np->stats.rx_errors++;
1162
                                       goto next_pkt;
1163
                               }
1164
                       }
1165
                       Flags &= NV_RX2_CHECKSUMMASK;
1166
                       if (Flags == NV_RX2_CHECKSUMOK1 ||
1167
                                       Flags == NV_RX2_CHECKSUMOK2 ||
1168
                                       Flags == NV_RX2_CHECKSUMOK3) {
1169
                               dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
1170
                               np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
1171
                       } else {
1172
                               dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name);
1173
                       }
950
               }
1174
               }
951
               /* got a valid packet - forward it to the network core */
1175
               /* got a valid packet - forward it to the network core */
952
               skb = np->rx_skbuff[i];
1176
               skb = np->rx_skbuff[i];
Lines 971-977 Link Here
971
 */
1195
 */
972
static int nv_change_mtu(struct net_device *dev, int new_mtu)
1196
static int nv_change_mtu(struct net_device *dev, int new_mtu)
973
{
1197
{
974
       if (new_mtu > DEFAULT_MTU)
1198
       if (new_mtu > ETH_DATA_LEN)
975
               return -EINVAL;
1199
               return -EINVAL;
976
       dev->mtu = new_mtu;
1200
       dev->mtu = new_mtu;
977
       return 0;
1201
       return 0;
Lines 1035-1040 Link Here
1035
       writel(mask[0], base + NvRegMulticastMaskA);
1259
       writel(mask[0], base + NvRegMulticastMaskA);
1036
       writel(mask[1], base + NvRegMulticastMaskB);
1260
       writel(mask[1], base + NvRegMulticastMaskB);
1037
       writel(pff, base + NvRegPacketFilterFlags);
1261
       writel(pff, base + NvRegPacketFilterFlags);
1262
       dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
1263
               dev->name);
1038
       nv_start_rx(dev);
1264
       nv_start_rx(dev);
1039
       spin_unlock_irq(&np->lock);
1265
       spin_unlock_irq(&np->lock);
1040
}
1266
}
Lines 1042-1057 Link Here
1042
static int nv_update_linkspeed(struct net_device *dev)
1268
static int nv_update_linkspeed(struct net_device *dev)
1043
{
1269
{
1044
       struct fe_priv *np = get_nvpriv(dev);
1270
       struct fe_priv *np = get_nvpriv(dev);
1045
       int adv, lpa, newls, newdup;
1271
       u8 *base = get_hwbase(dev);
1272
       int adv, lpa;
1273
       int newls = np->linkspeed;
1274
       int newdup = np->duplex;
1275
       int mii_status;
1276
       int retval = 0;
1277
       u32 control_1000, status_1000, phyreg;
1278
1279
       /* BMSR_LSTATUS is latched, read it twice:
1280
        * we want the current value.
1281
        */
1282
       mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
1283
       mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
1284
1285
       if (!(mii_status & BMSR_LSTATUS)) {
1286
               dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
1287
                               dev->name);
1288
               newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
1289
               newdup = 0;
1290
               retval = 0;
1291
               goto set_speed;
1292
       }
1293
1294
       /* check auto negotiation is complete */
1295
       if (!(mii_status & BMSR_ANEGCOMPLETE)) {
1296
               /* still in autonegotiation - configure nic for 10 MBit HD and wait. */
1297
               newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
1298
               newdup = 0;
1299
               retval = 0;
1300
               dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
1301
               goto set_speed;
1302
       }
1303
1304
       retval = 1;
1305
       if (np->gigabit == PHY_GIGABIT) {
1306
               control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
1307
               status_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_SR, MII_READ);
1308
1309
               if ((control_1000 & ADVERTISE_1000FULL) &&
1310
                       (status_1000 & LPA_1000FULL)) {
1311
               dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
1312
                               dev->name);
1313
                       newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
1314
                       newdup = 1;
1315
                       goto set_speed;
1316
               }
1317
       }
1046
1318
1047
       adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
1319
       adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
1048
       lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
1320
       lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
1049
       dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
1321
       dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
1050
                               dev->name, adv, lpa);
1322
                               dev->name, adv, lpa);
1051
1323
1052
       /* FIXME: handle parallel detection properly, handle gigabit ethernet */
1324
       /* FIXME: handle parallel detection properly */
1053
       lpa = lpa & adv;
1325
       lpa = lpa & adv;
1054
       if (lpa  & LPA_100FULL) {
1326
       if (lpa & LPA_100FULL) {
1055
               newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
1327
               newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
1056
               newdup = 1;
1328
               newdup = 1;
1057
       } else if (lpa & LPA_100HALF) {
1329
       } else if (lpa & LPA_100HALF) {
Lines 1068-1104 Link Here
1068
               newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
1340
               newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
1069
               newdup = 0;
1341
               newdup = 0;
1070
       }
1342
       }
1071
       if (np->duplex != newdup || np->linkspeed != newls) {
1072
               np->duplex = newdup;
1073
               np->linkspeed = newls;
1074
               return 1;
1075
       }
1076
       return 0;
1077
}
1078
1343
1079
static void nv_link_irq(struct net_device *dev)
1344
set_speed:
1080
{
1345
       if (np->duplex == newdup && np->linkspeed == newls)
1081
       struct fe_priv *np = get_nvpriv(dev);
1346
               return retval;
1082
       u8 *base = get_hwbase(dev);
1347
1083
       u32 miistat;
1348
       dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
1084
       int miival;
1349
                       dev->name, np->linkspeed, np->duplex, newls, newdup);
1350
1351
       np->duplex = newdup;
1352
       np->linkspeed = newls;
1353
1354
       if (np->gigabit == PHY_GIGABIT) {
1355
               phyreg = readl(base + NvRegRandomSeed);
1356
               phyreg &= ~(0x3FF00);
1357
               if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
1358
                       phyreg |= NVREG_RNDSEED_FORCE3;
1359
               else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
1360
                       phyreg |= NVREG_RNDSEED_FORCE2;
1361
               else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
1362
                       phyreg |= NVREG_RNDSEED_FORCE;
1363
               writel(phyreg, base + NvRegRandomSeed);
1364
       }
1365
1366
       phyreg = readl(base + NvRegPhyInterface);
1367
       phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
1368
       if (np->duplex == 0)
1369
               phyreg |= PHY_HALF;
1370
       if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
1371
               phyreg |= PHY_100;
1372
       else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
1373
               phyreg |= PHY_1000;
1374
       writel(phyreg, base + NvRegPhyInterface);
1085
1375
1086
       miistat = readl(base + NvRegMIIStatus);
1376
       writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
1087
       writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
1377
               base + NvRegMisc1);
1088
       printk(KERN_DEBUG "%s: link change notification, status 0x%x.\n", dev->name, miistat);
1378
       pci_push(base);
1379
       writel(np->linkspeed, base + NvRegLinkSpeed);
1380
       pci_push(base);
1089
1381
1090
       miival = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
1382
       return retval;
1091
       if (miival & BMSR_ANEGCOMPLETE) {
1383
}
1092
               nv_update_linkspeed(dev);
1093
1384
1385
static void nv_linkchange(struct net_device *dev)
1386
{
1387
       if (nv_update_linkspeed(dev)) {
1094
               if (netif_carrier_ok(dev)) {
1388
               if (netif_carrier_ok(dev)) {
1095
                       nv_stop_rx(dev);
1389
                       nv_stop_rx(dev);
1096
               } else {
1390
               } else {
1097
                       netif_carrier_on(dev);
1391
                       netif_carrier_on(dev);
1098
                       printk(KERN_INFO "%s: link up.\n", dev->name);
1392
                       printk(KERN_INFO "%s: link up.\n", dev->name);
1099
               }
1393
               }
1100
               writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
1101
                                       base + NvRegMisc1);
1102
               nv_start_rx(dev);
1394
               nv_start_rx(dev);
1103
       } else {
1395
       } else {
1104
               if (netif_carrier_ok(dev)) {
1396
               if (netif_carrier_ok(dev)) {
Lines 1106-1116 Link Here
1106
                       printk(KERN_INFO "%s: link down.\n", dev->name);
1398
                       printk(KERN_INFO "%s: link down.\n", dev->name);
1107
                       nv_stop_rx(dev);
1399
                       nv_stop_rx(dev);
1108
               }
1400
               }
1109
               writel(np->linkspeed, base + NvRegLinkSpeed);
1110
               pci_push(base);
1111
       }
1401
       }
1112
}
1402
}
1113
1403
1404
static void nv_link_irq(struct net_device *dev)
1405
{
1406
       u8 *base = get_hwbase(dev);
1407
       u32 miistat;
1408
1409
       miistat = readl(base + NvRegMIIStatus);
1410
       writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
1411
       dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);
1412
1413
       if (miistat & (NVREG_MIISTAT_LINKCHANGE))
1414
               nv_linkchange(dev);
1415
       dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
1416
}
1417
1114
static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
1418
static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
1115
{
1419
{
1116
       struct net_device *dev = (struct net_device *) data;
1420
       struct net_device *dev = (struct net_device *) data;
Lines 1135-1141 Link Here
1135
                       spin_unlock(&np->lock);
1439
                       spin_unlock(&np->lock);
1136
               }
1440
               }
1137
1441
1138
               if (events & (NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF)) {
1442
               if (events & (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF)) {
1139
                       nv_rx_process(dev);
1443
                       nv_rx_process(dev);
1140
                       if (nv_alloc_rx(dev)) {
1444
                       if (nv_alloc_rx(dev)) {
1141
                               spin_lock(&np->lock);
1445
                               spin_lock(&np->lock);
Lines 1150-1155 Link Here
1150
                       nv_link_irq(dev);
1454
                       nv_link_irq(dev);
1151
                       spin_unlock(&np->lock);
1455
                       spin_unlock(&np->lock);
1152
               }
1456
               }
1457
               if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
1458
                       spin_lock(&np->lock);
1459
                       nv_linkchange(dev);
1460
                       spin_unlock(&np->lock);
1461
                       np->link_timeout = jiffies + LINK_TIMEOUT;
1462
               }
1153
               if (events & (NVREG_IRQ_TX_ERR)) {
1463
               if (events & (NVREG_IRQ_TX_ERR)) {
1154
                       dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
1464
                       dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
1155
                                               dev->name, events);
1465
                                               dev->name, events);
Lines 1157-1163 Link Here
1157
               if (events & (NVREG_IRQ_UNKNOWN)) {
1467
               if (events & (NVREG_IRQ_UNKNOWN)) {
1158
                       printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
1468
                       printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
1159
                                               dev->name, events);
1469
                                               dev->name, events);
1160
               }
1470
               }
1161
               if (i > max_interrupt_work) {
1471
               if (i > max_interrupt_work) {
1162
                       spin_lock(&np->lock);
1472
                       spin_lock(&np->lock);
1163
                       /* disable interrupts on the nic */
1473
                       /* disable interrupts on the nic */
Lines 1210-1230 Link Here
1210
       writel(0, base + NvRegMulticastMaskA);
1520
       writel(0, base + NvRegMulticastMaskA);
1211
       writel(0, base + NvRegMulticastMaskB);
1521
       writel(0, base + NvRegMulticastMaskB);
1212
       writel(0, base + NvRegPacketFilterFlags);
1522
       writel(0, base + NvRegPacketFilterFlags);
1523
1524
       writel(0, base + NvRegTransmitterControl);
1525
       writel(0, base + NvRegReceiverControl);
1526
1213
       writel(0, base + NvRegAdapterControl);
1527
       writel(0, base + NvRegAdapterControl);
1528
1529
       /* 2) initialize descriptor rings */
1530
       oom = nv_init_ring(dev);
1531
1214
       writel(0, base + NvRegLinkSpeed);
1532
       writel(0, base + NvRegLinkSpeed);
1215
       writel(0, base + NvRegUnknownTransmitterReg);
1533
       writel(0, base + NvRegUnknownTransmitterReg);
1216
       nv_txrx_reset(dev);
1534
       nv_txrx_reset(dev);
1217
       writel(0, base + NvRegUnknownSetupReg6);
1535
       writel(0, base + NvRegUnknownSetupReg6);
1218
1536
1219
       /* 2) initialize descriptor rings */
1220
       np->in_shutdown = 0;
1537
       np->in_shutdown = 0;
1221
       oom = nv_init_ring(dev);
1222
1538
1223
       /* 3) set mac address */
1539
       /* 3) set mac address */
1224
       {
1540
       {
1225
               u32 mac[2];
1541
               u32 mac[2];
1226
1542
1227
               mac[0] = (dev->dev_addr[0] <<  0) + (dev->dev_addr[1] <<  8) +
1543
               mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
1228
                               (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
1544
                               (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
1229
               mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
1545
               mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
1230
1546
Lines 1232-1284 Link Here
1232
               writel(mac[1], base + NvRegMacAddrB);
1548
               writel(mac[1], base + NvRegMacAddrB);
1233
       }
1549
       }
1234
1550
1235
       /* 4) continue setup */
1551
       /* 4) give hw rings */
1552
       writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
1553
       writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
1554
       writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
1555
               base + NvRegRingSizes);
1556
1557
       /* 5) continue setup */
1236
       np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
1558
       np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
1237
       np->duplex = 0;
1559
       np->duplex = 0;
1560
1561
       writel(np->linkspeed, base + NvRegLinkSpeed);
1238
       writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3);
1562
       writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3);
1239
       writel(0, base + NvRegTxRxControl);
1563
       writel(np->desc_ver, base + NvRegTxRxControl);
1240
       pci_push(base);
1564
       pci_push(base);
1241
       writel(NVREG_TXRXCTL_BIT1, base + NvRegTxRxControl);
1565
       writel(NVREG_TXRXCTL_BIT1|np->desc_ver, base + NvRegTxRxControl);
1242
       reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
1566
       reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
1243
                       NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
1567
                       NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
1244
                       KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
1568
                       KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
1245
       writel(0, base + NvRegUnknownSetupReg4);
1246
1247
       /* 5) Find a suitable PHY */
1248
       writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
1249
       for (i = 1; i < 32; i++) {
1250
               int id1, id2;
1251
1252
               spin_lock_irq(&np->lock);
1253
               id1 = mii_rw(dev, i, MII_PHYSID1, MII_READ);
1254
               spin_unlock_irq(&np->lock);
1255
               if (id1 < 0 || id1 == 0xffff)
1256
                       continue;
1257
               spin_lock_irq(&np->lock);
1258
               id2 = mii_rw(dev, i, MII_PHYSID2, MII_READ);
1259
               spin_unlock_irq(&np->lock);
1260
               if (id2 < 0 || id2 == 0xffff)
1261
                       continue;
1262
               dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
1263
                               dev->name, id1, id2, i);
1264
               np->phyaddr = i;
1265
1569
1266
               spin_lock_irq(&np->lock);
1570
       writel(0, base + NvRegUnknownSetupReg4);
1267
               nv_update_linkspeed(dev);
1571
       writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
1268
               spin_unlock_irq(&np->lock);
1572
       writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
1269
1270
               break;
1271
       }
1272
       if (i == 32) {
1273
               printk(KERN_INFO "%s: open: failing due to lack of suitable PHY.\n",
1274
                               dev->name);
1275
               ret = -EINVAL;
1276
               goto out_drain;
1277
       }
1278
1573
1279
       /* 6) continue setup */
1574
       /* 6) continue setup */
1280
       writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
1575
       writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
1281
                               base + NvRegMisc1);
1282
       writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
1576
       writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
1283
       writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
1577
       writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
1284
       writel(NVREG_OFFLOAD_NORMAL, base + NvRegOffloadConfig);
1578
       writel(NVREG_OFFLOAD_NORMAL, base + NvRegOffloadConfig);
Lines 1290-1306 Link Here
1290
       writel(NVREG_UNKSETUP2_VAL, base + NvRegUnknownSetupReg2);
1584
       writel(NVREG_UNKSETUP2_VAL, base + NvRegUnknownSetupReg2);
1291
       writel(NVREG_POLL_DEFAULT, base + NvRegPollingInterval);
1585
       writel(NVREG_POLL_DEFAULT, base + NvRegPollingInterval);
1292
       writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
1586
       writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
1293
       writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID,
1587
       writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
1294
                       base + NvRegAdapterControl);
1588
                       base + NvRegAdapterControl);
1589
       writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
1295
       writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4);
1590
       writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4);
1296
       writel(NVREG_WAKEUPFLAGS_VAL, base + NvRegWakeUpFlags);
1591
       writel(NVREG_WAKEUPFLAGS_VAL, base + NvRegWakeUpFlags);
1297
1592
1298
       /* 7) start packet processing */
1299
       writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
1300
       writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
1301
       writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
1302
                       base + NvRegRingSizes);
1303
1304
       i = readl(base + NvRegPowerState);
1593
       i = readl(base + NvRegPowerState);
1305
       if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0)
1594
       if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0)
1306
               writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
1595
               writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
Lines 1308-1320 Link Here
1308
       pci_push(base);
1597
       pci_push(base);
1309
       udelay(10);
1598
       udelay(10);
1310
       writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
1599
       writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
1311
       writel(NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
1312
1313
1600
1314
       writel(0, base + NvRegIrqMask);
1601
       writel(0, base + NvRegIrqMask);
1315
       pci_push(base);
1602
       pci_push(base);
1316
       writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
1317
       pci_push(base);
1318
       writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
1603
       writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
1319
       writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
1604
       writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
1320
       pci_push(base);
1605
       pci_push(base);
Lines 1323-1328 Link Here
1323
       if (ret)
1608
       if (ret)
1324
               goto out_drain;
1609
               goto out_drain;
1325
1610
1611
       /* ask for interrupts */
1326
       writel(np->irqmask, base + NvRegIrqMask);
1612
       writel(np->irqmask, base + NvRegIrqMask);
1327
1613
1328
       spin_lock_irq(&np->lock);
1614
       spin_lock_irq(&np->lock);
Lines 1331-1348 Link Here
1331
       writel(0, base + NvRegMulticastMaskA);
1617
       writel(0, base + NvRegMulticastMaskA);
1332
       writel(0, base + NvRegMulticastMaskB);
1618
       writel(0, base + NvRegMulticastMaskB);
1333
       writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
1619
       writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
1620
       /* One manual link speed update: Interrupts are enabled, future link
1621
        * speed changes cause interrupts and are handled by nv_link_irq().
1622
        */
1623
       {
1624
               u32 miistat;
1625
               miistat = readl(base + NvRegMIIStatus);
1626
               writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
1627
               dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
1628
       }
1629
       ret = nv_update_linkspeed(dev);
1334
       nv_start_rx(dev);
1630
       nv_start_rx(dev);
1335
       nv_start_tx(dev);
1631
       nv_start_tx(dev);
1336
       netif_start_queue(dev);
1632
       netif_start_queue(dev);
1337
       if (oom)
1633
       if (ret) {
1338
               mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
1339
       if (mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ) & BMSR_ANEGCOMPLETE) {
1340
               netif_carrier_on(dev);
1634
               netif_carrier_on(dev);
1341
       } else {
1635
       } else {
1342
               printk("%s: no link during initialization.\n", dev->name);
1636
               printk("%s: no link during initialization.\n", dev->name);
1343
               netif_carrier_off(dev);
1637
               netif_carrier_off(dev);
1344
       }
1638
       }
1345
1639
       if (oom)
1640
               mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
1346
       spin_unlock_irq(&np->lock);
1641
       spin_unlock_irq(&np->lock);
1347
1642
1348
       return 0;
1643
       return 0;
Lines 1368-1376 Link Here
1368
       spin_lock_irq(&np->lock);
1663
       spin_lock_irq(&np->lock);
1369
       nv_stop_tx(dev);
1664
       nv_stop_tx(dev);
1370
       nv_stop_rx(dev);
1665
       nv_stop_rx(dev);
1371
       base = get_hwbase(dev);
1666
       nv_txrx_reset(dev);
1372
1667
1373
       /* disable interrupts on the nic or we will lock up */
1668
       /* disable interrupts on the nic or we will lock up */
1669
       base = get_hwbase(dev);
1374
       writel(0, base + NvRegIrqMask);
1670
       writel(0, base + NvRegIrqMask);
1375
       pci_push(base);
1671
       pci_push(base);
1376
       dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
1672
       dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
Lines 1424-1430 Link Here
1424
1720
1425
       pci_set_master(pci_dev);
1721
       pci_set_master(pci_dev);
1426
1722
1427
       err = pci_request_regions(pci_dev, dev->name);
1723
       err = pci_request_regions(pci_dev, DRV_NAME);
1428
       if (err < 0)
1724
       if (err < 0)
1429
               goto out_disable;
1725
               goto out_disable;
1430
1726
Lines 1447-1452 Link Here
1447
               goto out_relreg;
1743
               goto out_relreg;
1448
       }
1744
       }
1449
1745
1746
       /* handle different descriptor versions */
1747
       if (pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_1 ||
1748
               pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_2 ||
1749
               pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_3)
1750
               np->desc_ver = DESC_VER_1;
1751
       else
1752
               np->desc_ver = DESC_VER_2;
1753
1450
       err = -ENOMEM;
1754
       err = -ENOMEM;
1451
       dev->base_addr = (unsigned long) ioremap(addr, NV_PCI_REGSZ);
1755
       dev->base_addr = (unsigned long) ioremap(addr, NV_PCI_REGSZ);
1452
       if (!dev->base_addr)
1756
       if (!dev->base_addr)
Lines 1464-1470 Link Here
1464
       dev->get_stats = nv_get_stats;
1768
       dev->get_stats = nv_get_stats;
1465
       dev->change_mtu = nv_change_mtu;
1769
       dev->change_mtu = nv_change_mtu;
1466
       dev->set_multicast_list = nv_set_multicast;
1770
       dev->set_multicast_list = nv_set_multicast;
1467
       dev->do_ioctl = nv_ioctl;
1771
       SET_ETHTOOL_OPS(dev, &ops);
1468
       dev->tx_timeout = nv_tx_timeout;
1772
       dev->tx_timeout = nv_tx_timeout;
1469
       dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
1773
       dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
1470
1774
Lines 1506-1520 Link Here
1506
       writel(0, base + NvRegWakeUpFlags);
1810
       writel(0, base + NvRegWakeUpFlags);
1507
       np->wolenabled = 0;
1811
       np->wolenabled = 0;
1508
1812
1509
       np->tx_flags = cpu_to_le16(NV_TX_LASTPACKET|NV_TX_LASTPACKET1|NV_TX_VALID);
1813
       if (np->desc_ver == DESC_VER_1) {
1510
       if (id->driver_data & DEV_NEED_LASTPACKET1)
1814
               np->tx_flags = NV_TX_LASTPACKET|NV_TX_VALID;
1511
               np->tx_flags |= cpu_to_le16(NV_TX_LASTPACKET1);
1815
               if (id->driver_data & DEV_NEED_LASTPACKET1)
1816
                       np->tx_flags |= NV_TX_LASTPACKET1;
1817
       } else {
1818
               np->tx_flags = NV_TX2_LASTPACKET|NV_TX2_VALID;
1819
               if (id->driver_data & DEV_NEED_LASTPACKET1)
1820
                       np->tx_flags |= NV_TX2_LASTPACKET1;
1821
       }
1512
       if (id->driver_data & DEV_IRQMASK_1)
1822
       if (id->driver_data & DEV_IRQMASK_1)
1513
               np->irqmask = NVREG_IRQMASK_WANTED_1;
1823
               np->irqmask = NVREG_IRQMASK_WANTED_1;
1514
       if (id->driver_data & DEV_IRQMASK_2)
1824
       if (id->driver_data & DEV_IRQMASK_2)
1515
               np->irqmask = NVREG_IRQMASK_WANTED_2;
1825
               np->irqmask = NVREG_IRQMASK_WANTED_2;
1516
       if (id->driver_data & DEV_NEED_TIMERIRQ)
1826
       if (id->driver_data & DEV_NEED_TIMERIRQ)
1517
               np->irqmask |= NVREG_IRQ_TIMER;
1827
               np->irqmask |= NVREG_IRQ_TIMER;
1828
       if (id->driver_data & DEV_NEED_LINKTIMER) {
1829
               dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
1830
               np->need_linktimer = 1;
1831
               np->link_timeout = jiffies + LINK_TIMEOUT;
1832
       } else {
1833
               dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
1834
               np->need_linktimer = 0;
1835
       }
1836
1837
       /* find a suitable phy */
1838
       for (i = 1; i < 32; i++) {
1839
               int id1, id2;
1840
1841
               spin_lock_irq(&np->lock);
1842
               id1 = mii_rw(dev, i, MII_PHYSID1, MII_READ);
1843
               spin_unlock_irq(&np->lock);
1844
               if (id1 < 0 || id1 == 0xffff)
1845
                       continue;
1846
               spin_lock_irq(&np->lock);
1847
               id2 = mii_rw(dev, i, MII_PHYSID2, MII_READ);
1848
               spin_unlock_irq(&np->lock);
1849
               if (id2 < 0 || id2 == 0xffff)
1850
                       continue;
1851
1852
               id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
1853
               id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
1854
               dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
1855
                               pci_name(pci_dev), id1, id2, i);
1856
               np->phyaddr = i;
1857
               np->phy_oui = id1 | id2;
1858
               break;
1859
       }
1860
       if (i == 32) {
1861
               /* PHY in isolate mode? No phy attached and user wants to
1862
                * test loopback? Very odd, but can be correct.
1863
                */
1864
               printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
1865
                               pci_name(pci_dev));
1866
       }
1867
1868
       if (i != 32) {
1869
               /* reset it */
1870
               phy_init(dev);
1871
       }
1518
1872
1519
       err = register_netdev(dev);
1873
       err = register_netdev(dev);
1520
       if (err) {
1874
       if (err) {
Lines 1569-1589 Link Here
1569
static struct pci_device_id pci_tbl[] = {
1923
static struct pci_device_id pci_tbl[] = {
1570
       {       /* nForce Ethernet Controller */
1924
       {       /* nForce Ethernet Controller */
1571
               .vendor = PCI_VENDOR_ID_NVIDIA,
1925
               .vendor = PCI_VENDOR_ID_NVIDIA,
1572
               .device = 0x1C3,
1926
               .device = PCI_DEVICE_ID_NVIDIA_NVENET_1,
1573
               .subvendor = PCI_ANY_ID,
1927
               .subvendor = PCI_ANY_ID,
1574
               .subdevice = PCI_ANY_ID,
1928
               .subdevice = PCI_ANY_ID,
1575
               .driver_data = DEV_IRQMASK_1|DEV_NEED_TIMERIRQ,
1929
               .driver_data = DEV_IRQMASK_1|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
1576
       },
1930
       },
1577
       {       /* nForce2 Ethernet Controller */
1931
       {       /* nForce2 Ethernet Controller */
1578
               .vendor = PCI_VENDOR_ID_NVIDIA,
1932
               .vendor = PCI_VENDOR_ID_NVIDIA,
1579
               .device = 0x0066,
1933
               .device = PCI_DEVICE_ID_NVIDIA_NVENET_2,
1934
               .subvendor = PCI_ANY_ID,
1935
               .subdevice = PCI_ANY_ID,
1936
               .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
1937
       },
1938
       {       /* nForce3 Ethernet Controller */
1939
               .vendor = PCI_VENDOR_ID_NVIDIA,
1940
               .device = PCI_DEVICE_ID_NVIDIA_NVENET_3,
1941
               .subvendor = PCI_ANY_ID,
1942
               .subdevice = PCI_ANY_ID,
1943
               .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
1944
       },
1945
       {       /* nForce3 Ethernet Controller */
1946
               .vendor = PCI_VENDOR_ID_NVIDIA,
1947
               .device = PCI_DEVICE_ID_NVIDIA_NVENET_4,
1580
               .subvendor = PCI_ANY_ID,
1948
               .subvendor = PCI_ANY_ID,
1581
               .subdevice = PCI_ANY_ID,
1949
               .subdevice = PCI_ANY_ID,
1582
               .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ,
1950
               .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ,
1583
       },
1951
       },
1584
       {       /* nForce3 Ethernet Controller */
1952
       {       /* nForce3 Ethernet Controller */
1585
               .vendor = PCI_VENDOR_ID_NVIDIA,
1953
               .vendor = PCI_VENDOR_ID_NVIDIA,
1586
               .device = 0x00D6,
1954
               .device = PCI_DEVICE_ID_NVIDIA_NVENET_5,
1955
               .subvendor = PCI_ANY_ID,
1956
               .subdevice = PCI_ANY_ID,
1957
               .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ,
1958
       },
1959
       {       /* nForce3 Ethernet Controller */
1960
               .vendor = PCI_VENDOR_ID_NVIDIA,
1961
               .device = PCI_DEVICE_ID_NVIDIA_NVENET_6,
1962
               .subvendor = PCI_ANY_ID,
1963
               .subdevice = PCI_ANY_ID,
1964
               .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ,
1965
       },
1966
       {       /* nForce3 Ethernet Controller */
1967
               .vendor = PCI_VENDOR_ID_NVIDIA,
1968
               .device = PCI_DEVICE_ID_NVIDIA_NVENET_7,
1969
               .subvendor = PCI_ANY_ID,
1970
               .subdevice = PCI_ANY_ID,
1971
               .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ,
1972
       },
1973
       {       /* CK804 Ethernet Controller */
1974
               .vendor = PCI_VENDOR_ID_NVIDIA,
1975
               .device = PCI_DEVICE_ID_NVIDIA_NVENET_8,
1976
               .subvendor = PCI_ANY_ID,
1977
               .subdevice = PCI_ANY_ID,
1978
               .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ,
1979
       },
1980
       {       /* CK804 Ethernet Controller */
1981
               .vendor = PCI_VENDOR_ID_NVIDIA,
1982
               .device = PCI_DEVICE_ID_NVIDIA_NVENET_9,
1983
               .subvendor = PCI_ANY_ID,
1984
               .subdevice = PCI_ANY_ID,
1985
               .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ,
1986
       },
1987
       {       /* MCP04 Ethernet Controller */
1988
               .vendor = PCI_VENDOR_ID_NVIDIA,
1989
               .device = PCI_DEVICE_ID_NVIDIA_NVENET_10,
1990
               .subvendor = PCI_ANY_ID,
1991
               .subdevice = PCI_ANY_ID,
1992
               .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ,
1993
       },
1994
       {       /* MCP04 Ethernet Controller */
1995
               .vendor = PCI_VENDOR_ID_NVIDIA,
1996
               .device = PCI_DEVICE_ID_NVIDIA_NVENET_11,
1587
               .subvendor = PCI_ANY_ID,
1997
               .subvendor = PCI_ANY_ID,
1588
               .subdevice = PCI_ANY_ID,
1998
               .subdevice = PCI_ANY_ID,
1589
               .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ,
1999
               .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ,
Lines 1610-1616 Link Here
1610
       pci_unregister_driver(&driver);
2020
       pci_unregister_driver(&driver);
1611
}
2021
}
1612
2022
1613
MODULE_PARM(max_interrupt_work, "i");
2023
module_param(max_interrupt_work, int, 0);
1614
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
2024
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
1615
 
2025
 
1616
MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
2026
MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
(-)2.4/include/linux/pci_ids.h (+11 lines)
Lines 981-1004 Link Here
981
#define PCI_DEVICE_ID_NVIDIA_UVTNT2            0x002D
981
#define PCI_DEVICE_ID_NVIDIA_UVTNT2            0x002D
982
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE  0x0035
982
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE  0x0035
983
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA 0x0036
983
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA 0x0036
984
#define PCI_DEVICE_ID_NVIDIA_NVENET_10         0x0037
985
#define PCI_DEVICE_ID_NVIDIA_NVENET_11         0x0038
984
#define PCI_DEVICE_ID_NVIDIA_MCP04_AUDIO	0x003a
986
#define PCI_DEVICE_ID_NVIDIA_MCP04_AUDIO	0x003a
985
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2        0x003e
987
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2        0x003e
986
#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE  0x0053
988
#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE  0x0053
987
#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA 0x0054
989
#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA 0x0054
988
#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2        0x0055
990
#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2        0x0055
991
#define PCI_DEVICE_ID_NVIDIA_NVENET_8          0x0056
992
#define PCI_DEVICE_ID_NVIDIA_NVENET_9          0x0057
989
#define PCI_DEVICE_ID_NVIDIA_CK804_AUDIO	0x0059
993
#define PCI_DEVICE_ID_NVIDIA_CK804_AUDIO	0x0059
990
#define PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE       0x0065
994
#define PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE       0x0065
995
#define PCI_DEVICE_ID_NVIDIA_NVENET_2          0x0066
991
#define PCI_DEVICE_ID_NVIDIA_MCP2_AUDIO                0x006a
996
#define PCI_DEVICE_ID_NVIDIA_MCP2_AUDIO                0x006a
992
#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE      0x0085
997
#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE      0x0085
998
#define PCI_DEVICE_ID_NVIDIA_NVENET_4          0x0086
993
#define PCI_DEVICE_ID_NVIDIA_MCP2S_AUDIO	0x008a
999
#define PCI_DEVICE_ID_NVIDIA_MCP2S_AUDIO	0x008a
1000
#define PCI_DEVICE_ID_NVIDIA_NVENET_5          0x008c
994
#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA     0x008e
1001
#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA     0x008e
995
#define PCI_DEVICE_ID_NVIDIA_ITNT2             0x00A0
1002
#define PCI_DEVICE_ID_NVIDIA_ITNT2             0x00A0
996
#define PCI_DEVICE_ID_NVIDIA_NFORCE3           0x00d1
1003
#define PCI_DEVICE_ID_NVIDIA_NFORCE3           0x00d1
997
#define PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE       0x00d5
1004
#define PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE       0x00d5
1005
#define PCI_DEVICE_ID_NVIDIA_NVENET_3          0x00d6
998
#define PCI_DEVICE_ID_NVIDIA_MCP3_AUDIO                0x00da
1006
#define PCI_DEVICE_ID_NVIDIA_MCP3_AUDIO                0x00da
1007
#define PCI_DEVICE_ID_NVIDIA_NVENET_7          0x00df
999
#define PCI_DEVICE_ID_NVIDIA_NFORCE3S          0x00e1
1008
#define PCI_DEVICE_ID_NVIDIA_NFORCE3S          0x00e1
1000
#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA     0x00e3
1009
#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA     0x00e3
1001
#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE      0x00e5
1010
#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE      0x00e5
1011
#define PCI_DEVICE_ID_NVIDIA_NVENET_6          0x00e6
1002
#define PCI_DEVICE_ID_NVIDIA_CK8S_AUDIO		0x00ea
1012
#define PCI_DEVICE_ID_NVIDIA_CK8S_AUDIO		0x00ea
1003
#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2    0x00ee
1013
#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2    0x00ee
1004
#define PCI_DEVICE_ID_NVIDIA_GEFORCE_SDR       0x0100
1014
#define PCI_DEVICE_ID_NVIDIA_GEFORCE_SDR       0x0100
Lines 1016-1021 Link Here
1016
#define PCI_DEVICE_ID_NVIDIA_NFORCE            0x01a4
1026
#define PCI_DEVICE_ID_NVIDIA_NFORCE            0x01a4
1017
#define PCI_DEVICE_ID_NVIDIA_MCP1_AUDIO                0x01b1
1027
#define PCI_DEVICE_ID_NVIDIA_MCP1_AUDIO                0x01b1
1018
#define PCI_DEVICE_ID_NVIDIA_NFORCE_IDE                0x01bc
1028
#define PCI_DEVICE_ID_NVIDIA_NFORCE_IDE                0x01bc
1029
#define PCI_DEVICE_ID_NVIDIA_NVENET_1          0x01c3
1019
#define PCI_DEVICE_ID_NVIDIA_NFORCE2           0x01e0
1030
#define PCI_DEVICE_ID_NVIDIA_NFORCE2           0x01e0
1020
#define PCI_DEVICE_ID_NVIDIA_GEFORCE3          0x0200
1031
#define PCI_DEVICE_ID_NVIDIA_GEFORCE3          0x0200
1021
#define PCI_DEVICE_ID_NVIDIA_GEFORCE3_1                0x0201
1032
#define PCI_DEVICE_ID_NVIDIA_GEFORCE3_1                0x0201
1022
1033
