(-)a/drivers/net/e1000e/lib.c (+2487 lines)
/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2007 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include <linux/pci.h>

#include "e1000.h"

enum e1000_mng_mode {
	e1000_mng_mode_none = 0,
	e1000_mng_mode_asf,
	e1000_mng_mode_pt,
	e1000_mng_mode_ipmi,
	e1000_mng_mode_host_if_only
};

#define E1000_FACTPS_MNGCG		0x20000000

#define E1000_IAMT_SIGNATURE		0x544D4149 /* Intel(R) Active Management
						    * Technology signature */

/**
 *  e1000e_get_bus_info_pcie - Get PCIe bus information
 *  @hw: pointer to the HW structure
 *
 *  Determines and stores the system bus information for a particular
 *  network interface.  The following bus information is determined and stored:
 *  bus speed, bus width, type (PCIe), and PCIe function.
 **/
s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
{
	struct e1000_bus_info *bus = &hw->bus;
	struct e1000_adapter *adapter = hw->adapter;
	u32 status;
	u16 pcie_link_status, pci_header_type, cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset) {
		bus->width = e1000_bus_width_unknown;
	} else {
		pci_read_config_word(adapter->pdev,
				     cap_offset + PCIE_LINK_STATUS,
				     &pcie_link_status);
		bus->width = (enum e1000_bus_width)((pcie_link_status &
						     PCIE_LINK_WIDTH_MASK) >>
						    PCIE_LINK_WIDTH_SHIFT);
	}

	pci_read_config_word(adapter->pdev, PCI_HEADER_TYPE_REGISTER,
			     &pci_header_type);
	if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
		status = er32(STATUS);
		bus->func = (status & E1000_STATUS_FUNC_MASK)
			    >> E1000_STATUS_FUNC_SHIFT;
	} else {
		bus->func = 0;
	}

	return 0;
}

/**
 *  e1000e_write_vfta - Write value to VLAN filter table
 *  @hw: pointer to the HW structure
 *  @offset: register offset in VLAN filter table
 *  @value: register value written to VLAN filter table
 *
 *  Writes value at the given offset in the register array which stores
 *  the VLAN filter table.
 **/
void e1000e_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
{
	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
	e1e_flush();
}

/**
 *  e1000e_init_rx_addrs - Initialize receive addresses
 *  @hw: pointer to the HW structure
 *  @rar_count: receive address registers
 *
 *  Sets up the receive address registers by setting the base receive address
 *  register to the device's MAC address and clearing all the other receive
 *  address registers to 0.
 **/
void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
{
	u32 i;

	/* Setup the receive address */
	hw_dbg(hw, "Programming MAC Address into RAR[0]\n");

	e1000e_rar_set(hw, hw->mac.addr, 0);

	/* Zero out the other (rar_entry_count - 1) receive addresses */
	hw_dbg(hw, "Clearing RAR[1-%u]\n", rar_count-1);
	for (i = 1; i < rar_count; i++) {
		E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0);
		e1e_flush();
		E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0);
		e1e_flush();
	}
}

/**
 *  e1000e_rar_set - Set receive address register
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address
 *  @index: receive address array register
 *
 *  Sets the receive address array register at index to the address passed
 *  in by addr.
 **/
void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] |
		   ((u32) addr[1] << 8) |
		    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

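	/* For example, the address 01:AA:00:12:34:56 used in the hash
	 * example below packs as rar_low = 0x1200AA01 and
	 * rar_high = 0x00005634 before the Address Valid bit is set.
	 */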
	rar_high |= E1000_RAH_AV;

	E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low);
	E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high);
}

/**
 *  e1000_mta_set - Set multicast filter table address
 *  @hw: pointer to the HW structure
 *  @hash_value: determines the MTA register and bit to set
 *
 *  The multicast table address is a register array of 32-bit registers.
 *  The hash_value is used to determine what register the bit is in, the
 *  current value is read, the new bit is OR'd in and the new value is
 *  written back into the register.
 **/
static void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
{
	u32 hash_bit, hash_reg, mta;

	/* The MTA is a register array of 32-bit registers. It is
	 * treated like an array of (32*mta_reg_count) bits.  We want to
	 * set bit BitArray[hash_value]. So we figure out what register
	 * the bit is in, read it, OR in the new bit, then write
	 * back the new value.  The (hw->mac.mta_reg_count - 1) serves as a
	 * mask to bits 31:5 of the hash value which gives us the
	 * register we're modifying.  The hash bit within that register
	 * is determined by the lower 5 bits of the hash value.
	 */
	hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
	hash_bit = hash_value & 0x1F;

	mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg);

	mta |= (1 << hash_bit);

	E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta);
	e1e_flush();
}

/**
 *  e1000_hash_mc_addr - Generate a multicast hash value
 *  @hw: pointer to the HW structure
 *  @mc_addr: pointer to a multicast address
 *
 *  Generates a multicast address hash value which is used to determine
 *  the multicast filter table array address and new table value.  See
 *  e1000_mta_set()
 **/
static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
{
	u32 hash_value, hash_mask;
	u8 bit_shift = 0;

	/* Register count multiplied by bits per register */
	hash_mask = (hw->mac.mta_reg_count * 32) - 1;

	/* For a mc_filter_type of 0, bit_shift is the number of left-shifts
	 * where 0xFF would still fall within the hash mask. */
	while (hash_mask >> bit_shift != 0xFF)
		bit_shift++;

	/* The portion of the address that is used for the hash table
	 * is determined by the mc_filter_type setting.
	 * The algorithm is such that there is a total of 8 bits of shifting.
	 * The bit_shift for a mc_filter_type of 0 represents the number of
	 * left-shifts where the MSB of mc_addr[5] would still fall within
	 * the hash_mask.  Case 0 does this exactly.  Since there are a total
	 * of 8 bits of shifting, then mc_addr[4] will shift right the
	 * remaining number of bits. Thus 8 - bit_shift.  The rest of the
	 * cases are a variation of this algorithm...essentially raising the
	 * number of bits to shift mc_addr[5] left, while still keeping the
	 * 8-bit shifting total.
	 */
	/* For example, given the following Destination MAC Address and an
	 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
	 * we can see that the bit_shift for case 0 is 4.  These are the hash
	 * values resulting from each mc_filter_type...
	 * [0] [1] [2] [3] [4] [5]
	 * 01  AA  00  12  34  56
	 * LSB		 MSB
	 *
	 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
	 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
	 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
	 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
	 */
	switch (hw->mac.mc_filter_type) {
	default:
	case 0:
		break;
	case 1:
		bit_shift += 1;
		break;
	case 2:
		bit_shift += 2;
		break;
	case 3:
		bit_shift += 4;
		break;
	}

	hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
				  (((u16) mc_addr[5]) << bit_shift)));

	return hash_value;
}

/**
 *  e1000e_mc_addr_list_update_generic - Update Multicast addresses
 *  @hw: pointer to the HW structure
 *  @mc_addr_list: array of multicast addresses to program
 *  @mc_addr_count: number of multicast addresses to program
 *  @rar_used_count: the first RAR register free to program
 *  @rar_count: total number of supported Receive Address Registers
 *
 *  Updates the Receive Address Registers and Multicast Table Array.
 *  The caller must have a packed mc_addr_list of multicast addresses.
 *  The parameter rar_count will usually be hw->mac.rar_entry_count
 *  unless there are workarounds that change this.
 **/
void e1000e_mc_addr_list_update_generic(struct e1000_hw *hw,
				       u8 *mc_addr_list, u32 mc_addr_count,
				       u32 rar_used_count, u32 rar_count)
{
	u32 hash_value;
	u32 i;

	/* Load the first set of multicast addresses into the exact
	 * filters (RAR).  If there are not enough to fill the RAR
	 * array, clear the filters.
	 */
	for (i = rar_used_count; i < rar_count; i++) {
		if (mc_addr_count) {
			e1000e_rar_set(hw, mc_addr_list, i);
			mc_addr_count--;
			mc_addr_list += ETH_ALEN;
		} else {
			E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0);
			e1e_flush();
			E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0);
			e1e_flush();
		}
	}

	/* Clear the old settings from the MTA */
	hw_dbg(hw, "Clearing MTA\n");
	for (i = 0; i < hw->mac.mta_reg_count; i++) {
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
		e1e_flush();
	}

	/* Load any remaining multicast addresses into the hash table. */
	for (; mc_addr_count > 0; mc_addr_count--) {
		hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
		hw_dbg(hw, "Hash value = 0x%03X\n", hash_value);
		e1000_mta_set(hw, hash_value);
		mc_addr_list += ETH_ALEN;
	}
}

/**
 *  e1000e_clear_hw_cntrs_base - Clear base hardware counters
 *  @hw: pointer to the HW structure
 *
 *  Clears the base hardware counters by reading the counter registers.
 **/
void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
{
	u32 temp;

	temp = er32(CRCERRS);
	temp = er32(SYMERRS);
	temp = er32(MPC);
	temp = er32(SCC);
	temp = er32(ECOL);
	temp = er32(MCC);
	temp = er32(LATECOL);
	temp = er32(COLC);
	temp = er32(DC);
	temp = er32(SEC);
	temp = er32(RLEC);
	temp = er32(XONRXC);
	temp = er32(XONTXC);
	temp = er32(XOFFRXC);
	temp = er32(XOFFTXC);
	temp = er32(FCRUC);
	temp = er32(GPRC);
	temp = er32(BPRC);
	temp = er32(MPRC);
	temp = er32(GPTC);
	temp = er32(GORCL);
	temp = er32(GORCH);
	temp = er32(GOTCL);
	temp = er32(GOTCH);
	temp = er32(RNBC);
	temp = er32(RUC);
	temp = er32(RFC);
	temp = er32(ROC);
	temp = er32(RJC);
	temp = er32(TORL);
	temp = er32(TORH);
	temp = er32(TOTL);
	temp = er32(TOTH);
	temp = er32(TPR);
	temp = er32(TPT);
	temp = er32(MPTC);
	temp = er32(BPTC);
}

/**
 *  e1000e_check_for_copper_link - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks to see if the link status of the hardware has changed.  If a
 *  change in link status has been detected, then we read the PHY registers
 *  to get the current speed/duplex if link exists.
 **/
s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status)
		return 0;

	/* First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		return ret_val;

	if (!link)
		return ret_val; /* No link detected */

	mac->get_link_status = 0;

	/* Check if there was DownShift, must be checked
	 * immediately after link-up */
	e1000e_check_downshift(hw);

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg) {
		ret_val = -E1000_ERR_CONFIG;
		return ret_val;
	}

	/* Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration.  So we only need to
	 * configure Collision Distance in the MAC.
	 */
	e1000e_config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = e1000e_config_fc_after_link_up(hw);
	if (ret_val) {
		hw_dbg(hw, "Error configuring flow control\n");
	}

	return ret_val;
}

/**
 *  e1000e_check_for_fiber_link - Check for link (Fiber)
 *  @hw: pointer to the HW structure
 *
 *  Checks for link up on the hardware.  If link is not up and we have
 *  a signal, then we need to force link up.
 **/
s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 rxcw;
	u32 ctrl;
	u32 status;
	s32 ret_val;

	ctrl = er32(CTRL);
	status = er32(STATUS);
	rxcw = er32(RXCW);

	/* If we don't have link (auto-negotiation failed or link partner
	 * cannot auto-negotiate), the cable is plugged in (we have signal),
	 * and our link partner is not trying to auto-negotiate with us (we
	 * are receiving idles or data), we need to force link up. We also
	 * need to give auto-negotiation time to complete, in case the cable
	 * was just plugged in. The autoneg_failed flag does this.
	 */
	/* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
	if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) &&
	    (!(rxcw & E1000_RXCW_C))) {
		if (mac->autoneg_failed == 0) {
			mac->autoneg_failed = 1;
			return 0;
		}
		hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n");

		/* Disable auto-negotiation in the TXCW register */
		ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));

		/* Force link-up and also force full-duplex. */
		ctrl = er32(CTRL);
		ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
		ew32(CTRL, ctrl);

		/* Configure Flow Control after forcing link up. */
		ret_val = e1000e_config_fc_after_link_up(hw);
		if (ret_val) {
			hw_dbg(hw, "Error configuring flow control\n");
			return ret_val;
		}
	} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
		/* If we are forcing link and we are receiving /C/ ordered
		 * sets, re-enable auto-negotiation in the TXCW register
		 * and disable forced link in the Device Control register
		 * in an attempt to auto-negotiate with our link partner.
		 */
		hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n");
		ew32(TXCW, mac->txcw);
		ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));

		mac->serdes_has_link = 1;
	}

	return 0;
}

/**
 *  e1000e_check_for_serdes_link - Check for link (Serdes)
 *  @hw: pointer to the HW structure
 *
 *  Checks for link up on the hardware.  If link is not up and we have
 *  a signal, then we need to force link up.
 **/
s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 rxcw;
	u32 ctrl;
	u32 status;
	s32 ret_val;

	ctrl = er32(CTRL);
	status = er32(STATUS);
	rxcw = er32(RXCW);

	/* If we don't have link (auto-negotiation failed or link partner
	 * cannot auto-negotiate), and our link partner is not trying to
	 * auto-negotiate with us (we are receiving idles or data),
	 * we need to force link up. We also need to give auto-negotiation
	 * time to complete.
	 */
	/* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
	if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
		if (mac->autoneg_failed == 0) {
			mac->autoneg_failed = 1;
			return 0;
		}
		hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n");

		/* Disable auto-negotiation in the TXCW register */
		ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));

		/* Force link-up and also force full-duplex. */
		ctrl = er32(CTRL);
		ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
		ew32(CTRL, ctrl);

		/* Configure Flow Control after forcing link up. */
		ret_val = e1000e_config_fc_after_link_up(hw);
		if (ret_val) {
			hw_dbg(hw, "Error configuring flow control\n");
			return ret_val;
		}
	} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
		/* If we are forcing link and we are receiving /C/ ordered
		 * sets, re-enable auto-negotiation in the TXCW register
		 * and disable forced link in the Device Control register
		 * in an attempt to auto-negotiate with our link partner.
		 */
		hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n");
		ew32(TXCW, mac->txcw);
		ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));

		mac->serdes_has_link = 1;
	} else if (!(E1000_TXCW_ANE & er32(TXCW))) {
		/* If we force link for non-auto-negotiation switch, check
		 * link status based on MAC synchronization for internal
		 * serdes media type.
		 */
		/* SYNCH bit and IV bit are sticky. */
		udelay(10);
		if (E1000_RXCW_SYNCH & er32(RXCW)) {
			if (!(rxcw & E1000_RXCW_IV)) {
				mac->serdes_has_link = 1;
				hw_dbg(hw, "SERDES: Link is up.\n");
			}
		} else {
			mac->serdes_has_link = 0;
			hw_dbg(hw, "SERDES: Link is down.\n");
		}
	}

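	/* If auto-negotiation is enabled in the TXCW register, report link
	 * according to the Link Up bit in the Status register instead.
	 */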
	if (E1000_TXCW_ANE & er32(TXCW)) {
		status = er32(STATUS);
		mac->serdes_has_link = (status & E1000_STATUS_LU);
	}

	return 0;
}

/**
 *  e1000_set_default_fc_generic - Set flow control default values
 *  @hw: pointer to the HW structure
 *
 *  Read the EEPROM for the default values for flow control and store the
 *  values.
 **/
static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	u16 nvm_data;

	if (mac->fc != e1000_fc_default)
		return 0;

	/* Read and store word 0x0F of the EEPROM. This word contains bits
	 * that determine the hardware's default PAUSE (flow control) mode,
	 * a bit that determines whether the HW defaults to enabling or
	 * disabling auto-negotiation, and the direction of the
	 * SW defined pins. If there is no SW over-ride of the flow
	 * control setting, then the variable hw->fc will
	 * be initialized based on a value in the EEPROM.
	 */
	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);

	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}

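	/* The two PAUSE-related bits in this word map onto a flow control
	 * mode: neither bit set means no flow control, ASM_DIR alone means
	 * Tx PAUSE only, and any value with the PAUSE bit set means full
	 * flow control.
	 */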
	if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
		mac->fc = e1000_fc_none;
	else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
		 NVM_WORD0F_ASM_DIR)
		mac->fc = e1000_fc_tx_pause;
	else
		mac->fc = e1000_fc_full;

	return 0;
}

/**
 *  e1000e_setup_link - Setup flow control and link settings
 *  @hw: pointer to the HW structure
 *
 *  Determines which flow control settings to use, then configures flow
 *  control.  Calls the appropriate media-specific link configuration
 *  function.  Assuming the adapter has a valid link partner, a valid link
 *  should be established.  Assumes the hardware has previously been reset
 *  and the transmitter and receiver are not enabled.
 **/
s32 e1000e_setup_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;

	/* In the case of the phy reset being blocked, we already have a link.
	 * We do not need to set it up again.
	 */
	if (e1000_check_reset_block(hw))
		return 0;

	ret_val = e1000_set_default_fc_generic(hw);
	if (ret_val)
		return ret_val;

	/* We want to save off the original Flow Control configuration just
	 * in case we get disconnected and then reconnected into a different
	 * hub or switch with different Flow Control capabilities.
	 */
	mac->original_fc = mac->fc;

	hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", mac->fc);

	/* Call the necessary media_type subroutine to configure the link. */
	ret_val = mac->ops.setup_physical_interface(hw);
	if (ret_val)
		return ret_val;

	/* Initialize the flow control address, type, and PAUSE timer
	 * registers to their default values.  This is done even if flow
	 * control is disabled, because it does not hurt anything to
	 * initialize these registers.
	 */
	hw_dbg(hw, "Initializing the Flow Control address, type and timer regs\n");
	ew32(FCT, FLOW_CONTROL_TYPE);
	ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
	ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);

	ew32(FCTTV, mac->fc_pause_time);

	return e1000e_set_fc_watermarks(hw);
}

/**
 *  e1000_commit_fc_settings_generic - Configure flow control
 *  @hw: pointer to the HW structure
 *
 *  Write the flow control settings to the Transmit Config Word Register (TXCW)
 *  based on the flow control settings in e1000_mac_info.
 **/
static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 txcw;

	/* Check for a software override of the flow control settings, and
	 * setup the device accordingly.  If auto-negotiation is enabled, then
	 * software will have to set the "PAUSE" bits to the correct value in
	 * the Transmit Config Word Register (TXCW) and re-start auto-
	 * negotiation.  However, if auto-negotiation is disabled, then
	 * software will have to manually configure the two flow control enable
	 * bits in the CTRL register.
	 *
	 * The possible values of the "fc" parameter are:
	 *      0:  Flow control is completely disabled
	 *      1:  Rx flow control is enabled (we can receive pause frames,
	 *	  but not send pause frames).
	 *      2:  Tx flow control is enabled (we can send pause frames but we
	 *	  do not support receiving pause frames).
	 *      3:  Both Rx and TX flow control (symmetric) are enabled.
	 */
	switch (mac->fc) {
	case e1000_fc_none:
		/* Flow control completely disabled by a software over-ride. */
		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
		break;
	case e1000_fc_rx_pause:
		/* RX Flow control is enabled and TX Flow control is disabled
		 * by a software over-ride. Since there really isn't a way to
		 * advertise that we are capable of RX Pause ONLY, we will
		 * advertise that we support both symmetric and asymmetric RX
		 * PAUSE.  Later, we will disable the adapter's ability to send
		 * PAUSE frames.
		 */
		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
		break;
	case e1000_fc_tx_pause:
		/* TX Flow control is enabled, and RX Flow control is disabled,
		 * by a software over-ride.
		 */
		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
		break;
	case e1000_fc_full:
		/* Flow control (both RX and TX) is enabled by a software
		 * over-ride.
		 */
		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
		break;
	default:
		hw_dbg(hw, "Flow control param set incorrectly\n");
		return -E1000_ERR_CONFIG;
	}

	ew32(TXCW, txcw);
	mac->txcw = txcw;

	return 0;
}

/**
 *  e1000_poll_fiber_serdes_link_generic - Poll for link up
 *  @hw: pointer to the HW structure
 *
 *  Polls for link up by reading the status register, if link fails to come
 *  up with auto-negotiation, then the link is forced if a signal is detected.
 **/
static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 i, status;
	s32 ret_val;

	/* If we have a signal (the cable is plugged in, or assumed true for
	 * serdes media) then poll for a "Link-Up" indication in the Device
	 * Status Register.  Time-out if a link isn't seen in 500 milliseconds
	 * (Auto-negotiation should complete in less than 500 milliseconds
	 * even if the other end is doing it in SW).
	 */
	for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
		msleep(10);
		status = er32(STATUS);
		if (status & E1000_STATUS_LU)
			break;
	}
	if (i == FIBER_LINK_UP_LIMIT) {
		hw_dbg(hw, "Never got a valid link from auto-neg!!!\n");
		mac->autoneg_failed = 1;
		/* AutoNeg failed to achieve a link, so we'll call
		 * mac->check_for_link. This routine will force the
		 * link up if we detect a signal. This will allow us to
		 * communicate with non-autonegotiating link partners.
		 */
		ret_val = mac->ops.check_for_link(hw);
		if (ret_val) {
			hw_dbg(hw, "Error while checking for link\n");
			return ret_val;
		}
		mac->autoneg_failed = 0;
	} else {
		mac->autoneg_failed = 0;
		hw_dbg(hw, "Valid Link Found\n");
	}

	return 0;
}

/**
 *  e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes
 *  @hw: pointer to the HW structure
 *
 *  Configures collision distance and flow control for fiber and serdes
 *  links.  Upon successful setup, poll for link.
 **/
s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;

	ctrl = er32(CTRL);

	/* Take the link out of reset */
	ctrl &= ~E1000_CTRL_LRST;

	e1000e_config_collision_dist(hw);

	ret_val = e1000_commit_fc_settings_generic(hw);
	if (ret_val)
		return ret_val;

	/* Since auto-negotiation is enabled, take the link out of reset (the
	 * link will be in reset, because we previously reset the chip). This
	 * will restart auto-negotiation.  If auto-negotiation is successful
	 * then the link-up status bit will be set and the flow control enable
	 * bits (RFCE and TFCE) will be set according to their negotiated value.
	 */
	hw_dbg(hw, "Auto-negotiation enabled\n");

	ew32(CTRL, ctrl);
	e1e_flush();
	msleep(1);

	/* For these adapters, the SW definable pin 1 is set when the optics
	 * detect a signal.  If we have a signal, then poll for a "Link-Up"
	 * indication.
	 */
	if (hw->media_type == e1000_media_type_internal_serdes ||
	    (er32(CTRL) & E1000_CTRL_SWDPIN1)) {
		ret_val = e1000_poll_fiber_serdes_link_generic(hw);
	} else {
		hw_dbg(hw, "No signal detected\n");
	}

	return 0;
}

/**
 *  e1000e_config_collision_dist - Configure collision distance
 *  @hw: pointer to the HW structure
 *
 *  Configures the collision distance to the default value and is used
 *  during link setup. Currently no func pointer exists and all
 *  implementations are handled in the generic version of this function.
 **/
void e1000e_config_collision_dist(struct e1000_hw *hw)
{
	u32 tctl;

	tctl = er32(TCTL);

	tctl &= ~E1000_TCTL_COLD;
	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;

	ew32(TCTL, tctl);
	e1e_flush();
}

/**
 *  e1000e_set_fc_watermarks - Set flow control high/low watermarks
 *  @hw: pointer to the HW structure
 *
 *  Sets the flow control high/low threshold (watermark) registers.  If
 *  flow control XON frame transmission is enabled, then set XON frame
 *  transmission as well.
 **/
s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 fcrtl = 0, fcrth = 0;

	/* Set the flow control receive threshold registers.  Normally,
	 * these registers will be set to a default threshold that may be
	 * adjusted later by the driver's runtime code.  However, if the
	 * ability to transmit pause frames is not enabled, then these
	 * registers will be set to 0.
	 */
	if (mac->fc & e1000_fc_tx_pause) {
		/* We need to set up the Receive Threshold high and low water
		 * marks as well as (optionally) enabling the transmission of
		 * XON frames.
		 */
		fcrtl = mac->fc_low_water;
		fcrtl |= E1000_FCRTL_XONE;
		fcrth = mac->fc_high_water;
	}
	ew32(FCRTL, fcrtl);
	ew32(FCRTH, fcrth);

	return 0;
}

/**
 *  e1000e_force_mac_fc - Force the MAC's flow control settings
 *  @hw: pointer to the HW structure
 *
 *  Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in the
 *  device control register to reflect the adapter settings.  TFCE and RFCE
 *  need to be explicitly set by software when a copper PHY is used because
 *  autonegotiation is managed by the PHY rather than the MAC.  Software must
 *  also configure these bits when link is forced on a fiber connection.
 **/
s32 e1000e_force_mac_fc(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 ctrl;

	ctrl = er32(CTRL);

	/* Because we didn't get link via the internal auto-negotiation
	 * mechanism (we either forced link or we got link via PHY
	 * auto-neg), we have to manually enable/disable transmit and
	 * receive flow control.
	 *
	 * The "Case" statement below enables/disables flow control
	 * according to the "mac->fc" parameter.
	 *
	 * The possible values of the "fc" parameter are:
	 *      0:  Flow control is completely disabled
	 *      1:  Rx flow control is enabled (we can receive pause
	 *	  frames but not send pause frames).
	 *      2:  Tx flow control is enabled (we can send pause frames
	 *	  but we do not receive pause frames).
	 *      3:  Both Rx and TX flow control (symmetric) is enabled.
	 *  other:  No other values should be possible at this point.
	 */
	hw_dbg(hw, "mac->fc = %u\n", mac->fc);

	switch (mac->fc) {
	case e1000_fc_none:
		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
		break;
	case e1000_fc_rx_pause:
		ctrl &= (~E1000_CTRL_TFCE);
		ctrl |= E1000_CTRL_RFCE;
		break;
	case e1000_fc_tx_pause:
		ctrl &= (~E1000_CTRL_RFCE);
		ctrl |= E1000_CTRL_TFCE;
		break;
	case e1000_fc_full:
		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
		break;
	default:
		hw_dbg(hw, "Flow control param set incorrectly\n");
		return -E1000_ERR_CONFIG;
	}

	ew32(CTRL, ctrl);

	return 0;
}

/**
 *  e1000e_config_fc_after_link_up - Configures flow control after link
 *  @hw: pointer to the HW structure
 *
 *  Checks the status of auto-negotiation after link up to ensure that the
 *  speed and duplex were not forced.  If the link needed to be forced, then
 *  flow control needs to be forced also.  If auto-negotiation is enabled
 *  and did not fail, then we configure flow control based on our link
 *  partner.
 **/
s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
	u16 speed, duplex;

	/* Check for the case where we have fiber media and auto-neg failed
	 * so we had to force link.  In this case, we need to force the
	 * configuration of the MAC to match the "fc" parameter.
	 */
	if (mac->autoneg_failed) {
		if (hw->media_type == e1000_media_type_fiber ||
		    hw->media_type == e1000_media_type_internal_serdes)
			ret_val = e1000e_force_mac_fc(hw);
	} else {
		if (hw->media_type == e1000_media_type_copper)
			ret_val = e1000e_force_mac_fc(hw);
	}

	if (ret_val) {
		hw_dbg(hw, "Error forcing flow control settings\n");
		return ret_val;
	}

	/* Check for the case where we have copper media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner have
	 * flow control configured.
	 */
	if ((hw->media_type == e1000_media_type_copper) && mac->autoneg) {
		/* Read the MII Status Register and check to see if AutoNeg
		 * has completed.  We read this twice because this reg has
		 * some "sticky" (latched) bits.
		 */
		ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
		if (ret_val)
			return ret_val;
		ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
		if (ret_val)
			return ret_val;

		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
			hw_dbg(hw, "Copper PHY and Auto Neg "
				 "has not completed.\n");
			return ret_val;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (Address 4) and the Auto_Negotiation Base
		 * Page Ability Register (Address 5) to determine how
		 * flow control was negotiated.
		 */
		ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg);
		if (ret_val)
			return ret_val;
		ret_val = e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
		if (ret_val)
			return ret_val;

		/* Two bits in the Auto Negotiation Advertisement Register
		 * (Address 4) and two bits in the Auto Negotiation Base
		 * Page Ability Register (Address 5) determine flow control
		 * for both the PHY and the link partner.  The following
		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
		 * 1999, describes these PAUSE resolution bits and how flow
		 * control is determined based upon these settings.
		 * NOTE:  DC = Don't Care
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
		 *-------|---------|-------|---------|--------------------
		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
		 *   0   |    1    |   0   |   DC    | e1000_fc_none
		 *   0   |    1    |   1   |    0    | e1000_fc_none
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *   1   |    0    |   0   |   DC    | e1000_fc_none
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *   1   |    1    |   0   |    0    | e1000_fc_none
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 *
		 */
		/* Are both PAUSE bits set to 1?  If so, this implies
		 * Symmetric Flow Control is enabled at both ends.  The
		 * ASM_DIR bits are irrelevant per the spec.
		 *
		 * For Symmetric Flow Control:
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |   DC    |   1   |   DC    | E1000_fc_full
		 *
		 */
		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
			/* Now we need to check if the user selected RX ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise RX
			 * ONLY. Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (mac->original_fc == e1000_fc_full) {
				mac->fc = e1000_fc_full;
				hw_dbg(hw, "Flow Control = FULL.\r\n");
			} else {
				mac->fc = e1000_fc_rx_pause;
				hw_dbg(hw, "Flow Control = "
					 "RX PAUSE frames only.\r\n");
			}
		}
		/* For receiving PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *
		 */
		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			  (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			  (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			  (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			mac->fc = e1000_fc_tx_pause;
			hw_dbg(hw, "Flow Control = TX PAUSE frames only.\r\n");
		}
		/* For transmitting PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 *
		 */
		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			mac->fc = e1000_fc_rx_pause;
			hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n");
		}
		/* Per the IEEE spec, at this point flow control should be
		 * disabled.  However, we want to consider that we could
		 * be connected to a legacy switch that doesn't advertise
		 * desired flow control, but can be forced on the link
		 * partner.  So if we advertised no flow control, that is
		 * what we will resolve to.  If we advertised some kind of
		 * receive capability (Rx Pause Only or Full Flow Control)
		 * and the link partner advertised none, we will configure
		 * ourselves to enable Rx Flow Control only.  We can do
		 * this safely for two reasons:  If the link partner really
		 * didn't want flow control enabled, and we enable Rx, no
		 * harm done since we won't be receiving any PAUSE frames
		 * anyway.  If the intent on the link partner was to have
		 * flow control enabled, then by us enabling RX only, we
		 * can at least receive pause frames and process them.
		 * This is a good idea because in most cases, since we are
		 * predominantly a server NIC, more times than not we will
		 * be asked to delay transmission of packets than asking
		 * our link partner to pause transmission of frames.
		 */
		else if ((mac->original_fc == e1000_fc_none) ||
			 (mac->original_fc == e1000_fc_tx_pause)) {
			mac->fc = e1000_fc_none;
			hw_dbg(hw, "Flow Control = NONE.\r\n");
		} else {
			mac->fc = e1000_fc_rx_pause;
			hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n");
		}

		/* Now we need to do one last check...  If we auto-
		 * negotiated to HALF DUPLEX, flow control should not be
		 * enabled per IEEE 802.3 spec.
		 */
		ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
		if (ret_val) {
			hw_dbg(hw, "Error getting link speed and duplex\n");
			return ret_val;
		}

		if (duplex == HALF_DUPLEX)
			mac->fc = e1000_fc_none;

		/* Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		ret_val = e1000e_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg(hw, "Error forcing flow control settings\n");
			return ret_val;
		}
	}

	return 0;
}

/**
 *  e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex
 *  @hw: pointer to the HW structure
 *  @speed: stores the current speed
 *  @duplex: stores the current duplex
 *
 *  Read the status register for the current speed/duplex and store the current
 *  speed and duplex for copper connections.
 **/
s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex)
{
	u32 status;

	status = er32(STATUS);
	if (status & E1000_STATUS_SPEED_1000) {
		*speed = SPEED_1000;
		hw_dbg(hw, "1000 Mb/s, ");
	} else if (status & E1000_STATUS_SPEED_100) {
		*speed = SPEED_100;
		hw_dbg(hw, "100 Mb/s, ");
	} else {
		*speed = SPEED_10;
		hw_dbg(hw, "10 Mb/s, ");
	}

	if (status & E1000_STATUS_FD) {
		*duplex = FULL_DUPLEX;
		hw_dbg(hw, "Full Duplex\n");
	} else {
		*duplex = HALF_DUPLEX;
		hw_dbg(hw, "Half Duplex\n");
	}

	return 0;
}

/**
 *  e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex
 *  @hw: pointer to the HW structure
 *  @speed: stores the current speed
 *  @duplex: stores the current duplex
 *
 *  Sets the speed and duplex to gigabit full duplex (the only possible option)
 *  for fiber/serdes links.
 **/
s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex)
{
	*speed = SPEED_1000;
	*duplex = FULL_DUPLEX;

	return 0;
}

/**
 *  e1000e_get_hw_semaphore - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore to access the PHY or NVM
 **/
s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

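	/* Acquisition is two-staged: first wait for the software semaphore
	 * (SMBI) to be free, then set the software/firmware semaphore
	 * (SWESMBI) and verify that it latched; it will not latch while
	 * firmware owns the resource.
	 */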
	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = er32(SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == timeout) {
		hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n");
		return -E1000_ERR_NVM;
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (er32(SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		e1000e_put_hw_semaphore(hw);
		hw_dbg(hw, "Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return 0;
}

/**
 *  e1000e_put_hw_semaphore - Release hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Release hardware semaphore used to access the PHY or NVM
 **/
void e1000e_put_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;

	swsm = er32(SWSM);
	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
	ew32(SWSM, swsm);
}

/**
 *  e1000e_get_auto_rd_done - Check for auto read completion
 *  @hw: pointer to the HW structure
 *
 *  Check EEPROM for Auto Read done bit.
 **/
s32 e1000e_get_auto_rd_done(struct e1000_hw *hw)
{
	s32 i = 0;

	while (i < AUTO_READ_DONE_TIMEOUT) {
		if (er32(EECD) & E1000_EECD_AUTO_RD)
			break;
		msleep(1);
		i++;
	}

	if (i == AUTO_READ_DONE_TIMEOUT) {
		hw_dbg(hw, "Auto read by HW from NVM has not completed.\n");
		return -E1000_ERR_RESET;
	}

	return 0;
}

/**
 *  e1000e_valid_led_default - Verify a valid default LED config
 *  @hw: pointer to the HW structure
 *  @data: pointer to the NVM (EEPROM)
 *
 *  Read the EEPROM for the current default LED configuration.  If the
 *  LED configuration is not valid, set to a valid LED configuration.
 **/
s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
		*data = ID_LED_DEFAULT;

	return 0;
}

/**
 *  e1000e_id_led_init - Initialize ID LED settings from the NVM
 *  @hw: pointer to the HW structure
 *
 *  Reads the ID LED configuration word from the NVM and uses it to build
 *  the LEDCTL values for the two LED modes used by the LED on/off routines.
 **/
s32 e1000e_id_led_init(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	const u32 ledctl_mask = 0x000000FF;
	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
	u16 data, i, temp;
	const u16 led_mask = 0x0F;

	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
	if (ret_val)
		return ret_val;

	mac->ledctl_default = er32(LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

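	/* Each 4-bit field in the ID LED word describes one of four LEDs;
	 * decode it into forced-on/forced-off settings for mode 1 and
	 * mode 2, leaving unrecognized values at their defaults.
	 */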
	for (i = 0; i < 4; i++) {
		temp = (data >> (i << 2)) & led_mask;
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_on << (i << 3);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_on << (i << 3);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
	}

	return 0;
}

/**
 *  e1000e_cleanup_led_generic - Set LED config to default operation
 *  @hw: pointer to the HW structure
 *
 *  Remove the current LED configuration and set the LED configuration
 *  to the default value, saved from the EEPROM.
 **/
s32 e1000e_cleanup_led_generic(struct e1000_hw *hw)
{
	ew32(LEDCTL, hw->mac.ledctl_default);
	return 0;
}

/**
 *  e1000e_blink_led - Blink LED
 *  @hw: pointer to the HW structure
 *
 *  Blink the LEDs which are set to be on.
 **/
s32 e1000e_blink_led(struct e1000_hw *hw)
{
	u32 ledctl_blink = 0;
	u32 i;

	if (hw->media_type == e1000_media_type_fiber) {
		/* always blink LED0 for PCI-E fiber */
		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
		     (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
	} else {
		/* set the blink bit for each LED that's "on" (0x0E)
		 * in ledctl_mode2 */
		ledctl_blink = hw->mac.ledctl_mode2;
		for (i = 0; i < 4; i++)
			if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
			    E1000_LEDCTL_MODE_LED_ON)
				ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
						 (i * 8));
	}

	ew32(LEDCTL, ledctl_blink);

	return 0;
}

/**
 *  e1000e_led_on_generic - Turn LED on
 *  @hw: pointer to the HW structure
 *
 *  Turn LED on.
 **/
s32 e1000e_led_on_generic(struct e1000_hw *hw)
{
	u32 ctrl;

	switch (hw->media_type) {
	case e1000_media_type_fiber:
		ctrl = er32(CTRL);
		ctrl &= ~E1000_CTRL_SWDPIN0;
		ctrl |= E1000_CTRL_SWDPIO0;
		ew32(CTRL, ctrl);
		break;
	case e1000_media_type_copper:
		ew32(LEDCTL, hw->mac.ledctl_mode2);
		break;
	default:
		break;
	}

	return 0;
}

/**
 *  e1000e_led_off_generic - Turn LED off
 *  @hw: pointer to the HW structure
 *
 *  Turn LED off.
 **/
s32 e1000e_led_off_generic(struct e1000_hw *hw)
{
	u32 ctrl;

	switch (hw->media_type) {
	case e1000_media_type_fiber:
		ctrl = er32(CTRL);
		ctrl |= E1000_CTRL_SWDPIN0;
		ctrl |= E1000_CTRL_SWDPIO0;
		ew32(CTRL, ctrl);
		break;
	case e1000_media_type_copper:
		ew32(LEDCTL, hw->mac.ledctl_mode1);
		break;
	default:
		break;
	}

	return 0;
}

/**
 *  e1000e_set_pcie_no_snoop - Set PCI-express capabilities
 *  @hw: pointer to the HW structure
 *  @no_snoop: bitmap of snoop events
 *
 *  Set the PCI-Express NO_SNOOP bits for the events enabled in 'no_snoop'.
 **/
void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop)
{
	u32 gcr;

	if (no_snoop) {
		gcr = er32(GCR);
		gcr &= ~(PCIE_NO_SNOOP_ALL);
		gcr |= no_snoop;
		ew32(GCR, gcr);
	}
}

/**
 *  e1000e_disable_pcie_master - Disables PCI-express master access
 *  @hw: pointer to the HW structure
 *
 *  Returns 0 if successful, else returns -10
 *  (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
 *  the master requests to be disabled.
 *
 *  Disables PCI-Express master access and verifies there are no pending
 *  requests.
 **/
s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 timeout = MASTER_DISABLE_TIMEOUT;

	ctrl = er32(CTRL);
	ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
	ew32(CTRL, ctrl);

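	/* Poll until the hardware reports that all in-flight master
	 * requests have drained; the GIO master enable status bit clears
	 * once no PCIe requests remain outstanding.
	 */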
	while (timeout) {
		if (!(er32(STATUS) &
		      E1000_STATUS_GIO_MASTER_ENABLE))
			break;
		udelay(100);
		timeout--;
	}

	if (!timeout) {
		hw_dbg(hw, "Master requests are pending.\n");
		return -E1000_ERR_MASTER_REQUESTS_PENDING;
	}

	return 0;
}

/**
 *  e1000e_reset_adaptive - Reset Adaptive Interframe Spacing
 *  @hw: pointer to the HW structure
 *
 *  Reset the Adaptive Interframe Spacing throttle to default values.
 **/
void e1000e_reset_adaptive(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	mac->current_ifs_val = 0;
	mac->ifs_min_val = IFS_MIN;
	mac->ifs_max_val = IFS_MAX;
	mac->ifs_step_size = IFS_STEP;
	mac->ifs_ratio = IFS_RATIO;

	mac->in_ifs_mode = 0;
	ew32(AIT, 0);
}

/**
 *  e1000e_update_adaptive - Update Adaptive Interframe Spacing
 *  @hw: pointer to the HW structure
 *
 *  Update the Adaptive Interframe Spacing Throttle value based on the
 *  time between transmitted packets and time between collisions.
 **/
void e1000e_update_adaptive(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

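	/* Enter IFS stretch mode when collisions are high relative to the
	 * number of transmitted packets, stepping the AIT value up toward
	 * its maximum; drop back out and zero the throttle once the
	 * collision rate falls off.
	 */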
	if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
		if (mac->tx_packet_delta > MIN_NUM_XMITS) {
			mac->in_ifs_mode = 1;
			if (mac->current_ifs_val < mac->ifs_max_val) {
				if (!mac->current_ifs_val)
					mac->current_ifs_val = mac->ifs_min_val;
				else
					mac->current_ifs_val +=
						mac->ifs_step_size;
				ew32(AIT, mac->current_ifs_val);
			}
		}
	} else {
		if (mac->in_ifs_mode &&
		    (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
			mac->current_ifs_val = 0;
			mac->in_ifs_mode = 0;
			ew32(AIT, 0);
		}
	}
}

/**
 *  e1000_raise_eec_clk - Raise EEPROM clock
 *  @hw: pointer to the HW structure
 *  @eecd: pointer to the EEPROM
 *
 *  Enable/Raise the EEPROM clock bit.
 **/
static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
{
	*eecd = *eecd | E1000_EECD_SK;
	ew32(EECD, *eecd);
	e1e_flush();
	udelay(hw->nvm.delay_usec);
}

/**
 *  e1000_lower_eec_clk - Lower EEPROM clock
 *  @hw: pointer to the HW structure
 *  @eecd: pointer to the EEPROM
 *
 *  Clear/Lower the EEPROM clock bit.
 **/
static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
{
	*eecd = *eecd & ~E1000_EECD_SK;
	ew32(EECD, *eecd);
	e1e_flush();
	udelay(hw->nvm.delay_usec);
}

/**
 *  e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
 *  @hw: pointer to the HW structure
 *  @data: data to send to the EEPROM
 *  @count: number of bits to shift out
 *
 *  We need to shift 'count' bits out to the EEPROM.  So, the value in the
 *  "data" parameter will be shifted out to the EEPROM one bit at a time.
 *  In order to do this, "data" must be broken down into bits.
 **/
static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = er32(EECD);
	u32 mask;

	mask = 0x01 << (count - 1);
	if (nvm->type == e1000_nvm_eeprom_spi)
		eecd |= E1000_EECD_DO;

	do {
		eecd &= ~E1000_EECD_DI;

		if (data & mask)
			eecd |= E1000_EECD_DI;

		ew32(EECD, eecd);
		e1e_flush();

		udelay(nvm->delay_usec);

		e1000_raise_eec_clk(hw, &eecd);
		e1000_lower_eec_clk(hw, &eecd);

		mask >>= 1;
	} while (mask);

	eecd &= ~E1000_EECD_DI;
	ew32(EECD, eecd);
}

/**
 *  e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
 *  @hw: pointer to the HW structure
 *  @count: number of bits to shift in
 *
 *  In order to read a register from the EEPROM, we need to shift 'count' bits
 *  in from the EEPROM.  Bits are "shifted in" by raising the clock input to
 *  the EEPROM (setting the SK bit), and then reading the value of the data out
 *  "DO" bit.  During this "shifting in" process the data in "DI" bit should
 *  always be clear.
 **/
static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
{
	u32 eecd;
	u32 i;
	u16 data;

	eecd = er32(EECD);

	eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
	data = 0;

	for (i = 0; i < count; i++) {
		data <<= 1;
		e1000_raise_eec_clk(hw, &eecd);

		eecd = er32(EECD);

		eecd &= ~E1000_EECD_DI;
		if (eecd & E1000_EECD_DO)
			data |= 1;

		e1000_lower_eec_clk(hw, &eecd);
	}

	return data;
}

/**
 *  e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion
 *  @hw: pointer to the HW structure
 *  @ee_reg: EEPROM flag for polling
 *
 *  Polls the EEPROM status bit for either read or write completion based
 *  upon the value of 'ee_reg'.
 **/
s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
{
	u32 attempts = 100000;
	u32 i, reg = 0;

	for (i = 0; i < attempts; i++) {
		if (ee_reg == E1000_NVM_POLL_READ)
			reg = er32(EERD);
		else
			reg = er32(EEWR);

		if (reg & E1000_NVM_RW_REG_DONE)
			return 0;

		udelay(5);
	}

	return -E1000_ERR_NVM;
}

/**
1735
 *  e1000e_acquire_nvm - Generic request for access to EEPROM
1736
 *  @hw: pointer to the HW structure
1737
 *
1738
 *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
1739
 *  Return successful if access grant bit set, else clear the request for
1740
 *  EEPROM access and return -E1000_ERR_NVM (-1).
1741
 **/
1742
s32 e1000e_acquire_nvm(struct e1000_hw *hw)
1743
{
1744
	u32 eecd = er32(EECD);
1745
	s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
1746
1747
	ew32(EECD, eecd | E1000_EECD_REQ);
1748
	eecd = er32(EECD);
1749
1750
	while (timeout) {
1751
		if (eecd & E1000_EECD_GNT)
1752
			break;
1753
		udelay(5);
1754
		eecd = er32(EECD);
1755
		timeout--;
1756
	}
1757
1758
	if (!timeout) {
1759
		eecd &= ~E1000_EECD_REQ;
1760
		ew32(EECD, eecd);
1761
		hw_dbg(hw, "Could not acquire NVM grant\n");
1762
		return -E1000_ERR_NVM;
1763
	}
1764
1765
	return 0;
1766
}
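/*
 * Worst-case bound for the grant loop above, assuming the usual
 * e1000-family value of E1000_NVM_GRANT_ATTEMPTS = 1000 (the constant
 * is defined in the driver headers, not in this hunk):
 *
 *	1000 attempts * udelay(5) = ~5 ms before the request is backed out
 */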
1767
1768
/**
1769
 *  e1000_standby_nvm - Return EEPROM to standby state
1770
 *  @hw: pointer to the HW structure
1771
 *
1772
 *  Return the EEPROM to a standby state.
1773
 **/
1774
static void e1000_standby_nvm(struct e1000_hw *hw)
1775
{
1776
	struct e1000_nvm_info *nvm = &hw->nvm;
1777
	u32 eecd = er32(EECD);
1778
1779
	if (nvm->type == e1000_nvm_eeprom_spi) {
1780
		/* Toggle CS to flush commands */
1781
		eecd |= E1000_EECD_CS;
1782
		ew32(EECD, eecd);
1783
		e1e_flush();
1784
		udelay(nvm->delay_usec);
1785
		eecd &= ~E1000_EECD_CS;
1786
		ew32(EECD, eecd);
1787
		e1e_flush();
1788
		udelay(nvm->delay_usec);
1789
	}
1790
}
1791
1792
/**
1793
 *  e1000_stop_nvm - Terminate EEPROM command
1794
 *  @hw: pointer to the HW structure
1795
 *
1796
 *  Terminates the current command by inverting the EEPROM's chip select pin.
1797
 **/
1798
static void e1000_stop_nvm(struct e1000_hw *hw)
1799
{
1800
	u32 eecd;
1801
1802
	eecd = er32(EECD);
1803
	if (hw->nvm.type == e1000_nvm_eeprom_spi) {
1804
		/* Pull CS high */
1805
		eecd |= E1000_EECD_CS;
1806
		e1000_lower_eec_clk(hw, &eecd);
1807
	}
1808
}
1809
1810
/**
1811
 *  e1000e_release_nvm - Release exclusive access to EEPROM
1812
 *  @hw: pointer to the HW structure
1813
 *
1814
 *  Stop any current commands to the EEPROM and clear the EEPROM request bit.
1815
 **/
1816
void e1000e_release_nvm(struct e1000_hw *hw)
1817
{
1818
	u32 eecd;
1819
1820
	e1000_stop_nvm(hw);
1821
1822
	eecd = er32(EECD);
1823
	eecd &= ~E1000_EECD_REQ;
1824
	ew32(EECD, eecd);
1825
}
1826
1827
/**
1828
 *  e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
1829
 *  @hw: pointer to the HW structure
1830
 *
1831
 *  Sets up the EEPROM for reading and writing.
1832
 **/
1833
static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
1834
{
1835
	struct e1000_nvm_info *nvm = &hw->nvm;
1836
	u32 eecd = er32(EECD);
1837
	u16 timeout = 0;
1838
	u8 spi_stat_reg;
1839
1840
	if (nvm->type == e1000_nvm_eeprom_spi) {
1841
		/* Clear SK and CS */
1842
		eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
1843
		ew32(EECD, eecd);
1844
		udelay(1);
1845
		timeout = NVM_MAX_RETRY_SPI;
1846
1847
		/* Read "Status Register" repeatedly until the LSB is cleared.
1848
		 * The EEPROM will signal that the command has been completed
1849
		 * by clearing bit 0 of the internal status register.  If it's
1850
		 * not cleared within 'timeout', then error out. */
1851
		while (timeout) {
1852
			e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
1853
						 hw->nvm.opcode_bits);
1854
			spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
1855
			if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
1856
				break;
1857
1858
			udelay(5);
1859
			e1000_standby_nvm(hw);
1860
			timeout--;
1861
		}
1862
1863
		if (!timeout) {
1864
			hw_dbg(hw, "SPI NVM Status error\n");
1865
			return -E1000_ERR_NVM;
1866
		}
1867
	}
1868
1869
	return 0;
1870
}
1871
1872
/**
1873
 *  e1000e_read_nvm_spi - Read EEPROM using SPI
1874
 *  @hw: pointer to the HW structure
1875
 *  @offset: offset of word in the EEPROM to read
1876
 *  @words: number of words to read
1877
 *  @data: word read from the EEPROM
1878
 *
1879
 *  Reads a 16 bit word from the EEPROM.
1880
 **/
1881
s32 e1000e_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1882
{
1883
	struct e1000_nvm_info *nvm = &hw->nvm;
1884
	u32 i = 0;
1885
	s32 ret_val;
1886
	u16 word_in;
1887
	u8 read_opcode = NVM_READ_OPCODE_SPI;
1888
1889
	/* A check for invalid values:  offset too large, too many words,
1890
	 * and not enough words. */
1891
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
1892
	    (words == 0)) {
1893
		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
1894
		return -E1000_ERR_NVM;
1895
	}
1896
1897
	ret_val = nvm->ops.acquire_nvm(hw);
1898
	if (ret_val)
1899
		return ret_val;
1900
1901
	ret_val = e1000_ready_nvm_eeprom(hw);
1902
	if (ret_val) {
1903
		nvm->ops.release_nvm(hw);
1904
		return ret_val;
1905
	}
1906
1907
	e1000_standby_nvm(hw);
1908
1909
	if ((nvm->address_bits == 8) && (offset >= 128))
1910
		read_opcode |= NVM_A8_OPCODE_SPI;
1911
1912
	/* Send the READ command (opcode + addr) */
1913
	e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
1914
	e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
1915
1916
	/* Read the data.  SPI NVMs increment the address with each byte
1917
	 * read and will roll over if reading beyond the end.  This allows
1918
	 * us to read the whole NVM from any offset */
1919
	for (i = 0; i < words; i++) {
1920
		word_in = e1000_shift_in_eec_bits(hw, 16);
1921
		data[i] = (word_in >> 8) | (word_in << 8);
1922
	}
1923
1924
	nvm->ops.release_nvm(hw);
1925
	return 0;
1926
}
1927
1928
/**
1929
 *  e1000e_read_nvm_eerd - Reads EEPROM using EERD register
1930
 *  @hw: pointer to the HW structure
1931
 *  @offset: offset of word in the EEPROM to read
1932
 *  @words: number of words to read
1933
 *  @data: word read from the EEPROM
1934
 *
1935
 *  Reads a 16 bit word from the EEPROM using the EERD register.
1936
 **/
1937
s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1938
{
1939
	struct e1000_nvm_info *nvm = &hw->nvm;
1940
	u32 i, eerd = 0;
1941
	s32 ret_val = 0;
1942
1943
	/* A check for invalid values:  offset too large, too many words,
1944
	 * and not enough words. */
1945
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
1946
	    (words == 0)) {
1947
		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
1948
		return -E1000_ERR_NVM;
1949
	}
1950
1951
	for (i = 0; i < words; i++) {
1952
		eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
1953
		       E1000_NVM_RW_REG_START;
1954
1955
		ew32(EERD, eerd);
1956
		ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
1957
		if (ret_val)
1958
			break;
1959
1960
		data[i] = (er32(EERD) >>
1961
			   E1000_NVM_RW_REG_DATA);
1962
	}
1963
1964
	return ret_val;
1965
}
1966
1967
/**
1968
 *  e1000e_write_nvm_spi - Write to EEPROM using SPI
1969
 *  @hw: pointer to the HW structure
1970
 *  @offset: offset within the EEPROM to be written to
1971
 *  @words: number of words to write
1972
 *  @data: 16 bit word(s) to be written to the EEPROM
1973
 *
1974
 *  Writes data to EEPROM at offset using SPI interface.
1975
 *
1976
 *  If e1000e_update_nvm_checksum is not called after this function, the
1977
 *  EEPROM will most likely contain an invalid checksum.
1978
 **/
1979
s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1980
{
1981
	struct e1000_nvm_info *nvm = &hw->nvm;
1982
	s32 ret_val;
1983
	u16 widx = 0;
1984
1985
	/* A check for invalid values:  offset too large, too many words,
1986
	 * and not enough words. */
1987
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
1988
	    (words == 0)) {
1989
		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
1990
		return -E1000_ERR_NVM;
1991
	}
1992
1993
	ret_val = nvm->ops.acquire_nvm(hw);
1994
	if (ret_val)
1995
		return ret_val;
1996
1997
	msleep(10);
1998
1999
	while (widx < words) {
2000
		u8 write_opcode = NVM_WRITE_OPCODE_SPI;
2001
2002
		ret_val = e1000_ready_nvm_eeprom(hw);
2003
		if (ret_val) {
2004
			nvm->ops.release_nvm(hw);
2005
			return ret_val;
2006
		}
2007
2008
		e1000_standby_nvm(hw);
2009
2010
		/* Send the WRITE ENABLE command (8 bit opcode) */
2011
		e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
2012
					 nvm->opcode_bits);
2013
2014
		e1000_standby_nvm(hw);
2015
2016
		/* Some SPI eeproms use the 8th address bit embedded in the
2017
		 * opcode */
2018
		if ((nvm->address_bits == 8) && (offset >= 128))
2019
			write_opcode |= NVM_A8_OPCODE_SPI;
2020
2021
		/* Send the Write command (8-bit opcode + addr) */
2022
		e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
2023
		e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
2024
					 nvm->address_bits);
2025
2026
		/* Loop to allow for up to whole page write of eeprom */
2027
		while (widx < words) {
2028
			u16 word_out = data[widx];
2029
			word_out = (word_out >> 8) | (word_out << 8);
2030
			e1000_shift_out_eec_bits(hw, word_out, 16);
2031
			widx++;
2032
2033
			if ((((offset + widx) * 2) % nvm->page_size) == 0) {
2034
				e1000_standby_nvm(hw);
2035
				break;
2036
			}
2037
		}
2038
	}
2039
2040
	msleep(10);
2041
	return 0;
2042
}
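/*
 * Sketch (illustration only, assuming a hypothetical 32-byte SPI page;
 * the real size comes from nvm->page_size) of the page-break test
 * above, (((offset + widx) * 2) % nvm->page_size) == 0:
 */
static unsigned short words_until_page_break(unsigned short offset_words,
					     unsigned short page_size)
{
	unsigned short widx = 0;

	while ((((offset_words + ++widx) * 2) % page_size) != 0)
		;
	return widx;
}

/* words_until_page_break(14, 32) == 2: a write starting at word 14
 * (byte 28) emits two words, reaches byte 32, and must drop to
 * standby and re-issue the WRITE opcode for the next page. */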
2043
2044
/**
2045
 *  e1000e_read_mac_addr - Read device MAC address
2046
 *  @hw: pointer to the HW structure
2047
 *
2048
 *  Reads the device MAC address from the EEPROM and stores the value.
2049
 *  Since devices with two ports use the same EEPROM, we increment the
2050
 *  last bit in the MAC address for the second port.
2051
 **/
2052
s32 e1000e_read_mac_addr(struct e1000_hw *hw)
2053
{
2054
	s32 ret_val;
2055
	u16 offset, nvm_data, i;
2056
2057
	for (i = 0; i < ETH_ALEN; i += 2) {
2058
		offset = i >> 1;
2059
		ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
2060
		if (ret_val) {
2061
			hw_dbg(hw, "NVM Read Error\n");
2062
			return ret_val;
2063
		}
2064
		hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
2065
		hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
2066
	}
2067
2068
	/* Flip last bit of mac address if we're on second port */
2069
	if (hw->bus.func == E1000_FUNC_1)
2070
		hw->mac.perm_addr[5] ^= 1;
2071
2072
	for (i = 0; i < ETH_ALEN; i++)
2073
		hw->mac.addr[i] = hw->mac.perm_addr[i];
2074
2075
	return 0;
2076
}
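/*
 * Self-contained sketch (illustration only, made-up NVM contents) of
 * the unpacking above: each NVM word carries two MAC bytes, low byte
 * first.
 */
#include <stdio.h>

int main(void)
{
	unsigned short nvm[3] = { 0x1100, 0x3322, 0x5544 };
	unsigned char mac[6];
	int i;

	for (i = 0; i < 6; i += 2) {
		mac[i] = nvm[i >> 1] & 0xFF;	/* low byte first */
		mac[i + 1] = nvm[i >> 1] >> 8;
	}
	/* second port of a dual-port device would also do: mac[5] ^= 1; */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	/* prints 00:11:22:33:44:55 */
	return 0;
}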
2077
2078
/**
2079
 *  e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum
2080
 *  @hw: pointer to the HW structure
2081
 *
2082
 *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
2083
 *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
2084
 **/
2085
s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
2086
{
2087
	s32 ret_val;
2088
	u16 checksum = 0;
2089
	u16 i, nvm_data;
2090
2091
	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
2092
		ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
2093
		if (ret_val) {
2094
			hw_dbg(hw, "NVM Read Error\n");
2095
			return ret_val;
2096
		}
2097
		checksum += nvm_data;
2098
	}
2099
2100
	if (checksum != (u16) NVM_SUM) {
2101
		hw_dbg(hw, "NVM Checksum Invalid\n");
2102
		return -E1000_ERR_NVM;
2103
	}
2104
2105
	return 0;
2106
}
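/*
 * The invariant shared with e1000e_update_nvm_checksum_generic below:
 * the stored checksum word is whatever makes all words through
 * NVM_CHECKSUM_REG sum to 0xBABA in 16-bit arithmetic.  A made-up
 * example: if the data words sum to 0x1234, the checksum word must be
 *
 *	0xBABA - 0x1234 = 0xA886 (mod 2^16)
 *
 * so that the running sum above comes out to exactly 0xBABA.
 */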
2107
2108
/**
2109
 *  e1000e_update_nvm_checksum_generic - Update EEPROM checksum
2110
 *  @hw: pointer to the HW structure
2111
 *
2112
 *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
2113
 *  up to the checksum.  Then calculates the EEPROM checksum and writes the
2114
 *  value to the EEPROM.
2115
 **/
2116
s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
2117
{
2118
	s32 ret_val;
2119
	u16 checksum = 0;
2120
	u16 i, nvm_data;
2121
2122
	for (i = 0; i < NVM_CHECKSUM_REG; i++) {
2123
		ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
2124
		if (ret_val) {
2125
			hw_dbg(hw, "NVM Read Error while updating checksum.\n");
2126
			return ret_val;
2127
		}
2128
		checksum += nvm_data;
2129
	}
2130
	checksum = (u16) NVM_SUM - checksum;
2131
	ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
2132
	if (ret_val)
2133
		hw_dbg(hw, "NVM Write Error while updating checksum.\n");
2134
2135
	return ret_val;
2136
}
2137
2138
/**
2139
 *  e1000e_reload_nvm - Reloads EEPROM
2140
 *  @hw: pointer to the HW structure
2141
 *
2142
 *  Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
2143
 *  extended control register.
2144
 **/
2145
void e1000e_reload_nvm(struct e1000_hw *hw)
2146
{
2147
	u32 ctrl_ext;
2148
2149
	udelay(10);
2150
	ctrl_ext = er32(CTRL_EXT);
2151
	ctrl_ext |= E1000_CTRL_EXT_EE_RST;
2152
	ew32(CTRL_EXT, ctrl_ext);
2153
	e1e_flush();
2154
}
2155
2156
/**
2157
 *  e1000_calculate_checksum - Calculate checksum for buffer
2158
 *  @buffer: pointer to EEPROM
2159
 *  @length: size of EEPROM to calculate a checksum for
2160
 *
2161
 *  Calculates the checksum over a buffer of the specified length.  The
2162
 *  checksum calculated is returned.
2163
 **/
2164
static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
2165
{
2166
	u32 i;
2167
	u8  sum = 0;
2168
2169
	if (!buffer)
2170
		return 0;
2171
2172
	for (i = 0; i < length; i++)
2173
		sum += buffer[i];
2174
2175
	return (u8) (0 - sum);
2176
}
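/*
 * A two's-complement byte checksum, illustrated with made-up bytes:
 * { 0x10, 0x20, 0x30 } sums to 0x60, so the checksum byte is
 * (u8)(0 - 0x60) = 0xA0, and (0x10 + 0x20 + 0x30 + 0xA0) & 0xFF == 0.
 * This is the property that lets a block be verified by re-summing it
 * together with its checksum byte.
 */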
2177
2178
/**
2179
 *  e1000_mng_enable_host_if - Checks host interface is enabled
2180
 *  @hw: pointer to the HW structure
2181
 *
2182
 *  Returns 0 upon success, else -E1000_ERR_HOST_INTERFACE_COMMAND
2183
 *
2184
 *  This function checks whether the HOST IF is enabled for command operation
2185
 *  and also checks whether the previous command is completed.  It busy waits
2186
 *  if the previous command has not completed.
2187
 **/
2188
static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
2189
{
2190
	u32 hicr;
2191
	u8 i;
2192
2193
	/* Check that the host interface is enabled. */
2194
	hicr = er32(HICR);
2195
	if ((hicr & E1000_HICR_EN) == 0) {
2196
		hw_dbg(hw, "E1000_HOST_EN bit disabled.\n");
2197
		return -E1000_ERR_HOST_INTERFACE_COMMAND;
2198
	}
2199
	/* check the previous command is completed */
2200
	for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
2201
		hicr = er32(HICR);
2202
		if (!(hicr & E1000_HICR_C))
2203
			break;
2204
		mdelay(1);
2205
	}
2206
2207
	if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
2208
		hw_dbg(hw, "Previous command timeout failed .\n");
2209
		return -E1000_ERR_HOST_INTERFACE_COMMAND;
2210
	}
2211
2212
	return 0;
2213
}
2214
2215
/**
2216
 *  e1000e_check_mng_mode - check management mode
2217
 *  @hw: pointer to the HW structure
2218
 *
2219
 *  Reads the firmware semaphore register and returns true (>0) if
2220
 *  manageability is enabled, else false (0).
2221
 **/
2222
bool e1000e_check_mng_mode(struct e1000_hw *hw)
2223
{
2224
	u32 fwsm = er32(FWSM);
2225
2226
	return (fwsm & E1000_FWSM_MODE_MASK) == hw->mac.ops.mng_mode_enab;
2227
}
2228
2229
/**
2230
 *  e1000e_enable_tx_pkt_filtering - Enable packet filtering on TX
2231
 *  @hw: pointer to the HW structure
2232
 *
2233
 *  Enables packet filtering on transmit packets if manageability is enabled
2234
 *  and host interface is enabled.
2235
 **/
2236
bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
2237
{
2238
	struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
2239
	u32 *buffer = (u32 *)&hw->mng_cookie;
2240
	u32 offset;
2241
	s32 ret_val, hdr_csum, csum;
2242
	u8 i, len;
2243
2244
	/* No manageability, no filtering */
2245
	if (!e1000e_check_mng_mode(hw)) {
2246
		hw->mac.tx_pkt_filtering = 0;
2247
		return 0;
2248
	}
2249
2250
	/* If we can't read from the host interface for whatever
2251
	 * reason, disable filtering.
2252
	 */
2253
	ret_val = e1000_mng_enable_host_if(hw);
2254
	if (ret_val != 0) {
2255
		hw->mac.tx_pkt_filtering = 0;
2256
		return ret_val;
2257
	}
2258
2259
	/* Read in the header.  Length and offset are in dwords. */
2260
	len    = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
2261
	offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
2262
	for (i = 0; i < len; i++)
2263
		*(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset + i);
2264
	hdr_csum = hdr->checksum;
2265
	hdr->checksum = 0;
2266
	csum = e1000_calculate_checksum((u8 *)hdr,
2267
					E1000_MNG_DHCP_COOKIE_LENGTH);
2268
	/* If either the checksum or the signature doesn't match, then
2269
	 * the cookie area isn't considered valid, in which case we
2270
	 * take the safe route of assuming Tx filtering is enabled.
2271
	 */
2272
	if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
2273
		hw->mac.tx_pkt_filtering = 1;
2274
		return 1;
2275
	}
2276
2277
	/* Cookie area is valid, make the final check for filtering. */
2278
	if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
2279
		hw->mac.tx_pkt_filtering = 0;
2280
		return 0;
2281
	}
2282
2283
	hw->mac.tx_pkt_filtering = 1;
2284
	return 1;
2285
}
2286
2287
/**
2288
 *  e1000_mng_write_cmd_header - Writes manageability command header
2289
 *  @hw: pointer to the HW structure
2290
 *  @hdr: pointer to the host interface command header
2291
 *
2292
 *  Writes the command header after computing the checksum.
2293
 **/
2294
static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
2295
				  struct e1000_host_mng_command_header *hdr)
2296
{
2297
	u16 i, length = sizeof(struct e1000_host_mng_command_header);
2298
2299
	/* Write the whole command header structure with new checksum. */
2300
2301
	hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
2302
2303
	length >>= 2;
2304
	/* Write the relevant command block into the ram area. */
2305
	for (i = 0; i < length; i++) {
2306
		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i,
2307
					    *((u32 *) hdr + i));
2308
		e1e_flush();
2309
	}
2310
2311
	return 0;
2312
}
2313
2314
/**
2315
 *  e1000_mng_host_if_write - Writes to the manageability host interface
2316
 *  @hw: pointer to the HW structure
2317
 *  @buffer: pointer to the host interface buffer
2318
 *  @length: size of the buffer
2319
 *  @offset: location in the buffer to write to
2320
 *  @sum: sum of the data (not checksum)
2321
 *
2322
 *  This function writes the buffer content at the given offset on the host
2323
 *  interface.  It handles alignment so the writes are done in the most
2324
 *  efficient way, and it accumulates the sum of the data in the *sum parameter.
2325
 **/
2326
static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
2327
				   u16 length, u16 offset, u8 *sum)
2328
{
2329
	u8 *tmp;
2330
	u8 *bufptr = buffer;
2331
	u32 data = 0;
2332
	u16 remaining, i, j, prev_bytes;
2333
2334
	/* sum = only sum of the data and it is not checksum */
2335
2336
	if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH)
2337
		return -E1000_ERR_PARAM;
2338
2339
	tmp = (u8 *)&data;
2340
	prev_bytes = offset & 0x3;
2341
	offset >>= 2;
2342
2343
	if (prev_bytes) {
2344
		data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset);
2345
		for (j = prev_bytes; j < sizeof(u32); j++) {
2346
			*(tmp + j) = *bufptr++;
2347
			*sum += *(tmp + j);
2348
		}
2349
		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data);
2350
		length -= j - prev_bytes;
2351
		offset++;
2352
	}
2353
2354
	remaining = length & 0x3;
2355
	length -= remaining;
2356
2357
	/* Calculate length in DWORDs */
2358
	length >>= 2;
2359
2360
	/* The device driver writes the relevant command block into the
2361
	 * ram area. */
2362
	for (i = 0; i < length; i++) {
2363
		for (j = 0; j < sizeof(u32); j++) {
2364
			*(tmp + j) = *bufptr++;
2365
			*sum += *(tmp + j);
2366
		}
2367
2368
		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
2369
	}
2370
	if (remaining) {
2371
		for (j = 0; j < sizeof(u32); j++) {
2372
			if (j < remaining)
2373
				*(tmp + j) = *bufptr++;
2374
			else
2375
				*(tmp + j) = 0;
2376
2377
			*sum += *(tmp + j);
2378
		}
2379
		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
2380
	}
2381
2382
	return 0;
2383
}
2384
2385
/**
2386
 *  e1000e_mng_write_dhcp_info - Writes DHCP info to host interface
2387
 *  @hw: pointer to the HW structure
2388
 *  @buffer: pointer to the host interface
2389
 *  @length: size of the buffer
2390
 *
2391
 *  Writes the DHCP information to the host interface.
2392
 **/
2393
s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
2394
{
2395
	struct e1000_host_mng_command_header hdr;
2396
	s32 ret_val;
2397
	u32 hicr;
2398
2399
	hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
2400
	hdr.command_length = length;
2401
	hdr.reserved1 = 0;
2402
	hdr.reserved2 = 0;
2403
	hdr.checksum = 0;
2404
2405
	/* Enable the host interface */
2406
	ret_val = e1000_mng_enable_host_if(hw);
2407
	if (ret_val)
2408
		return ret_val;
2409
2410
	/* Populate the host interface with the contents of "buffer". */
2411
	ret_val = e1000_mng_host_if_write(hw, buffer, length,
2412
					  sizeof(hdr), &(hdr.checksum));
2413
	if (ret_val)
2414
		return ret_val;
2415
2416
	/* Write the manageability command header */
2417
	ret_val = e1000_mng_write_cmd_header(hw, &hdr);
2418
	if (ret_val)
2419
		return ret_val;
2420
2421
	/* Tell the ARC a new command is pending. */
2422
	hicr = er32(HICR);
2423
	ew32(HICR, hicr | E1000_HICR_C);
2424
2425
	return 0;
2426
}
2427
2428
/**
2429
 *  e1000e_enable_mng_pass_thru - Enable processing of ARPs
2430
 *  @hw: pointer to the HW structure
2431
 *
2432
 *  Verifies whether the hardware needs to allow ARPs to be processed by the host.
2433
 **/
2434
bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
2435
{
2436
	u32 manc;
2437
	u32 fwsm, factps;
2438
	bool ret_val = 0;
2439
2440
	manc = er32(MANC);
2441
2442
	if (!(manc & E1000_MANC_RCV_TCO_EN) ||
2443
	    !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
2444
		return ret_val;
2445
2446
	if (hw->mac.arc_subsystem_valid) {
2447
		fwsm = er32(FWSM);
2448
		factps = er32(FACTPS);
2449
2450
		if (!(factps & E1000_FACTPS_MNGCG) &&
2451
		    ((fwsm & E1000_FWSM_MODE_MASK) ==
2452
		     (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
2453
			ret_val = 1;
2454
			return ret_val;
2455
		}
2456
	} else {
2457
		if ((manc & E1000_MANC_SMBUS_EN) &&
2458
		    !(manc & E1000_MANC_ASF_EN)) {
2459
			ret_val = 1;
2460
			return ret_val;
2461
		}
2462
	}
2463
2464
	return ret_val;
2465
}
2466
2467
s32 e1000e_read_part_num(struct e1000_hw *hw, u32 *part_num)
2468
{
2469
	s32 ret_val;
2470
	u16 nvm_data;
2471
2472
	ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
2473
	if (ret_val) {
2474
		hw_dbg(hw, "NVM Read Error\n");
2475
		return ret_val;
2476
	}
2477
	*part_num = (u32)(nvm_data << 16);
2478
2479
	ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
2480
	if (ret_val) {
2481
		hw_dbg(hw, "NVM Read Error\n");
2482
		return ret_val;
2483
	}
2484
	*part_num |= nvm_data;
2485
2486
	return 0;
2487
}
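/*
 * Illustration only (made-up words): the PBA number is the two NVM
 * words concatenated, high word first.  If NVM_PBA_OFFSET_0 read
 * 0x1234 and NVM_PBA_OFFSET_1 read 0xABCD, then
 *
 *	part_num = (0x1234 << 16) | 0xABCD = 0x1234ABCD
 */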
(-)a/drivers/net/e1000e/netdev.c (+4441 lines)
Line 0 Link Here
1
/*******************************************************************************
2
3
  Intel PRO/1000 Linux driver
4
  Copyright(c) 1999 - 2007 Intel Corporation.
5
6
  This program is free software; you can redistribute it and/or modify it
7
  under the terms and conditions of the GNU General Public License,
8
  version 2, as published by the Free Software Foundation.
9
10
  This program is distributed in the hope it will be useful, but WITHOUT
11
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13
  more details.
14
15
  You should have received a copy of the GNU General Public License along with
16
  this program; if not, write to the Free Software Foundation, Inc.,
17
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19
  The full GNU General Public License is included in this distribution in
20
  the file called "COPYING".
21
22
  Contact Information:
23
  Linux NICS <linux.nics@intel.com>
24
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27
*******************************************************************************/
28
29
#include <linux/module.h>
30
#include <linux/types.h>
31
#include <linux/init.h>
32
#include <linux/pci.h>
33
#include <linux/vmalloc.h>
34
#include <linux/pagemap.h>
35
#include <linux/delay.h>
36
#include <linux/netdevice.h>
37
#include <linux/tcp.h>
38
#include <linux/ipv6.h>
39
#include <net/checksum.h>
40
#include <net/ip6_checksum.h>
41
#include <linux/mii.h>
42
#include <linux/ethtool.h>
43
#include <linux/if_vlan.h>
44
#include <linux/cpu.h>
45
#include <linux/smp.h>
46
47
#include "e1000.h"
48
49
#define DRV_VERSION "0.2.0"
50
char e1000e_driver_name[] = "e1000e";
51
const char e1000e_driver_version[] = DRV_VERSION;
52
53
static const struct e1000_info *e1000_info_tbl[] = {
54
	[board_82571]		= &e1000_82571_info,
55
	[board_82572]		= &e1000_82572_info,
56
	[board_82573]		= &e1000_82573_info,
57
	[board_80003es2lan]	= &e1000_es2_info,
58
	[board_ich8lan]		= &e1000_ich8_info,
59
	[board_ich9lan]		= &e1000_ich9_info,
60
};
61
62
#ifdef DEBUG
63
/**
64
 * e1000_get_hw_dev_name - return device name string
65
 * used by hardware layer to print debugging information
66
 **/
67
char *e1000e_get_hw_dev_name(struct e1000_hw *hw)
68
{
69
	struct e1000_adapter *adapter = hw->back;
70
	struct net_device *netdev = adapter->netdev;
71
	return netdev->name;
72
}
73
#endif
74
75
/**
76
 * e1000_desc_unused - calculate the number of unused descriptors
77
 **/
78
static int e1000_desc_unused(struct e1000_ring *ring)
79
{
80
	if (ring->next_to_clean > ring->next_to_use)
81
		return ring->next_to_clean - ring->next_to_use - 1;
82
83
	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
84
}
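/*
 * Worked examples for a hypothetical 256-entry ring (one slot is
 * always held back so next_to_use == next_to_clean can only mean an
 * empty ring):
 *
 *	next_to_clean = 250, next_to_use = 10:  250 - 10 - 1 = 239 unused
 *	next_to_clean = 10, next_to_use = 250:  256 + 10 - 250 - 1 = 15 unused
 */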
85
86
/**
87
 * e1000_receive_skb - helper function to handle rx indications
88
 * @adapter: board private structure
 * @netdev: pointer to the net device structure
90
 * @status: descriptor status field as written by hardware
91
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
92
 * @skb: pointer to sk_buff to be indicated to stack
92
 **/
93
static void e1000_receive_skb(struct e1000_adapter *adapter,
94
			      struct net_device *netdev,
95
			      struct sk_buff *skb,
96
			      u8 status, u16 vlan)
97
{
98
	skb->protocol = eth_type_trans(skb, netdev);
99
100
	if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
101
		vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
102
					 le16_to_cpu(vlan) &
103
					 E1000_RXD_SPC_VLAN_MASK);
104
	else
105
		netif_receive_skb(skb);
106
107
	netdev->last_rx = jiffies;
108
}
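/*
 * Illustration only, assuming the usual E1000_RXD_SPC_VLAN_MASK of
 * 0x0FFF: the masking above keeps the 12-bit VLAN ID and drops the
 * priority/CFI bits, e.g. a special field of 0x3064 yields ID 0x064.
 */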
109
110
/**
111
 * e1000_rx_checksum - Receive Checksum Offload
112
 * @adapter:     board private structure
113
 * @status_err:  receive descriptor status and error fields
114
 * @csum:	receive descriptor csum field
115
 * @sk_buff:     socket buffer with received data
116
 **/
117
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
118
			      u32 csum, struct sk_buff *skb)
119
{
120
	u16 status = (u16)status_err;
121
	u8 errors = (u8)(status_err >> 24);
122
	skb->ip_summed = CHECKSUM_NONE;
123
124
	/* Ignore Checksum bit is set */
125
	if (status & E1000_RXD_STAT_IXSM)
126
		return;
127
	/* TCP/UDP checksum error bit is set */
128
	if (errors & E1000_RXD_ERR_TCPE) {
129
		/* let the stack verify checksum errors */
130
		adapter->hw_csum_err++;
131
		return;
132
	}
133
134
	/* TCP/UDP Checksum has not been calculated */
135
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
136
		return;
137
138
	/* It must be a TCP or UDP packet with a valid checksum */
139
	if (status & E1000_RXD_STAT_TCPCS) {
140
		/* TCP checksum is good */
141
		skb->ip_summed = CHECKSUM_UNNECESSARY;
142
	} else {
143
		/* IP fragment with UDP payload */
144
		/* Hardware complements the payload checksum, so we undo it
145
		 * and then put the value in host order for further stack use.
146
		 */
147
		csum = ntohl(csum ^ 0xFFFF);
148
		skb->csum = csum;
149
		skb->ip_summed = CHECKSUM_COMPLETE;
150
	}
151
	adapter->hw_csum_good++;
152
}
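/*
 * Illustration only: in the UDP-fragment branch the hardware reports
 * the bitwise complement of the 16-bit ones'-complement payload sum,
 * so the XOR with 0xFFFF undoes it.  E.g. a true payload sum of
 * 0x1DEF is reported as 0x1DEF ^ 0xFFFF = 0xE210, and XORing with
 * 0xFFFF again restores 0x1DEF, which (after the ntohl byte-order
 * fix-up) is what CHECKSUM_COMPLETE expects in skb->csum.
 */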
153
154
/**
155
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
156
 * @adapter: address of board private structure
 * @cleaned_count: number of buffers to allocate this pass
157
 **/
158
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
159
				   int cleaned_count)
160
{
161
	struct net_device *netdev = adapter->netdev;
162
	struct pci_dev *pdev = adapter->pdev;
163
	struct e1000_ring *rx_ring = adapter->rx_ring;
164
	struct e1000_rx_desc *rx_desc;
165
	struct e1000_buffer *buffer_info;
166
	struct sk_buff *skb;
167
	unsigned int i;
168
	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
169
170
	i = rx_ring->next_to_use;
171
	buffer_info = &rx_ring->buffer_info[i];
172
173
	while (cleaned_count--) {
174
		skb = buffer_info->skb;
175
		if (skb) {
176
			skb_trim(skb, 0);
177
			goto map_skb;
178
		}
179
180
		skb = netdev_alloc_skb(netdev, bufsz);
181
		if (!skb) {
182
			/* Better luck next round */
183
			adapter->alloc_rx_buff_failed++;
184
			break;
185
		}
186
187
		/* Make buffer alignment 2 beyond a 16 byte boundary
188
		 * this will result in a 16 byte aligned IP header after
189
		 * the 14 byte MAC header is removed
190
		 */
191
		skb_reserve(skb, NET_IP_ALIGN);
192
193
		buffer_info->skb = skb;
194
map_skb:
195
		buffer_info->dma = pci_map_single(pdev, skb->data,
196
						  adapter->rx_buffer_len,
197
						  PCI_DMA_FROMDEVICE);
198
		if (pci_dma_mapping_error(buffer_info->dma)) {
199
			dev_err(&pdev->dev, "RX DMA map failed\n");
200
			adapter->rx_dma_failed++;
201
			break;
202
		}
203
204
		rx_desc = E1000_RX_DESC(*rx_ring, i);
205
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
206
207
		i++;
208
		if (i == rx_ring->count)
209
			i = 0;
210
		buffer_info = &rx_ring->buffer_info[i];
211
	}
212
213
	if (rx_ring->next_to_use != i) {
214
		rx_ring->next_to_use = i;
215
		if (i-- == 0)
216
			i = (rx_ring->count - 1);
217
218
		/* Force memory writes to complete before letting h/w
219
		 * know there are new descriptors to fetch.  (Only
220
		 * applicable for weak-ordered memory model archs,
221
		 * such as IA-64). */
222
		wmb();
223
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
224
	}
225
}
226
227
/**
228
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
229
 * @adapter: address of board private structure
 * @cleaned_count: number of buffers to allocate this pass
230
 **/
231
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
232
				      int cleaned_count)
233
{
234
	struct net_device *netdev = adapter->netdev;
235
	struct pci_dev *pdev = adapter->pdev;
236
	union e1000_rx_desc_packet_split *rx_desc;
237
	struct e1000_ring *rx_ring = adapter->rx_ring;
238
	struct e1000_buffer *buffer_info;
239
	struct e1000_ps_page *ps_page;
240
	struct sk_buff *skb;
241
	unsigned int i, j;
242
243
	i = rx_ring->next_to_use;
244
	buffer_info = &rx_ring->buffer_info[i];
245
246
	while (cleaned_count--) {
247
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
248
249
		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
250
			ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS)
251
						     + j];
252
			if (j < adapter->rx_ps_pages) {
253
				if (!ps_page->page) {
254
					ps_page->page = alloc_page(GFP_ATOMIC);
255
					if (!ps_page->page) {
256
						adapter->alloc_rx_buff_failed++;
257
						goto no_buffers;
258
					}
259
					ps_page->dma = pci_map_page(pdev,
260
							   ps_page->page,
261
							   0, PAGE_SIZE,
262
							   PCI_DMA_FROMDEVICE);
263
					if (pci_dma_mapping_error(
264
							ps_page->dma)) {
265
						dev_err(&adapter->pdev->dev,
266
						  "RX DMA page map failed\n");
267
						adapter->rx_dma_failed++;
268
						goto no_buffers;
269
					}
270
				}
271
				/*
272
				 * Refresh the desc even if buffer_addrs
273
				 * didn't change because each write-back
274
				 * erases this info.
275
				 */
276
				rx_desc->read.buffer_addr[j+1] =
277
				     cpu_to_le64(ps_page->dma);
278
			} else {
279
				rx_desc->read.buffer_addr[j+1] = ~0;
280
			}
281
		}
282
283
		skb = netdev_alloc_skb(netdev,
284
				       adapter->rx_ps_bsize0 + NET_IP_ALIGN);
285
286
		if (!skb) {
287
			adapter->alloc_rx_buff_failed++;
288
			break;
289
		}
290
291
		/* Make buffer alignment 2 beyond a 16 byte boundary
292
		 * this will result in a 16 byte aligned IP header after
293
		 * the 14 byte MAC header is removed
294
		 */
295
		skb_reserve(skb, NET_IP_ALIGN);
296
297
		buffer_info->skb = skb;
298
		buffer_info->dma = pci_map_single(pdev, skb->data,
299
						  adapter->rx_ps_bsize0,
300
						  PCI_DMA_FROMDEVICE);
301
		if (pci_dma_mapping_error(buffer_info->dma)) {
302
			dev_err(&pdev->dev, "RX DMA map failed\n");
303
			adapter->rx_dma_failed++;
304
			/* cleanup skb */
305
			dev_kfree_skb_any(skb);
306
			buffer_info->skb = NULL;
307
			break;
308
		}
309
310
		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
311
312
		i++;
313
		if (i == rx_ring->count)
314
			i = 0;
315
		buffer_info = &rx_ring->buffer_info[i];
316
	}
317
318
no_buffers:
319
	if (rx_ring->next_to_use != i) {
320
		rx_ring->next_to_use = i;
321
322
		if (!(i--))
323
			i = (rx_ring->count - 1);
324
325
		/* Force memory writes to complete before letting h/w
326
		 * know there are new descriptors to fetch.  (Only
327
		 * applicable for weak-ordered memory model archs,
328
		 * such as IA-64). */
329
		wmb();
330
		/* Hardware increments by 16 bytes, but packet split
331
		 * descriptors are 32 bytes...so we increment tail
332
		 * twice as much.
333
		 */
334
		writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
335
	}
336
}
337
338
/**
339
 * e1000_alloc_rx_buffers_jumbo - Replace used jumbo receive buffers
340
 *
341
 * @adapter: address of board private structure
342
 * @cleaned_count: number of buffers to allocate this pass
343
 **/
344
static void e1000_alloc_rx_buffers_jumbo(struct e1000_adapter *adapter,
345
					 int cleaned_count)
346
{
347
	struct net_device *netdev = adapter->netdev;
348
	struct pci_dev *pdev = adapter->pdev;
349
	struct e1000_ring *rx_ring = adapter->rx_ring;
350
	struct e1000_rx_desc *rx_desc;
351
	struct e1000_buffer *buffer_info;
352
	struct sk_buff *skb;
353
	unsigned int i;
354
	unsigned int bufsz = 256 -
355
			     16 /*for skb_reserve */ -
356
			     NET_IP_ALIGN;
357
358
	i = rx_ring->next_to_use;
359
	buffer_info = &rx_ring->buffer_info[i];
360
361
	while (cleaned_count--) {
362
		skb = buffer_info->skb;
363
		if (skb) {
364
			skb_trim(skb, 0);
365
			goto check_page;
366
		}
367
368
		skb = netdev_alloc_skb(netdev, bufsz);
369
		if (!skb) {
370
			/* Better luck next round */
371
			adapter->alloc_rx_buff_failed++;
372
			break;
373
		}
374
375
		/* Make buffer alignment 2 beyond a 16 byte boundary
376
		 * this will result in a 16 byte aligned IP header after
377
		 * the 14 byte MAC header is removed
378
		 */
379
		skb_reserve(skb, NET_IP_ALIGN);
380
381
		buffer_info->skb = skb;
382
check_page:
383
		/* allocate a new page if necessary */
384
		if (!buffer_info->page) {
385
			buffer_info->page = alloc_page(GFP_ATOMIC);
386
			if (!buffer_info->page) {
387
				adapter->alloc_rx_buff_failed++;
388
				break;
389
			}
390
		}
391
392
		if (!buffer_info->dma)
393
			buffer_info->dma = pci_map_page(pdev,
394
							buffer_info->page, 0,
395
							PAGE_SIZE,
396
							PCI_DMA_FROMDEVICE);
397
		if (pci_dma_mapping_error(buffer_info->dma)) {
398
			dev_err(&adapter->pdev->dev, "RX DMA page map failed\n");
399
			adapter->rx_dma_failed++;
400
			break;
401
		}
402
403
		rx_desc = E1000_RX_DESC(*rx_ring, i);
404
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
405
406
		i++;
407
		if (i == rx_ring->count)
408
			i = 0;
409
		buffer_info = &rx_ring->buffer_info[i];
410
	}
411
412
	if (rx_ring->next_to_use != i) {
413
		rx_ring->next_to_use = i;
414
		if (i-- == 0)
415
			i = (rx_ring->count - 1);
416
417
		/* Force memory writes to complete before letting h/w
418
		 * know there are new descriptors to fetch.  (Only
419
		 * applicable for weak-ordered memory model archs,
420
		 * such as IA-64). */
421
		wmb();
422
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
423
	}
424
}
425
426
/**
427
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
428
 * @adapter: board private structure
429
 *
430
 * the return value indicates whether actual cleaning was done, there
431
 * is no guarantee that everything was cleaned
432
 **/
433
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
434
			       int *work_done, int work_to_do)
435
{
436
	struct net_device *netdev = adapter->netdev;
437
	struct pci_dev *pdev = adapter->pdev;
438
	struct e1000_ring *rx_ring = adapter->rx_ring;
439
	struct e1000_rx_desc *rx_desc, *next_rxd;
440
	struct e1000_buffer *buffer_info, *next_buffer;
441
	u32 length;
442
	unsigned int i;
443
	int cleaned_count = 0;
444
	bool cleaned = 0;
445
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
446
447
	i = rx_ring->next_to_clean;
448
	rx_desc = E1000_RX_DESC(*rx_ring, i);
449
	buffer_info = &rx_ring->buffer_info[i];
450
451
	while (rx_desc->status & E1000_RXD_STAT_DD) {
452
		struct sk_buff *skb;
453
		u8 status;
454
455
		if (*work_done >= work_to_do)
456
			break;
457
		(*work_done)++;
458
459
		status = rx_desc->status;
460
		skb = buffer_info->skb;
461
		buffer_info->skb = NULL;
462
463
		prefetch(skb->data - NET_IP_ALIGN);
464
465
		i++;
466
		if (i == rx_ring->count)
467
			i = 0;
468
		next_rxd = E1000_RX_DESC(*rx_ring, i);
469
		prefetch(next_rxd);
470
471
		next_buffer = &rx_ring->buffer_info[i];
472
473
		cleaned = 1;
474
		cleaned_count++;
475
		pci_unmap_single(pdev,
476
				 buffer_info->dma,
477
				 adapter->rx_buffer_len,
478
				 PCI_DMA_FROMDEVICE);
479
		buffer_info->dma = 0;
480
481
		length = le16_to_cpu(rx_desc->length);
482
483
		/* !EOP means multiple descriptors were used to store a single
484
		 * packet, also make sure the frame isn't just CRC only */
485
		if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
486
			/* All receives must fit into a single buffer */
487
			ndev_dbg(netdev, "%s: Receive packet consumed "
488
				 "multiple buffers\n", netdev->name);
489
			/* recycle */
490
			buffer_info->skb = skb;
491
			goto next_desc;
492
		}
493
494
		if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
495
			/* recycle */
496
			buffer_info->skb = skb;
497
			goto next_desc;
498
		}
499
500
		/* adjust length to remove Ethernet CRC */
501
		length -= 4;
502
503
		/* probably a little skewed due to removing CRC */
504
		total_rx_bytes += length;
505
		total_rx_packets++;
506
507
		/* code added for copybreak, this should improve
508
		 * performance for small packets with large amounts
509
		 * of reassembly being done in the stack */
510
		if (length < copybreak) {
511
			struct sk_buff *new_skb =
512
			    netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
513
			if (new_skb) {
514
				skb_reserve(new_skb, NET_IP_ALIGN);
515
				memcpy(new_skb->data - NET_IP_ALIGN,
516
				       skb->data - NET_IP_ALIGN,
517
				       length + NET_IP_ALIGN);
518
				/* save the skb in buffer_info as good */
519
				buffer_info->skb = skb;
520
				skb = new_skb;
521
			}
522
			/* else just continue with the old one */
523
		}
524
		/* end copybreak code */
525
		skb_put(skb, length);
526
527
		/* Receive Checksum Offload */
528
		e1000_rx_checksum(adapter,
529
				  (u32)(status) |
530
				  ((u32)(rx_desc->errors) << 24),
531
				  le16_to_cpu(rx_desc->csum), skb);
532
533
		e1000_receive_skb(adapter, netdev, skb, status, rx_desc->special);
534
535
next_desc:
536
		rx_desc->status = 0;
537
538
		/* return some buffers to hardware, one at a time is too slow */
539
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
540
			adapter->alloc_rx_buf(adapter, cleaned_count);
541
			cleaned_count = 0;
542
		}
543
544
		/* use prefetched values */
545
		rx_desc = next_rxd;
546
		buffer_info = next_buffer;
547
	}
548
	rx_ring->next_to_clean = i;
549
550
	cleaned_count = e1000_desc_unused(rx_ring);
551
	if (cleaned_count)
552
		adapter->alloc_rx_buf(adapter, cleaned_count);
553
554
	adapter->total_rx_packets += total_rx_packets;
555
	adapter->total_rx_bytes += total_rx_bytes;
556
	return cleaned;
557
}
558
559
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
560
			       u16 length)
561
{
562
	bi->page = NULL;
563
	skb->len += length;
564
	skb->data_len += length;
565
	skb->truesize += length;
566
}
567
568
static void e1000_put_txbuf(struct e1000_adapter *adapter,
569
			     struct e1000_buffer *buffer_info)
570
{
571
	if (buffer_info->dma) {
572
		pci_unmap_page(adapter->pdev, buffer_info->dma,
573
			       buffer_info->length, PCI_DMA_TODEVICE);
574
		buffer_info->dma = 0;
575
	}
576
	if (buffer_info->skb) {
577
		dev_kfree_skb_any(buffer_info->skb);
578
		buffer_info->skb = NULL;
579
	}
580
}
581
582
static void e1000_print_tx_hang(struct e1000_adapter *adapter)
583
{
584
	struct e1000_ring *tx_ring = adapter->tx_ring;
585
	unsigned int i = tx_ring->next_to_clean;
586
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
587
	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
588
	struct net_device *netdev = adapter->netdev;
589
590
	/* detected Tx unit hang */
591
	ndev_err(netdev,
592
		 "Detected Tx Unit Hang:\n"
593
		 "  TDH                  <%x>\n"
594
		 "  TDT                  <%x>\n"
595
		 "  next_to_use          <%x>\n"
596
		 "  next_to_clean        <%x>\n"
597
		 "buffer_info[next_to_clean]:\n"
598
		 "  time_stamp           <%lx>\n"
599
		 "  next_to_watch        <%x>\n"
600
		 "  jiffies              <%lx>\n"
601
		 "  next_to_watch.status <%x>\n",
602
		 readl(adapter->hw.hw_addr + tx_ring->head),
603
		 readl(adapter->hw.hw_addr + tx_ring->tail),
604
		 tx_ring->next_to_use,
605
		 tx_ring->next_to_clean,
606
		 tx_ring->buffer_info[eop].time_stamp,
607
		 eop,
608
		 jiffies,
609
		 eop_desc->upper.fields.status);
610
}
611
612
/**
613
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
614
 * @adapter: board private structure
615
 *
616
 * the return value indicates whether actual cleaning was done, there
617
 * is no guarantee that everything was cleaned
618
 **/
619
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
620
{
621
	struct net_device *netdev = adapter->netdev;
622
	struct e1000_hw *hw = &adapter->hw;
623
	struct e1000_ring *tx_ring = adapter->tx_ring;
624
	struct e1000_tx_desc *tx_desc, *eop_desc;
625
	struct e1000_buffer *buffer_info;
626
	unsigned int i, eop;
627
	unsigned int count = 0;
628
	bool cleaned = 0;
629
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
630
631
	i = tx_ring->next_to_clean;
632
	eop = tx_ring->buffer_info[i].next_to_watch;
633
	eop_desc = E1000_TX_DESC(*tx_ring, eop);
634
635
	while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
636
		for (cleaned = 0; !cleaned; ) {
637
			tx_desc = E1000_TX_DESC(*tx_ring, i);
638
			buffer_info = &tx_ring->buffer_info[i];
639
			cleaned = (i == eop);
640
641
			if (cleaned) {
642
				struct sk_buff *skb = buffer_info->skb;
643
				unsigned int segs, bytecount;
644
				segs = skb_shinfo(skb)->gso_segs ?: 1;
645
				/* multiply data chunks by size of headers */
646
				bytecount = ((segs - 1) * skb_headlen(skb)) +
647
					    skb->len;
648
				total_tx_packets += segs;
649
				total_tx_bytes += bytecount;
650
			}
651
652
			e1000_put_txbuf(adapter, buffer_info);
653
			tx_desc->upper.data = 0;
654
655
			i++;
656
			if (i == tx_ring->count)
657
				i = 0;
658
		}
659
660
		eop = tx_ring->buffer_info[i].next_to_watch;
661
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
662
#define E1000_TX_WEIGHT 64
663
		/* weight of a sort for tx, to avoid endless transmit cleanup */
664
		if (count++ == E1000_TX_WEIGHT)
665
			break;
666
	}
667
668
	tx_ring->next_to_clean = i;
669
670
#define TX_WAKE_THRESHOLD 32
671
	if (cleaned && netif_carrier_ok(netdev) &&
672
		     e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
673
		/* Make sure that anybody stopping the queue after this
674
		 * sees the new next_to_clean.
675
		 */
676
		smp_mb();
677
678
		if (netif_queue_stopped(netdev) &&
679
		    !(test_bit(__E1000_DOWN, &adapter->state))) {
680
			netif_wake_queue(netdev);
681
			++adapter->restart_queue;
682
		}
683
	}
684
685
	if (adapter->detect_tx_hung) {
686
		/* Detect a transmit hang in hardware, this serializes the
687
		 * check with the clearing of time_stamp and movement of i */
688
		adapter->detect_tx_hung = 0;
689
		if (tx_ring->buffer_info[eop].dma &&
690
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp
691
			       + (adapter->tx_timeout_factor * HZ))
692
		    && !(er32(STATUS) &
693
			 E1000_STATUS_TXOFF)) {
694
			e1000_print_tx_hang(adapter);
695
			netif_stop_queue(netdev);
696
		}
697
	}
698
	adapter->total_tx_bytes += total_tx_bytes;
699
	adapter->total_tx_packets += total_tx_packets;
700
	return cleaned;
701
}
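/*
 * Worked example (made-up sizes) of the byte accounting in the
 * cleaned branch above: a TSO skb with gso_segs = 4, skb_headlen() =
 * 66 and skb->len = 6066 is counted as (4 - 1) * 66 + 6066 = 6264
 * bytes, since each of the three extra segments carries its own copy
 * of the replicated headers on the wire.
 */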
702
703
/**
704
 * e1000_clean_rx_irq_jumbo - Send received data up the network stack; legacy
705
 * @adapter: board private structure
706
 *
707
 * the return value indicates whether actual cleaning was done, there
708
 * is no guarantee that everything was cleaned
709
 **/
710
static bool e1000_clean_rx_irq_jumbo(struct e1000_adapter *adapter,
711
				     int *work_done, int work_to_do)
712
{
713
	struct net_device *netdev = adapter->netdev;
714
	struct pci_dev *pdev = adapter->pdev;
715
	struct e1000_ring *rx_ring = adapter->rx_ring;
716
	struct e1000_rx_desc *rx_desc, *next_rxd;
717
	struct e1000_buffer *buffer_info, *next_buffer;
718
	u32 length;
719
	unsigned int i;
720
	int cleaned_count = 0;
721
	bool cleaned = 0;
722
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
723
724
	i = rx_ring->next_to_clean;
725
	rx_desc = E1000_RX_DESC(*rx_ring, i);
726
	buffer_info = &rx_ring->buffer_info[i];
727
728
	while (rx_desc->status & E1000_RXD_STAT_DD) {
729
		struct sk_buff *skb;
730
		u8 status;
731
732
		if (*work_done >= work_to_do)
733
			break;
734
		(*work_done)++;
735
736
		status = rx_desc->status;
737
		skb = buffer_info->skb;
738
		buffer_info->skb = NULL;
739
740
		i++;
741
		if (i == rx_ring->count)
742
			i = 0;
743
		next_rxd = E1000_RX_DESC(*rx_ring, i);
744
		prefetch(next_rxd);
745
746
		next_buffer = &rx_ring->buffer_info[i];
747
748
		cleaned = 1;
749
		cleaned_count++;
750
		pci_unmap_page(pdev,
751
			       buffer_info->dma,
752
			       PAGE_SIZE,
753
			       PCI_DMA_FROMDEVICE);
754
		buffer_info->dma = 0;
755
756
		length = le16_to_cpu(rx_desc->length);
757
758
		/* errors is only valid for DD + EOP descriptors */
759
		if ((status & E1000_RXD_STAT_EOP) &&
760
		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
761
			/* recycle both page and skb */
762
			buffer_info->skb = skb;
763
			/* an error means any chain goes out the window too */
764
			if (rx_ring->rx_skb_top)
765
				dev_kfree_skb(rx_ring->rx_skb_top);
766
			rx_ring->rx_skb_top = NULL;
767
			goto next_desc;
768
		}
769
770
#define rxtop rx_ring->rx_skb_top
771
		if (!(status & E1000_RXD_STAT_EOP)) {
772
			/* this descriptor is only the beginning (or middle) */
773
			if (!rxtop) {
774
				/* this is the beginning of a chain */
775
				rxtop = skb;
776
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
777
						   0, length);
778
			} else {
779
				/* this is the middle of a chain */
780
				skb_fill_page_desc(rxtop,
781
						   skb_shinfo(rxtop)->nr_frags,
782
						   buffer_info->page, 0,
783
						   length);
784
				/* re-use the skb, only consumed the page */
785
				buffer_info->skb = skb;
786
			}
787
			e1000_consume_page(buffer_info, rxtop, length);
788
			goto next_desc;
789
		} else {
790
			if (rxtop) {
791
				/* end of the chain */
792
				skb_fill_page_desc(rxtop,
793
				    skb_shinfo(rxtop)->nr_frags,
794
				    buffer_info->page, 0, length);
795
				/* re-use the current skb, we only consumed the
796
				 * page */
797
				buffer_info->skb = skb;
798
				skb = rxtop;
799
				rxtop = NULL;
800
				e1000_consume_page(buffer_info, skb, length);
801
			} else {
802
				/* no chain, got EOP, this buf is the packet;
803
				 * copybreak to save the put_page/alloc_page */
804
				if (length <= copybreak &&
805
				    skb_tailroom(skb) >= length) {
806
					u8 *vaddr;
807
					vaddr = kmap_atomic(buffer_info->page,
808
							   KM_SKB_DATA_SOFTIRQ);
809
					memcpy(skb_tail_pointer(skb),
810
					       vaddr, length);
811
					kunmap_atomic(vaddr,
812
						      KM_SKB_DATA_SOFTIRQ);
813
					/* re-use the page, so don't erase
814
					 * buffer_info->page */
815
					skb_put(skb, length);
816
				} else {
817
					skb_fill_page_desc(skb, 0,
818
							   buffer_info->page, 0,
819
							   length);
820
					e1000_consume_page(buffer_info, skb,
821
							   length);
822
				}
823
			}
824
		}
825
826
		/* Receive Checksum Offload XXX recompute due to CRC strip? */
827
		e1000_rx_checksum(adapter,
828
				  (u32)(status) |
829
				  ((u32)(rx_desc->errors) << 24),
830
				  le16_to_cpu(rx_desc->csum), skb);
831
832
		pskb_trim(skb, skb->len - 4);
833
834
		/* probably a little skewed due to removing CRC */
835
		total_rx_bytes += skb->len;
836
		total_rx_packets++;
837
838
		/* eth type trans needs skb->data to point to something */
839
		if (!pskb_may_pull(skb, ETH_HLEN)) {
840
			ndev_err(netdev, "__pskb_pull_tail failed.\n");
841
			dev_kfree_skb(skb);
842
			goto next_desc;
843
		}
844
845
		e1000_receive_skb(adapter, netdev, skb, status, rx_desc->special);
846
847
next_desc:
848
		rx_desc->status = 0;
849
850
		/* return some buffers to hardware, one at a time is too slow */
851
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
852
			adapter->alloc_rx_buf(adapter, cleaned_count);
853
			cleaned_count = 0;
854
		}
855
856
		/* use prefetched values */
857
		rx_desc = next_rxd;
858
		buffer_info = next_buffer;
859
	}
860
	rx_ring->next_to_clean = i;
861
862
	cleaned_count = e1000_desc_unused(rx_ring);
863
	if (cleaned_count)
864
		adapter->alloc_rx_buf(adapter, cleaned_count);
865
866
	adapter->total_rx_packets += total_rx_packets;
867
	adapter->total_rx_bytes += total_rx_bytes;
868
	return cleaned;
869
}
870
871
/**
872
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
873
 * @adapter: board private structure
874
 *
875
 * the return value indicates whether actual cleaning was done, there
876
 * is no guarantee that everything was cleaned
877
 **/
878
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
879
				  int *work_done, int work_to_do)
880
{
881
	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
882
	struct net_device *netdev = adapter->netdev;
883
	struct pci_dev *pdev = adapter->pdev;
884
	struct e1000_ring *rx_ring = adapter->rx_ring;
885
	struct e1000_buffer *buffer_info, *next_buffer;
886
	struct e1000_ps_page *ps_page;
887
	struct sk_buff *skb;
888
	unsigned int i, j;
889
	u32 length, staterr;
890
	int cleaned_count = 0;
891
	bool cleaned = 0;
892
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
893
894
	i = rx_ring->next_to_clean;
895
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
896
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
897
	buffer_info = &rx_ring->buffer_info[i];
898
899
	while (staterr & E1000_RXD_STAT_DD) {
900
		if (*work_done >= work_to_do)
901
			break;
902
		(*work_done)++;
903
		skb = buffer_info->skb;
904
905
		/* in the packet split case this is header only */
906
		prefetch(skb->data - NET_IP_ALIGN);
907
908
		i++;
909
		if (i == rx_ring->count)
910
			i = 0;
911
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
912
		prefetch(next_rxd);
913
914
		next_buffer = &rx_ring->buffer_info[i];
915
916
		cleaned = 1;
917
		cleaned_count++;
918
		pci_unmap_single(pdev, buffer_info->dma,
919
				 adapter->rx_ps_bsize0,
920
				 PCI_DMA_FROMDEVICE);
921
		buffer_info->dma = 0;
922
923
		if (!(staterr & E1000_RXD_STAT_EOP)) {
924
			ndev_dbg(netdev, "%s: Packet Split buffers didn't pick "
925
				 "up the full packet\n", netdev->name);
926
			dev_kfree_skb_irq(skb);
927
			goto next_desc;
928
		}
929
930
		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
931
			dev_kfree_skb_irq(skb);
932
			goto next_desc;
933
		}
934
935
		length = le16_to_cpu(rx_desc->wb.middle.length0);
936
937
		if (!length) {
938
			ndev_dbg(netdev, "%s: Last part of the packet spanning"
939
				 " multiple descriptors\n", netdev->name);
940
			dev_kfree_skb_irq(skb);
941
			goto next_desc;
942
		}
943
944
		/* Good Receive */
945
		skb_put(skb, length);
946
947
		{
948
		/* this looks ugly, but it seems compiler issues make it
949
		   more efficient than reusing j */
950
		int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
951
952
		/* page alloc/put takes too long and affects small packet
953
		 * throughput, so unsplit small packets and save the alloc/put */
954
		if (l1 && (l1 <= copybreak) &&
955
		    ((length + l1) <= adapter->rx_ps_bsize0)) {
956
			u8 *vaddr;
957
958
			ps_page = &rx_ring->ps_pages[i * PS_PAGE_BUFFERS];
959
960
			/* there is no documentation about how to call
961
			 * kmap_atomic, so we can't hold the mapping
962
			 * very long */
963
			pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
964
				PAGE_SIZE, PCI_DMA_FROMDEVICE);
965
			vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
966
			memcpy(skb_tail_pointer(skb), vaddr, l1);
967
			kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
968
			pci_dma_sync_single_for_device(pdev, ps_page->dma,
969
				PAGE_SIZE, PCI_DMA_FROMDEVICE);
970
			/* remove the CRC */
971
			l1 -= 4;
972
			skb_put(skb, l1);
973
			goto copydone;
974
		} /* if */
975
		}
976
977
		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
978
			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
979
			if (!length)
980
				break;
981
982
			ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS) + j];
983
			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
984
				       PCI_DMA_FROMDEVICE);
985
			ps_page->dma = 0;
986
			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
987
			ps_page->page = NULL;
988
			skb->len += length;
989
			skb->data_len += length;
990
			skb->truesize += length;
991
		}
992
993
		/* strip the ethernet crc, problem is we're using pages now so
994
		 * this whole operation can get a little cpu intensive */
995
		pskb_trim(skb, skb->len - 4);
996
997
copydone:
998
		total_rx_bytes += skb->len;
999
		total_rx_packets++;
1000
1001
		e1000_rx_checksum(adapter, staterr, le16_to_cpu(
1002
			rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
1003
1004
		if (rx_desc->wb.upper.header_status &
1005
			   cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
1006
			adapter->rx_hdr_split++;
1007
1008
		e1000_receive_skb(adapter, netdev, skb,
1009
				  staterr, rx_desc->wb.middle.vlan);
1010
1011
next_desc:
1012
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
1013
		buffer_info->skb = NULL;
1014
1015
		/* return some buffers to hardware, one at a time is too slow */
1016
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
1017
			adapter->alloc_rx_buf(adapter, cleaned_count);
1018
			cleaned_count = 0;
1019
		}
1020
1021
		/* use prefetched values */
1022
		rx_desc = next_rxd;
1023
		buffer_info = next_buffer;
1024
1025
		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
1026
	}
1027
	rx_ring->next_to_clean = i;
1028
1029
	cleaned_count = e1000_desc_unused(rx_ring);
1030
	if (cleaned_count)
1031
		adapter->alloc_rx_buf(adapter, cleaned_count);
1032
1033
	adapter->total_rx_packets += total_rx_packets;
1034
	adapter->total_rx_bytes += total_rx_bytes;
1035
	return cleaned;
1036
}
1037
1038
/**
1039
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
1040
 * @adapter: board private structure
1041
 **/
1042
static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
1043
{
1044
	struct e1000_ring *rx_ring = adapter->rx_ring;
1045
	struct e1000_buffer *buffer_info;
1046
	struct e1000_ps_page *ps_page;
1047
	struct pci_dev *pdev = adapter->pdev;
1048
	unsigned long size;
1049
	unsigned int i, j;
1050
1051
	/* Free all the Rx ring sk_buffs */
1052
	for (i = 0; i < rx_ring->count; i++) {
1053
		buffer_info = &rx_ring->buffer_info[i];
1054
		if (buffer_info->dma) {
1055
			if (adapter->clean_rx == e1000_clean_rx_irq)
1056
				pci_unmap_single(pdev, buffer_info->dma,
1057
						 adapter->rx_buffer_len,
1058
						 PCI_DMA_FROMDEVICE);
1059
			else if (adapter->clean_rx == e1000_clean_rx_irq_jumbo)
1060
				pci_unmap_page(pdev, buffer_info->dma,
1061
					       PAGE_SIZE, PCI_DMA_FROMDEVICE);
1062
			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
1063
				pci_unmap_single(pdev, buffer_info->dma,
1064
						 adapter->rx_ps_bsize0,
1065
						 PCI_DMA_FROMDEVICE);
1066
			buffer_info->dma = 0;
1067
		}
1068
1069
		if (buffer_info->page) {
1070
			put_page(buffer_info->page);
1071
			buffer_info->page = NULL;
1072
		}
1073
1074
		if (buffer_info->skb) {
1075
			dev_kfree_skb(buffer_info->skb);
1076
			buffer_info->skb = NULL;
1077
		}
1078
1079
		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
1080
			ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS)
1081
						     + j];
1082
			if (!ps_page->page)
1083
				break;
1084
			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
1085
				       PCI_DMA_FROMDEVICE);
1086
			ps_page->dma = 0;
1087
			put_page(ps_page->page);
1088
			ps_page->page = NULL;
1089
		}
1090
	}
1091
1092
	/* there also may be some cached data from a chained receive */
1093
	if (rx_ring->rx_skb_top) {
1094
		dev_kfree_skb(rx_ring->rx_skb_top);
1095
		rx_ring->rx_skb_top = NULL;
1096
	}
1097
1098
	size = sizeof(struct e1000_buffer) * rx_ring->count;
1099
	memset(rx_ring->buffer_info, 0, size);
1100
	size = sizeof(struct e1000_ps_page)
1101
	       * (rx_ring->count * PS_PAGE_BUFFERS);
1102
	memset(rx_ring->ps_pages, 0, size);
1103
1104
	/* Zero out the descriptor ring */
1105
	memset(rx_ring->desc, 0, rx_ring->size);
1106
1107
	rx_ring->next_to_clean = 0;
1108
	rx_ring->next_to_use = 0;
1109
1110
	writel(0, adapter->hw.hw_addr + rx_ring->head);
1111
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
1112
}
1113
1114
/**
1115
 * e1000_intr_msi - Interrupt Handler
1116
 * @irq: interrupt number
1117
 * @data: pointer to a network interface device structure
1118
 **/
1119
static irqreturn_t e1000_intr_msi(int irq, void *data)
1120
{
1121
	struct net_device *netdev = data;
1122
	struct e1000_adapter *adapter = netdev_priv(netdev);
1123
	struct e1000_hw *hw = &adapter->hw;
1124
	u32 icr = er32(ICR);
1125
1126
	/* reading ICR disables interrupts using IAM, so keep up with our
1127
	 * enable/disable accounting */
1128
	atomic_inc(&adapter->irq_sem);
1129
1130
	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1131
		hw->mac.get_link_status = 1;
1132
		/* ICH8 workaround-- Call gig speed drop workaround on cable
1133
		 * disconnect (LSC) before accessing any PHY registers */
1134
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1135
		    (!(er32(STATUS) & E1000_STATUS_LU)))
1136
			e1000e_gig_downshift_workaround_ich8lan(hw);
1137
1138
		/* 80003ES2LAN workaround-- For packet buffer work-around on
1139
		 * link down event; disable receives here in the ISR and reset
1140
		 * adapter in watchdog */
1141
		if (netif_carrier_ok(netdev) &&
1142
		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
1143
			/* disable receives */
1144
			u32 rctl = er32(RCTL);
1145
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
1146
		}
1147
		/* guard against interrupt when we're going down */
1148
		if (!test_bit(__E1000_DOWN, &adapter->state))
1149
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
1150
	}
1151
1152
	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
1153
		adapter->total_tx_bytes = 0;
1154
		adapter->total_tx_packets = 0;
1155
		adapter->total_rx_bytes = 0;
1156
		adapter->total_rx_packets = 0;
1157
		__netif_rx_schedule(netdev, &adapter->napi);
1158
	} else {
1159
		atomic_dec(&adapter->irq_sem);
1160
	}
1161
1162
	return IRQ_HANDLED;
1163
}
1164
1165
/**
1166
 * e1000_intr - Interrupt Handler
1167
 * @irq: interrupt number
1168
 * @data: pointer to a network interface device structure
1169
 **/
1170
static irqreturn_t e1000_intr(int irq, void *data)
1171
{
1172
	struct net_device *netdev = data;
1173
	struct e1000_adapter *adapter = netdev_priv(netdev);
1174
	struct e1000_hw *hw = &adapter->hw;
1175
1176
	u32 rctl, icr = er32(ICR);
1177
	if (!icr)
1178
		return IRQ_NONE;  /* Not our interrupt */
1179
1180
	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
1181
	 * not set, then the adapter didn't send an interrupt */
1182
	if (!(icr & E1000_ICR_INT_ASSERTED))
1183
		return IRQ_NONE;
1184
1185
	/* Interrupt Auto-Mask...upon reading ICR,
1186
	 * interrupts are masked.  No need for the
1187
	 * IMC write, but it does mean we should
1188
	 * account for it ASAP. */
1189
	atomic_inc(&adapter->irq_sem);
1190
1191
	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1192
		hw->mac.get_link_status = 1;
1193
		/* ICH8 workaround-- Call gig speed drop workaround on cable
1194
		 * disconnect (LSC) before accessing any PHY registers */
1195
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1196
		    (!(er32(STATUS) & E1000_STATUS_LU)))
1197
			e1000e_gig_downshift_workaround_ich8lan(hw);
1198
1199
		/* 80003ES2LAN workaround--
1200
		 * For packet buffer work-around on link down event;
1201
		 * disable receives here in the ISR and
1202
		 * reset adapter in watchdog
1203
		 */
1204
		if (netif_carrier_ok(netdev) &&
1205
		    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
1206
			/* disable receives */
1207
			rctl = er32(RCTL);
1208
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
1209
		}
1210
		/* guard against interrupt when we're going down */
1211
		if (!test_bit(__E1000_DOWN, &adapter->state))
1212
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
1213
	}
1214
1215
	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
1216
		adapter->total_tx_bytes = 0;
1217
		adapter->total_tx_packets = 0;
1218
		adapter->total_rx_bytes = 0;
1219
		adapter->total_rx_packets = 0;
1220
		__netif_rx_schedule(netdev, &adapter->napi);
1221
	} else {
1222
		atomic_dec(&adapter->irq_sem);
1223
	}
1224
1225
	return IRQ_HANDLED;
1226
}
1227
1228
static int e1000_request_irq(struct e1000_adapter *adapter)
1229
{
1230
	struct net_device *netdev = adapter->netdev;
1231
	void (*handler) = &e1000_intr;
1232
	int irq_flags = IRQF_SHARED;
1233
	int err;
1234
1235
	err = pci_enable_msi(adapter->pdev);
1236
	if (err) {
1237
		ndev_warn(netdev,
1238
		 "Unable to allocate MSI interrupt Error: %d\n", err);
1239
	} else {
1240
		adapter->flags |= FLAG_MSI_ENABLED;
1241
		handler = &e1000_intr_msi;
1242
		irq_flags = 0;
1243
	}
1244
1245
	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
1246
			  netdev);
1247
	if (err) {
1248
		if (adapter->flags & FLAG_MSI_ENABLED)
1249
			pci_disable_msi(adapter->pdev);
1250
		ndev_err(netdev,
1251
		       "Unable to allocate interrupt Error: %d\n", err);
1252
	}
1253
1254
	return err;
1255
}
1256
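/*
 * Editor's note: e1000_request_irq() above tries MSI first and falls back
 * to a shared legacy interrupt.  IRQF_SHARED is only needed in the legacy
 * case; an MSI vector is never shared with another device, so the MSI
 * path requests the IRQ with no flags.
 */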
1257
static void e1000_free_irq(struct e1000_adapter *adapter)
1258
{
1259
	struct net_device *netdev = adapter->netdev;
1260
1261
	free_irq(adapter->pdev->irq, netdev);
1262
	if (adapter->flags & FLAG_MSI_ENABLED) {
1263
		pci_disable_msi(adapter->pdev);
1264
		adapter->flags &= ~FLAG_MSI_ENABLED;
1265
	}
1266
}
1267
1268
/**
1269
 * e1000_irq_disable - Mask off interrupt generation on the NIC
1270
 **/
1271
static void e1000_irq_disable(struct e1000_adapter *adapter)
1272
{
1273
	struct e1000_hw *hw = &adapter->hw;
1274
1275
	atomic_inc(&adapter->irq_sem);
1276
	ew32(IMC, ~0);
1277
	e1e_flush();
1278
	synchronize_irq(adapter->pdev->irq);
1279
}
1280
1281
/**
1282
 * e1000_irq_enable - Enable default interrupt generation settings
1283
 **/
1284
static void e1000_irq_enable(struct e1000_adapter *adapter)
1285
{
1286
	struct e1000_hw *hw = &adapter->hw;
1287
1288
	if (atomic_dec_and_test(&adapter->irq_sem)) {
1289
		ew32(IMS, IMS_ENABLE_MASK);
1290
		e1e_flush();
1291
	}
1292
}
1293
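/*
 * Editor's note: irq_sem lets the disable/enable pair nest.
 * e1000_irq_disable() and the ICR auto-mask accounting in both ISRs bump
 * the count, and the IMS write that unmasks interrupts only happens once
 * a matching e1000_irq_enable() brings the count back to zero.
 */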
1294
/**
1295
 * e1000_get_hw_control - get control of the h/w from f/w
1296
 * @adapter: address of board private structure
1297
 *
1298
 * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
1299
 * For ASF and Pass Through versions of f/w this means that
1300
 * the driver is loaded. For AMT version (only with 82573)
1301
 * of the f/w this means that the network i/f is open.
1302
 **/
1303
static void e1000_get_hw_control(struct e1000_adapter *adapter)
1304
{
1305
	struct e1000_hw *hw = &adapter->hw;
1306
	u32 ctrl_ext;
1307
	u32 swsm;
1308
1309
	/* Let firmware know the driver has taken over */
1310
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
1311
		swsm = er32(SWSM);
1312
		ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
1313
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
1314
		ctrl_ext = er32(CTRL_EXT);
1315
		ew32(CTRL_EXT,
1316
				ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1317
	}
1318
}
1319
1320
/**
1321
 * e1000_release_hw_control - release control of the h/w to f/w
1322
 * @adapter: address of board private structure
1323
 *
1324
 * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
1325
 * For ASF and Pass Through versions of f/w this means that the
1326
 * driver is no longer loaded. For AMT version (only with 82573)
1327
 * of the f/w this means that the network i/f is closed.
1328
 *
1329
 **/
1330
static void e1000_release_hw_control(struct e1000_adapter *adapter)
1331
{
1332
	struct e1000_hw *hw = &adapter->hw;
1333
	u32 ctrl_ext;
1334
	u32 swsm;
1335
1336
	/* Let firmware take over control of the h/w */
1337
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
1338
		swsm = er32(SWSM);
1339
		ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
1340
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
1341
		ctrl_ext = er32(CTRL_EXT);
1342
		ew32(CTRL_EXT,
1343
				ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1344
	}
1345
}
1346
1347
static void e1000_release_manageability(struct e1000_adapter *adapter)
1348
{
1349
	if (adapter->flags & FLAG_MNG_PT_ENABLED) {
1350
		struct e1000_hw *hw = &adapter->hw;
1351
1352
		u32 manc = er32(MANC);
1353
1354
		/* re-enable hardware interception of ARP */
1355
		manc |= E1000_MANC_ARP_EN;
1356
		manc &= ~E1000_MANC_EN_MNG2HOST;
1357
1358
		/* don't explicitly have to mess with MANC2H since
1359
		 * MANC has an enable/disable bit that gates MANC2H */
1360
		ew32(MANC, manc);
1361
	}
1362
}
1363
1364
/**
1365
 * e1000_alloc_ring_dma - allocate memory for a ring structure
1366
 **/
1367
static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
1368
				struct e1000_ring *ring)
1369
{
1370
	struct pci_dev *pdev = adapter->pdev;
1371
1372
	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
1373
					GFP_KERNEL);
1374
	if (!ring->desc)
1375
		return -ENOMEM;
1376
1377
	return 0;
1378
}
1379
1380
/**
1381
 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
1382
 * @adapter: board private structure
1383
 *
1384
 * Return 0 on success, negative on failure
1385
 **/
1386
int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
1387
{
1388
	struct e1000_ring *tx_ring = adapter->tx_ring;
1389
	int err = -ENOMEM, size;
1390
1391
	size = sizeof(struct e1000_buffer) * tx_ring->count;
1392
	tx_ring->buffer_info = vmalloc(size);
1393
	if (!tx_ring->buffer_info)
1394
		goto err;
1395
	memset(tx_ring->buffer_info, 0, size);
1396
1397
	/* round up to nearest 4K */
1398
	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
1399
	tx_ring->size = ALIGN(tx_ring->size, 4096);
1400
1401
	err = e1000_alloc_ring_dma(adapter, tx_ring);
1402
	if (err)
1403
		goto err;
1404
1405
	tx_ring->next_to_use = 0;
1406
	tx_ring->next_to_clean = 0;
1407
	spin_lock_init(&adapter->tx_queue_lock);
1408
1409
	return 0;
1410
err:
1411
	vfree(tx_ring->buffer_info);
1412
	ndev_err(adapter->netdev,
1413
	"Unable to allocate memory for the transmit descriptor ring\n");
1414
	return err;
1415
}
1416
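/*
 * Editor's illustration (hypothetical helper, not part of the patch): the
 * 4 KB rounding above keeps a descriptor ring from ending on a partial
 * page.  A legacy Tx descriptor is 16 bytes, so for example:
 */
static inline unsigned int e1000_example_ring_bytes(unsigned int count)
{
	/* 256 descriptors * 16 bytes = 4096 -> ALIGN(4096, 4096) = 4096,
	 * 320 descriptors * 16 bytes = 5120 -> ALIGN(5120, 4096) = 8192 */
	return ALIGN(count * sizeof(struct e1000_tx_desc), 4096);
}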
1417
/**
1418
 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
1419
 * @adapter: board private structure
1420
 *
1421
 * Returns 0 on success, negative on failure
1422
 **/
1423
int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
1424
{
1425
	struct e1000_ring *rx_ring = adapter->rx_ring;
1426
	int size, desc_len, err = -ENOMEM;
1427
1428
	size = sizeof(struct e1000_buffer) * rx_ring->count;
1429
	rx_ring->buffer_info = vmalloc(size);
1430
	if (!rx_ring->buffer_info)
1431
		goto err;
1432
	memset(rx_ring->buffer_info, 0, size);
1433
1434
	rx_ring->ps_pages = kcalloc(rx_ring->count * PS_PAGE_BUFFERS,
1435
				    sizeof(struct e1000_ps_page),
1436
				    GFP_KERNEL);
1437
	if (!rx_ring->ps_pages)
1438
		goto err;
1439
1440
	desc_len = sizeof(union e1000_rx_desc_packet_split);
1441
1442
	/* Round up to nearest 4K */
1443
	rx_ring->size = rx_ring->count * desc_len;
1444
	rx_ring->size = ALIGN(rx_ring->size, 4096);
1445
1446
	err = e1000_alloc_ring_dma(adapter, rx_ring);
1447
	if (err)
1448
		goto err;
1449
1450
	rx_ring->next_to_clean = 0;
1451
	rx_ring->next_to_use = 0;
1452
	rx_ring->rx_skb_top = NULL;
1453
1454
	return 0;
1455
err:
1456
	vfree(rx_ring->buffer_info);
1457
	kfree(rx_ring->ps_pages);
1458
	ndev_err(adapter->netdev,
1459
	"Unable to allocate memory for the transmit descriptor ring\n");
1460
	return err;
1461
}
1462
1463
/**
1464
 * e1000_clean_tx_ring - Free Tx Buffers
1465
 * @adapter: board private structure
1466
 **/
1467
static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
1468
{
1469
	struct e1000_ring *tx_ring = adapter->tx_ring;
1470
	struct e1000_buffer *buffer_info;
1471
	unsigned long size;
1472
	unsigned int i;
1473
1474
	for (i = 0; i < tx_ring->count; i++) {
1475
		buffer_info = &tx_ring->buffer_info[i];
1476
		e1000_put_txbuf(adapter, buffer_info);
1477
	}
1478
1479
	size = sizeof(struct e1000_buffer) * tx_ring->count;
1480
	memset(tx_ring->buffer_info, 0, size);
1481
1482
	memset(tx_ring->desc, 0, tx_ring->size);
1483
1484
	tx_ring->next_to_use = 0;
1485
	tx_ring->next_to_clean = 0;
1486
1487
	writel(0, adapter->hw.hw_addr + tx_ring->head);
1488
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
1489
}
1490
1491
/**
1492
 * e1000e_free_tx_resources - Free Tx Resources per Queue
1493
 * @adapter: board private structure
1494
 *
1495
 * Free all transmit software resources
1496
 **/
1497
void e1000e_free_tx_resources(struct e1000_adapter *adapter)
1498
{
1499
	struct pci_dev *pdev = adapter->pdev;
1500
	struct e1000_ring *tx_ring = adapter->tx_ring;
1501
1502
	e1000_clean_tx_ring(adapter);
1503
1504
	vfree(tx_ring->buffer_info);
1505
	tx_ring->buffer_info = NULL;
1506
1507
	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1508
			  tx_ring->dma);
1509
	tx_ring->desc = NULL;
1510
}
1511
1512
/**
1513
 * e1000e_free_rx_resources - Free Rx Resources
1514
 * @adapter: board private structure
1515
 *
1516
 * Free all receive software resources
1517
 **/
1518
1519
void e1000e_free_rx_resources(struct e1000_adapter *adapter)
1520
{
1521
	struct pci_dev *pdev = adapter->pdev;
1522
	struct e1000_ring *rx_ring = adapter->rx_ring;
1523
1524
	e1000_clean_rx_ring(adapter);
1525
1526
	vfree(rx_ring->buffer_info);
1527
	rx_ring->buffer_info = NULL;
1528
1529
	kfree(rx_ring->ps_pages);
1530
	rx_ring->ps_pages = NULL;
1531
1532
	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
1533
			  rx_ring->dma);
1534
	rx_ring->desc = NULL;
1535
}
1536
1537
/**
1538
 * e1000_update_itr - update the dynamic ITR value based on statistics
1539
 *      Stores a new ITR value based on packets and byte
1540
 *      counts during the last interrupt.  The advantage of per interrupt
1541
 *      computation is faster updates and more accurate ITR for the current
1542
 *      traffic pattern.  Constants in this function were computed
1543
 *      based on theoretical maximum wire speed and thresholds were set based
1544
 *      on testing data as well as attempting to minimize response time
1545
 *      while increasing bulk throughput.
1546
 *      This functionality is controlled by the InterruptThrottleRate module
1547
 *      parameter (see e1000_param.c)
1548
 * @adapter: pointer to adapter
1549
 * @itr_setting: current adapter->itr
1550
 * @packets: the number of packets during this measurement interval
1551
 * @bytes: the number of bytes during this measurement interval
1552
 **/
1553
static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
1554
				     u16 itr_setting, int packets,
1555
				     int bytes)
1556
{
1557
	unsigned int retval = itr_setting;
1558
1559
	if (packets == 0)
1560
		goto update_itr_done;
1561
1562
	switch (itr_setting) {
1563
	case lowest_latency:
1564
		/* handle TSO and jumbo frames */
1565
		if (bytes/packets > 8000)
1566
			retval = bulk_latency;
1567
		else if ((packets < 5) && (bytes > 512)) {
1568
			retval = low_latency;
1569
		}
1570
		break;
1571
	case low_latency:  /* 50 usec aka 20000 ints/s */
1572
		if (bytes > 10000) {
1573
			/* this if handles the TSO accounting */
1574
			if (bytes/packets > 8000) {
1575
				retval = bulk_latency;
1576
			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
1577
				retval = bulk_latency;
1578
			} else if ((packets > 35)) {
1579
				retval = lowest_latency;
1580
			}
1581
		} else if (bytes/packets > 2000) {
1582
			retval = bulk_latency;
1583
		} else if (packets <= 2 && bytes < 512) {
1584
			retval = lowest_latency;
1585
		}
1586
		break;
1587
	case bulk_latency: /* 250 usec aka 4000 ints/s */
1588
		if (bytes > 25000) {
1589
			if (packets > 35) {
1590
				retval = low_latency;
1591
			}
1592
		} else if (bytes < 6000) {
1593
			retval = low_latency;
1594
		}
1595
		break;
1596
	}
1597
1598
update_itr_done:
1599
	return retval;
1600
}
1601
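/*
 * Editor's note (illustrative summary, not part of the patch): the
 * heuristic above maps the traffic seen since the last interrupt to a
 * latency class, roughly:
 *
 *   few small packets          -> lowest_latency (favor response time)
 *   moderate mixed traffic     -> low_latency
 *   large packets or high load -> bulk_latency   (favor throughput)
 *
 * e.g. 8 packets / 12000 bytes while in low_latency: bytes > 10000 and
 * packets < 10, so the state moves to bulk_latency.
 */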
1602
static void e1000_set_itr(struct e1000_adapter *adapter)
1603
{
1604
	struct e1000_hw *hw = &adapter->hw;
1605
	u16 current_itr;
1606
	u32 new_itr = adapter->itr;
1607
1608
	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
1609
	if (adapter->link_speed != SPEED_1000) {
1610
		current_itr = 0;
1611
		new_itr = 4000;
1612
		goto set_itr_now;
1613
	}
1614
1615
	adapter->tx_itr = e1000_update_itr(adapter,
1616
				    adapter->tx_itr,
1617
				    adapter->total_tx_packets,
1618
				    adapter->total_tx_bytes);
1619
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
1620
	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
1621
		adapter->tx_itr = low_latency;
1622
1623
	adapter->rx_itr = e1000_update_itr(adapter,
1624
				    adapter->rx_itr,
1625
				    adapter->total_rx_packets,
1626
				    adapter->total_rx_bytes);
1627
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
1628
	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
1629
		adapter->rx_itr = low_latency;
1630
1631
	current_itr = max(adapter->rx_itr, adapter->tx_itr);
1632
1633
	switch (current_itr) {
1634
	/* counts and packets in update_itr are dependent on these numbers */
1635
	case lowest_latency:
1636
		new_itr = 70000;
1637
		break;
1638
	case low_latency:
1639
		new_itr = 20000; /* aka hwitr = ~200 */
1640
		break;
1641
	case bulk_latency:
1642
		new_itr = 4000;
1643
		break;
1644
	default:
1645
		break;
1646
	}
1647
1648
set_itr_now:
1649
	if (new_itr != adapter->itr) {
1650
		/* this attempts to bias the interrupt rate towards Bulk
1651
		 * by adding intermediate steps when interrupt rate is
1652
		 * increasing */
1653
		new_itr = new_itr > adapter->itr ?
1654
			     min(adapter->itr + (new_itr >> 2), new_itr) :
1655
			     new_itr;
1656
		adapter->itr = new_itr;
1657
		ew32(ITR, 1000000000 / (new_itr * 256));
1658
	}
1659
}
1660
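/*
 * Editor's illustration (hypothetical helper, not part of the patch): the
 * ITR register counts the inter-interrupt gap in 256 ns units, while
 * adapter->itr is kept in interrupts/second, hence the conversion above.
 */
static inline u32 e1000_example_itr_to_reg(u32 ints_per_sec)
{
	/* 20000 ints/s -> 50000 ns between interrupts -> 50000 / 256
	 * ~= 195 register units, i.e. the "hwitr = ~200" noted above */
	return 1000000000 / (ints_per_sec * 256);
}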
1661
/**
1662
 * e1000_clean - NAPI Rx polling callback
1663
 * @adapter: board private structure
1664
 **/
1665
static int e1000_clean(struct napi_struct *napi, int budget)
1666
{
1667
	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
1668
	struct net_device *poll_dev = adapter->netdev;
1669
	int tx_cleaned = 0, work_done = 0;
1670
1671
	/* Must NOT use netdev_priv macro here. */
1672
	adapter = poll_dev->priv;
1673
1674
	/* Keep link state information with original netdev */
1675
	if (!netif_carrier_ok(poll_dev))
1676
		goto quit_polling;
1677
1678
	/* e1000_clean is called per-cpu.  This lock protects
1679
	 * tx_ring from being cleaned by multiple cpus
1680
	 * simultaneously.  A failure obtaining the lock means
1681
	 * tx_ring is currently being cleaned anyway. */
1682
	if (spin_trylock(&adapter->tx_queue_lock)) {
1683
		tx_cleaned = e1000_clean_tx_irq(adapter);
1684
		spin_unlock(&adapter->tx_queue_lock);
1685
	}
1686
1687
	adapter->clean_rx(adapter, &work_done, budget);
1688
1689
	/* If no Tx and not enough Rx work done, exit the polling mode */
1690
	if ((!tx_cleaned && (work_done < budget)) ||
1691
	   !netif_running(poll_dev)) {
1692
quit_polling:
1693
		if (adapter->itr_setting & 3)
1694
			e1000_set_itr(adapter);
1695
		netif_rx_complete(poll_dev, napi);
1696
		e1000_irq_enable(adapter);
1697
	}
1698
1699
	return work_done;
1700
}
1701
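/*
 * Editor's note: under the NAPI contract used here, completing the poll
 * with netif_rx_complete() and returning work_done < budget tells the
 * core this device has no more work, so interrupts are re-enabled;
 * returning the full budget keeps the device on the poll list instead.
 */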
1702
static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1703
{
1704
	struct e1000_adapter *adapter = netdev_priv(netdev);
1705
	struct e1000_hw *hw = &adapter->hw;
1706
	u32 vfta, index;
1707
1708
	/* don't update vlan cookie if already programmed */
1709
	if ((adapter->hw.mng_cookie.status &
1710
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
1711
	    (vid == adapter->mng_vlan_id))
1712
		return;
1713
	/* add VID to filter table */
1714
	index = (vid >> 5) & 0x7F;
1715
	vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
1716
	vfta |= (1 << (vid & 0x1F));
1717
	e1000e_write_vfta(hw, index, vfta);
1718
}
1719
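/*
 * Editor's illustration (hypothetical helper, not part of the patch): the
 * VFTA is an array of 128 32-bit registers, so bits [11:5] of the VLAN ID
 * select the register and bits [4:0] select the bit within it.
 */
static inline void e1000_example_vfta_slot(u16 vid, u32 *index, u32 *bit)
{
	*index = (vid >> 5) & 0x7F;	/* e.g. vid 100 -> register 3 */
	*bit = vid & 0x1F;		/* e.g. vid 100 -> bit 4 */
}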
1720
static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1721
{
1722
	struct e1000_adapter *adapter = netdev_priv(netdev);
1723
	struct e1000_hw *hw = &adapter->hw;
1724
	u32 vfta, index;
1725
1726
	e1000_irq_disable(adapter);
1727
	vlan_group_set_device(adapter->vlgrp, vid, NULL);
1728
	e1000_irq_enable(adapter);
1729
1730
	if ((adapter->hw.mng_cookie.status &
1731
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
1732
	    (vid == adapter->mng_vlan_id)) {
1733
		/* release control to f/w */
1734
		e1000_release_hw_control(adapter);
1735
		return;
1736
	}
1737
1738
	/* remove VID from filter table */
1739
	index = (vid >> 5) & 0x7F;
1740
	vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
1741
	vfta &= ~(1 << (vid & 0x1F));
1742
	e1000e_write_vfta(hw, index, vfta);
1743
}
1744
1745
static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
1746
{
1747
	struct net_device *netdev = adapter->netdev;
1748
	u16 vid = adapter->hw.mng_cookie.vlan_id;
1749
	u16 old_vid = adapter->mng_vlan_id;
1750
1751
	if (!adapter->vlgrp)
1752
		return;
1753
1754
	if (!vlan_group_get_device(adapter->vlgrp, vid)) {
1755
		adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1756
		if (adapter->hw.mng_cookie.status &
1757
			E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1758
			e1000_vlan_rx_add_vid(netdev, vid);
1759
			adapter->mng_vlan_id = vid;
1760
		}
1761
1762
		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
1763
				(vid != old_vid) &&
1764
		    !vlan_group_get_device(adapter->vlgrp, old_vid))
1765
			e1000_vlan_rx_kill_vid(netdev, old_vid);
1766
	} else {
1767
		adapter->mng_vlan_id = vid;
1768
	}
1769
}
1770
1771
1772
static void e1000_vlan_rx_register(struct net_device *netdev,
1773
				   struct vlan_group *grp)
1774
{
1775
	struct e1000_adapter *adapter = netdev_priv(netdev);
1776
	struct e1000_hw *hw = &adapter->hw;
1777
	u32 ctrl, rctl;
1778
1779
	e1000_irq_disable(adapter);
1780
	adapter->vlgrp = grp;
1781
1782
	if (grp) {
1783
		/* enable VLAN tag insert/strip */
1784
		ctrl = er32(CTRL);
1785
		ctrl |= E1000_CTRL_VME;
1786
		ew32(CTRL, ctrl);
1787
1788
		if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
1789
			/* enable VLAN receive filtering */
1790
			rctl = er32(RCTL);
1791
			rctl |= E1000_RCTL_VFE;
1792
			rctl &= ~E1000_RCTL_CFIEN;
1793
			ew32(RCTL, rctl);
1794
			e1000_update_mng_vlan(adapter);
1795
		}
1796
	} else {
1797
		/* disable VLAN tag insert/strip */
1798
		ctrl = er32(CTRL);
1799
		ctrl &= ~E1000_CTRL_VME;
1800
		ew32(CTRL, ctrl);
1801
1802
		if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
1803
			/* disable VLAN filtering */
1804
			rctl = er32(RCTL);
1805
			rctl &= ~E1000_RCTL_VFE;
1806
			ew32(RCTL, rctl);
1807
			if (adapter->mng_vlan_id !=
1808
			    (u16)E1000_MNG_VLAN_NONE) {
1809
				e1000_vlan_rx_kill_vid(netdev,
1810
						       adapter->mng_vlan_id);
1811
				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1812
			}
1813
		}
1814
	}
1815
1816
	e1000_irq_enable(adapter);
1817
}
1818
1819
static void e1000_restore_vlan(struct e1000_adapter *adapter)
1820
{
1821
	u16 vid;
1822
1823
	e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
1824
1825
	if (!adapter->vlgrp)
1826
		return;
1827
1828
	for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
1829
		if (!vlan_group_get_device(adapter->vlgrp, vid))
1830
			continue;
1831
		e1000_vlan_rx_add_vid(adapter->netdev, vid);
1832
	}
1833
}
1834
1835
static void e1000_init_manageability(struct e1000_adapter *adapter)
1836
{
1837
	struct e1000_hw *hw = &adapter->hw;
1838
	u32 manc, manc2h;
1839
1840
	if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
1841
		return;
1842
1843
	manc = er32(MANC);
1844
1845
	/* disable hardware interception of ARP */
1846
	manc &= ~(E1000_MANC_ARP_EN);
1847
1848
	/* enable receiving management packets to the host. this will probably
1849
	 * generate destination unreachable messages from the host OS, but
1850
	 * the packets will be handled on SMBUS */
1851
	manc |= E1000_MANC_EN_MNG2HOST;
1852
	manc2h = er32(MANC2H);
1853
#define E1000_MNG2HOST_PORT_623 (1 << 5)
1854
#define E1000_MNG2HOST_PORT_664 (1 << 6)
1855
	manc2h |= E1000_MNG2HOST_PORT_623;
1856
	manc2h |= E1000_MNG2HOST_PORT_664;
1857
	ew32(MANC2H, manc2h);
1858
	ew32(MANC, manc);
1859
}
1860
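/*
 * Editor's note: the two MANC2H bits set above correspond to the standard
 * RMCP ports used by manageability firmware (623 for RMCP/ASF, 664 for
 * secure RMCP), so management traffic on those ports is also delivered to
 * the host network stack rather than being intercepted silently.
 */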
1861
/**
1862
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1863
 * @adapter: board private structure
1864
 *
1865
 * Configure the Tx unit of the MAC after a reset.
1866
 **/
1867
static void e1000_configure_tx(struct e1000_adapter *adapter)
1868
{
1869
	struct e1000_hw *hw = &adapter->hw;
1870
	struct e1000_ring *tx_ring = adapter->tx_ring;
1871
	u64 tdba;
1872
	u32 tdlen, tctl, tipg, tarc;
1873
	u32 ipgr1, ipgr2;
1874
1875
	/* Setup the HW Tx Head and Tail descriptor pointers */
1876
	tdba = tx_ring->dma;
1877
	tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
1878
	ew32(TDBAL, (tdba & DMA_32BIT_MASK));
1879
	ew32(TDBAH, (tdba >> 32));
1880
	ew32(TDLEN, tdlen);
1881
	ew32(TDH, 0);
1882
	ew32(TDT, 0);
1883
	tx_ring->head = E1000_TDH;
1884
	tx_ring->tail = E1000_TDT;
1885
1886
	/* Set the default values for the Tx Inter Packet Gap timer */
1887
	tipg = DEFAULT_82543_TIPG_IPGT_COPPER;          /*  8  */
1888
	ipgr1 = DEFAULT_82543_TIPG_IPGR1;               /*  8  */
1889
	ipgr2 = DEFAULT_82543_TIPG_IPGR2;               /*  6  */
1890
1891
	if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN)
1892
		ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /*  7  */
1893
1894
	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1895
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1896
	ew32(TIPG, tipg);
1897
1898
	/* Set the Tx Interrupt Delay register */
1899
	ew32(TIDV, adapter->tx_int_delay);
1900
	/* tx irq moderation */
1901
	ew32(TADV, adapter->tx_abs_int_delay);
1902
1903
	/* Program the Transmit Control Register */
1904
	tctl = er32(TCTL);
1905
	tctl &= ~E1000_TCTL_CT;
1906
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1907
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1908
1909
	if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
1910
		tarc = er32(TARC0);
1911
		/* set the speed mode bit; we'll clear it later if we're
1912
		 * not at gigabit link */
1913
#define SPEED_MODE_BIT (1 << 21)
1914
		tarc |= SPEED_MODE_BIT;
1915
		ew32(TARC0, tarc);
1916
	}
1917
1918
	/* errata: program both queues to unweighted RR */
1919
	if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
1920
		tarc = er32(TARC0);
1921
		tarc |= 1;
1922
		ew32(TARC0, tarc);
1923
		tarc = er32(TARC1);
1924
		tarc |= 1;
1925
		ew32(TARC1, tarc);
1926
	}
1927
1928
	e1000e_config_collision_dist(hw);
1929
1930
	/* Setup Transmit Descriptor Settings for eop descriptor */
1931
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1932
1933
	/* only set IDE if we are delaying interrupts using the timers */
1934
	if (adapter->tx_int_delay)
1935
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1936
1937
	/* enable Report Status bit */
1938
	adapter->txd_cmd |= E1000_TXD_CMD_RS;
1939
1940
	ew32(TCTL, tctl);
1941
1942
	adapter->tx_queue_len = adapter->netdev->tx_queue_len;
1943
}
1944
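/*
 * Editor's illustration (shift values assumed from the e1000 register
 * layout, not shown in this patch): TIPG packs all three inter-packet
 * gaps into one register, e.g. with IPGT=8, IPGR1=8, IPGR2=6:
 *
 *   tipg = 8 | (8 << E1000_TIPG_IPGR1_SHIFT) | (6 << E1000_TIPG_IPGR2_SHIFT)
 *        = 8 | (8 << 10) | (6 << 20)
 */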
1945
/**
1946
 * e1000_setup_rctl - configure the receive control registers
1947
 * @adapter: Board private structure
1948
 **/
1949
#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
1950
			   (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
1951
static void e1000_setup_rctl(struct e1000_adapter *adapter)
1952
{
1953
	struct e1000_hw *hw = &adapter->hw;
1954
	u32 rctl, rfctl;
1955
	u32 psrctl = 0;
1956
	u32 pages = 0;
1957
1958
	/* Program MC offset vector base */
1959
	rctl = er32(RCTL);
1960
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1961
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
1962
		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1963
		(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1964
1965
	/* Do not Store bad packets */
1966
	rctl &= ~E1000_RCTL_SBP;
1967
1968
	/* Enable Long Packet receive */
1969
	if (adapter->netdev->mtu <= ETH_DATA_LEN)
1970
		rctl &= ~E1000_RCTL_LPE;
1971
	else
1972
		rctl |= E1000_RCTL_LPE;
1973
1974
	/* Setup buffer sizes */
1975
	rctl &= ~E1000_RCTL_SZ_4096;
1976
	rctl |= E1000_RCTL_BSEX;
1977
	switch (adapter->rx_buffer_len) {
1978
	case 256:
1979
		rctl |= E1000_RCTL_SZ_256;
1980
		rctl &= ~E1000_RCTL_BSEX;
1981
		break;
1982
	case 512:
1983
		rctl |= E1000_RCTL_SZ_512;
1984
		rctl &= ~E1000_RCTL_BSEX;
1985
		break;
1986
	case 1024:
1987
		rctl |= E1000_RCTL_SZ_1024;
1988
		rctl &= ~E1000_RCTL_BSEX;
1989
		break;
1990
	case 2048:
1991
	default:
1992
		rctl |= E1000_RCTL_SZ_2048;
1993
		rctl &= ~E1000_RCTL_BSEX;
1994
		break;
1995
	case 4096:
1996
		rctl |= E1000_RCTL_SZ_4096;
1997
		break;
1998
	case 8192:
1999
		rctl |= E1000_RCTL_SZ_8192;
2000
		break;
2001
	case 16384:
2002
		rctl |= E1000_RCTL_SZ_16384;
2003
		break;
2004
	}
2005
2006
	/*
2007
	 * 82571 and greater support packet-split where the protocol
2008
	 * header is placed in skb->data and the packet data is
2009
	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
2010
	 * In the case of a non-split, skb->data is linearly filled,
2011
	 * followed by the page buffers.  Therefore, skb->data is
2012
	 * sized to hold the largest protocol header.
2013
	 *
2014
	 * allocations using alloc_page take too long for regular MTU
2015
	 * so only enable packet split for jumbo frames
2016
	 *
2017
	 * Using pages when the page size is greater than 16k wastes
2018
	 * a lot of memory, since we allocate 3 pages at all times
2019
	 * per packet.
2020
	 */
2021
	adapter->rx_ps_pages = 0;
2022
	pages = PAGE_USE_COUNT(adapter->netdev->mtu);
2023
	if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
2024
		adapter->rx_ps_pages = pages;
2025
2026
	if (adapter->rx_ps_pages) {
2027
		/* Configure extra packet-split registers */
2028
		rfctl = er32(RFCTL);
2029
		rfctl |= E1000_RFCTL_EXTEN;
2030
		/* disable packet split support for IPv6 extension headers,
2031
		 * because some malformed IPv6 headers can hang the RX */
2032
		rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
2033
			  E1000_RFCTL_NEW_IPV6_EXT_DIS);
2034
2035
		ew32(RFCTL, rfctl);
2036
2037
		/* disable the stripping of CRC because it breaks
2038
		 * BMC firmware connected over SMBUS */
2039
		rctl |= E1000_RCTL_DTYP_PS /* | E1000_RCTL_SECRC */;
2040
2041
		psrctl |= adapter->rx_ps_bsize0 >>
2042
			E1000_PSRCTL_BSIZE0_SHIFT;
2043
2044
		switch (adapter->rx_ps_pages) {
2045
		case 3:
2046
			psrctl |= PAGE_SIZE <<
2047
				E1000_PSRCTL_BSIZE3_SHIFT;
2048
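			/* fall through */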
		case 2:
2049
			psrctl |= PAGE_SIZE <<
2050
				E1000_PSRCTL_BSIZE2_SHIFT;
2051
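			/* fall through */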
		case 1:
2052
			psrctl |= PAGE_SIZE >>
2053
				E1000_PSRCTL_BSIZE1_SHIFT;
2054
			break;
2055
		}
2056
2057
		ew32(PSRCTL, psrctl);
2058
	}
2059
2060
	ew32(RCTL, rctl);
2061
}
2062
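/*
 * Editor's illustration (assumes 4 KB pages, not part of the patch):
 * PAGE_USE_COUNT() above decides whether packet split is worthwhile:
 *
 *   MTU 1500: (1500 >> 12) + 1 = 1 page, but LPE is clear -> no split
 *   MTU 9000: (9000 >> 12) + 1 = 3 pages and LPE is set   -> split
 *
 * Anything needing more than 3 pages (or PAGE_SIZE > 16 KB) keeps the
 * legacy receive path.
 */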
2063
/**
2064
 * e1000_configure_rx - Configure Receive Unit after Reset
2065
 * @adapter: board private structure
2066
 *
2067
 * Configure the Rx unit of the MAC after a reset.
2068
 **/
2069
static void e1000_configure_rx(struct e1000_adapter *adapter)
2070
{
2071
	struct e1000_hw *hw = &adapter->hw;
2072
	struct e1000_ring *rx_ring = adapter->rx_ring;
2073
	u64 rdba;
2074
	u32 rdlen, rctl, rxcsum, ctrl_ext;
2075
2076
	if (adapter->rx_ps_pages) {
2077
		/* this is a 32 byte descriptor */
2078
		rdlen = rx_ring->count *
2079
			sizeof(union e1000_rx_desc_packet_split);
2080
		adapter->clean_rx = e1000_clean_rx_irq_ps;
2081
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
2082
	} else if (adapter->netdev->mtu > ETH_FRAME_LEN + VLAN_HLEN + 4) {
2083
		rdlen = rx_ring->count *
2084
			sizeof(struct e1000_rx_desc);
2085
		adapter->clean_rx = e1000_clean_rx_irq_jumbo;
2086
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_jumbo;
2087
	} else {
2088
		rdlen = rx_ring->count *
2089
			sizeof(struct e1000_rx_desc);
2090
		adapter->clean_rx = e1000_clean_rx_irq;
2091
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
2092
	}
2093
2094
	/* disable receives while setting up the descriptors */
2095
	rctl = er32(RCTL);
2096
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
2097
	e1e_flush();
2098
	msleep(10);
2099
2100
	/* set the Receive Delay Timer Register */
2101
	ew32(RDTR, adapter->rx_int_delay);
2102
2103
	/* irq moderation */
2104
	ew32(RADV, adapter->rx_abs_int_delay);
2105
	if (adapter->itr_setting != 0)
2106
		ew32(ITR,
2107
			1000000000 / (adapter->itr * 256));
2108
2109
	ctrl_ext = er32(CTRL_EXT);
2110
	/* Reset delay timers after every interrupt */
2111
	ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
2112
	/* Auto-Mask interrupts upon ICR access */
2113
	ctrl_ext |= E1000_CTRL_EXT_IAME;
2114
	ew32(IAM, 0xffffffff);
2115
	ew32(CTRL_EXT, ctrl_ext);
2116
	e1e_flush();
2117
2118
	/* Setup the HW Rx Head and Tail Descriptor Pointers and
2119
	 * the Base and Length of the Rx Descriptor Ring */
2120
	rdba = rx_ring->dma;
2121
	ew32(RDBAL, (rdba & DMA_32BIT_MASK));
2122
	ew32(RDBAH, (rdba >> 32));
2123
	ew32(RDLEN, rdlen);
2124
	ew32(RDH, 0);
2125
	ew32(RDT, 0);
2126
	rx_ring->head = E1000_RDH;
2127
	rx_ring->tail = E1000_RDT;
2128
2129
	/* Enable Receive Checksum Offload for TCP and UDP */
2130
	rxcsum = er32(RXCSUM);
2131
	if (adapter->flags & FLAG_RX_CSUM_ENABLED) {
2132
		rxcsum |= E1000_RXCSUM_TUOFL;
2133
2134
		/* IPv4 payload checksum for UDP fragments must be
2135
		 * used in conjunction with packet-split. */
2136
		if (adapter->rx_ps_pages)
2137
			rxcsum |= E1000_RXCSUM_IPPCSE;
2138
	} else {
2139
		rxcsum &= ~E1000_RXCSUM_TUOFL;
2140
		/* no need to clear IPPCSE as it defaults to 0 */
2141
	}
2142
	ew32(RXCSUM, rxcsum);
2143
2144
	/* Enable early receives on supported devices, only takes effect when
2145
 * packet size is equal to or larger than the specified value (in 8-byte
2146
	 * units), e.g. using jumbo frames when setting to E1000_ERT_2048 */
2147
	if ((adapter->flags & FLAG_HAS_ERT) &&
2148
	    (adapter->netdev->mtu > ETH_DATA_LEN))
2149
		ew32(ERT, E1000_ERT_2048);
2150
2151
	/* Enable Receives */
2152
	ew32(RCTL, rctl);
2153
}
2154
2155
/**
2156
 *  e1000_mc_addr_list_update - Update Multicast addresses
2157
 *  @hw: pointer to the HW structure
2158
 *  @mc_addr_list: array of multicast addresses to program
2159
 *  @mc_addr_count: number of multicast addresses to program
2160
 *  @rar_used_count: the first RAR register free to program
2161
 *  @rar_count: total number of supported Receive Address Registers
2162
 *
2163
 *  Updates the Receive Address Registers and Multicast Table Array.
2164
 *  The caller must have a packed mc_addr_list of multicast addresses.
2165
 *  The parameter rar_count will usually be hw->mac.rar_entry_count
2166
 *  unless there are workarounds that change this.  This wrapper simply
2167
 *  dispatches to the MAC-specific implementation through
2168
 *  hw->mac.ops.mc_addr_list_update.
2169
 **/
2170
static void e1000_mc_addr_list_update(struct e1000_hw *hw, u8 *mc_addr_list,
2171
			       u32 mc_addr_count, u32 rar_used_count,
2172
			       u32 rar_count)
2173
{
2174
	hw->mac.ops.mc_addr_list_update(hw, mc_addr_list, mc_addr_count,
2175
				        rar_used_count, rar_count);
2176
}
2177
2178
/**
2179
 * e1000_set_multi - Multicast and Promiscuous mode set
2180
 * @netdev: network interface device structure
2181
 *
2182
 * The set_multi entry point is called whenever the multicast address
2183
 * list or the network interface flags are updated.  This routine is
2184
 * responsible for configuring the hardware for proper multicast,
2185
 * promiscuous mode, and all-multi behavior.
2186
 **/
2187
static void e1000_set_multi(struct net_device *netdev)
2188
{
2189
	struct e1000_adapter *adapter = netdev_priv(netdev);
2190
	struct e1000_hw *hw = &adapter->hw;
2191
	struct e1000_mac_info *mac = &hw->mac;
2192
	struct dev_mc_list *mc_ptr;
2193
	u8  *mta_list;
2194
	u32 rctl;
2195
	int i;
2196
2197
	/* Check for Promiscuous and All Multicast modes */
2198
2199
	rctl = er32(RCTL);
2200
2201
	if (netdev->flags & IFF_PROMISC) {
2202
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2203
	} else if (netdev->flags & IFF_ALLMULTI) {
2204
		rctl |= E1000_RCTL_MPE;
2205
		rctl &= ~E1000_RCTL_UPE;
2206
	} else {
2207
		rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
2208
	}
2209
2210
	ew32(RCTL, rctl);
2211
2212
	if (netdev->mc_count) {
2213
		mta_list = kmalloc(netdev->mc_count * 6, GFP_ATOMIC);
2214
		if (!mta_list)
2215
			return;
2216
2217
		/* prepare a packed array of only addresses. */
2218
		mc_ptr = netdev->mc_list;
2219
2220
		for (i = 0; i < netdev->mc_count; i++) {
2221
			if (!mc_ptr)
2222
				break;
2223
			memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr,
2224
			       ETH_ALEN);
2225
			mc_ptr = mc_ptr->next;
2226
		}
2227
2228
		e1000_mc_addr_list_update(hw, mta_list, i, 1,
2229
					  mac->rar_entry_count);
2230
		kfree(mta_list);
2231
	} else {
2232
		/*
2233
		 * if we're called from probe, we might not have
2234
		 * anything to do here, so clear out the list
2235
		 */
2236
		e1000_mc_addr_list_update(hw, NULL, 0, 1,
2237
					  mac->rar_entry_count);
2238
	}
2239
}
2240
2241
/**
2242
 * e1000_configure - configure the hardware for RX and TX
2243
 * @adapter: private board structure
2244
 **/
2245
static void e1000_configure(struct e1000_adapter *adapter)
2246
{
2247
	e1000_set_multi(adapter->netdev);
2248
2249
	e1000_restore_vlan(adapter);
2250
	e1000_init_manageability(adapter);
2251
2252
	e1000_configure_tx(adapter);
2253
	e1000_setup_rctl(adapter);
2254
	e1000_configure_rx(adapter);
2255
	adapter->alloc_rx_buf(adapter,
2256
			      e1000_desc_unused(adapter->rx_ring));
2257
}
2258
2259
/**
2260
 * e1000e_power_up_phy - restore link in case the phy was powered down
2261
 * @adapter: address of board private structure
2262
 *
2263
 * The phy may be powered down to save power and turn off link when the
2264
 * driver is unloaded and wake on lan is not enabled (among others)
2265
 * *** this routine MUST be followed by a call to e1000e_reset ***
2266
 **/
2267
void e1000e_power_up_phy(struct e1000_adapter *adapter)
2268
{
2269
	u16 mii_reg = 0;
2270
2271
	/* Just clear the power down bit to wake the phy back up */
2272
	if (adapter->hw.media_type == e1000_media_type_copper) {
2273
		/* according to the manual, the phy will retain its
2274
		 * settings across a power-down/up cycle */
2275
		e1e_rphy(&adapter->hw, PHY_CONTROL, &mii_reg);
2276
		mii_reg &= ~MII_CR_POWER_DOWN;
2277
		e1e_wphy(&adapter->hw, PHY_CONTROL, mii_reg);
2278
	}
2279
2280
	adapter->hw.mac.ops.setup_link(&adapter->hw);
2281
}
2282
2283
/**
2284
 * e1000_power_down_phy - Power down the PHY
2285
 *
2286
 * Power down the PHY so no link is implied when interface is down
2287
 * The PHY cannot be powered down if management or WoL is active
2288
 */
2289
static void e1000_power_down_phy(struct e1000_adapter *adapter)
2290
{
2291
	struct e1000_hw *hw = &adapter->hw;
2292
	u16 mii_reg;
2293
2294
	/* WoL is enabled: the PHY must stay powered */
2295
	if (adapter->wol)
2296
		return;
2297
2298
	/* non-copper PHY? */
2299
	if (adapter->hw.media_type != e1000_media_type_copper)
2300
		return;
2301
2302
	/* reset is blocked because of a SoL/IDER session */
2303
	if (e1000e_check_mng_mode(hw) ||
2304
	    e1000_check_reset_block(hw))
2305
		return;
2306
2307
	/* manageability (AMT) is enabled */
2308
	if (er32(MANC) & E1000_MANC_SMBUS_EN)
2309
		return;
2310
2311
	/* power down the PHY */
2312
	e1e_rphy(hw, PHY_CONTROL, &mii_reg);
2313
	mii_reg |= MII_CR_POWER_DOWN;
2314
	e1e_wphy(hw, PHY_CONTROL, mii_reg);
2315
	mdelay(1);
2316
}
2317
2318
/**
2319
 * e1000e_reset - bring the hardware into a known good state
2320
 *
2321
 * This function boots the hardware and enables some settings that
2322
 * require a configuration cycle of the hardware - those cannot be
2323
 * set/changed during runtime. After reset the device needs to be
2324
 * properly configured for rx, tx etc.
2325
 */
2326
void e1000e_reset(struct e1000_adapter *adapter)
2327
{
2328
	struct e1000_mac_info *mac = &adapter->hw.mac;
2329
	struct e1000_hw *hw = &adapter->hw;
2330
	u32 tx_space, min_tx_space, min_rx_space;
2331
	u16 hwm;
2332
2333
	if (mac->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
2334
		/* To maintain wire speed transmits, the Tx FIFO should be
2335
		 * large enough to accommodate two full transmit packets,
2336
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
2337
		 * the Rx FIFO should be large enough to accommodate at least
2338
		 * one full receive packet and is similarly rounded up and
2339
		 * expressed in KB. */
2340
		adapter->pba = er32(PBA);
2341
		/* upper 16 bits has Tx packet buffer allocation size in KB */
2342
		tx_space = adapter->pba >> 16;
2343
		/* lower 16 bits has Rx packet buffer allocation size in KB */
2344
		adapter->pba &= 0xffff;
2345
		/* the Tx FIFO also stores 16 bytes per packet, but don't count
2346
		 * the Ethernet FCS because hardware appends it */
2347
		min_tx_space = (mac->max_frame_size +
2348
				sizeof(struct e1000_tx_desc) -
2349
				ETH_FCS_LEN) * 2;
2350
		min_tx_space = ALIGN(min_tx_space, 1024);
2351
		min_tx_space >>= 10;
2352
		/* software strips receive CRC, so leave room for it */
2353
		min_rx_space = mac->max_frame_size;
2354
		min_rx_space = ALIGN(min_rx_space, 1024);
2355
		min_rx_space >>= 10;
2356
2357
		/* If current Tx allocation is less than the min Tx FIFO size,
2358
		 * and the min Tx FIFO size is less than the current Rx FIFO
2359
		 * allocation, take space away from current Rx allocation */
2360
		if (tx_space < min_tx_space &&
2361
		    ((min_tx_space - tx_space) < adapter->pba)) {
2362
			adapter->pba -= (min_tx_space - tx_space);
2363
2364
			/* if short on rx space, rx wins and must trump tx
2365
			 * adjustment or use Early Receive if available */
2366
			if ((adapter->pba < min_rx_space) &&
2367
			    (!(adapter->flags & FLAG_HAS_ERT)))
2368
				/* ERT enabled in e1000_configure_rx */
2369
				adapter->pba = min_rx_space;
2370
		}
2371
	}
2372
2373
	ew32(PBA, adapter->pba);
2374
2375
	/* flow control settings */
2376
	/* The high water mark must be low enough to fit one full frame
2377
	 * (or the size used for early receive) above it in the Rx FIFO.
2378
	 * Set it to the lower of:
2379
	 * - 90% of the Rx FIFO size, and
2380
	 * - the full Rx FIFO size minus the early receive size (for parts
2381
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
2382
	 * - the full Rx FIFO size minus one full frame */
2383
	if (adapter->flags & FLAG_HAS_ERT)
2384
		hwm = min(((adapter->pba << 10) * 9 / 10),
2385
			  ((adapter->pba << 10) - (E1000_ERT_2048 << 3)));
2386
	else
2387
		hwm = min(((adapter->pba << 10) * 9 / 10),
2388
			  ((adapter->pba << 10) - mac->max_frame_size));
2389
2390
	mac->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */
2391
	mac->fc_low_water = mac->fc_high_water - 8;
2392
2393
	if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
2394
		mac->fc_pause_time = 0xFFFF;
2395
	else
2396
		mac->fc_pause_time = E1000_FC_PAUSE_TIME;
2397
	mac->fc = mac->original_fc;
2398
2399
	/* Allow time for pending master requests to run */
2400
	mac->ops.reset_hw(hw);
2401
	ew32(WUC, 0);
2402
2403
	if (mac->ops.init_hw(hw))
2404
		ndev_err(adapter->netdev, "Hardware Error\n");
2405
2406
	e1000_update_mng_vlan(adapter);
2407
2408
	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
2409
	ew32(VET, ETH_P_8021Q);
2410
2411
	e1000e_reset_adaptive(hw);
2412
	e1000_get_phy_info(hw);
2413
2414
	if (!(adapter->flags & FLAG_SMART_POWER_DOWN)) {
2415
		u16 phy_data = 0;
2416
		/* speed up time to link by disabling smart power down, ignore
2417
		 * the return value of this function because there is nothing
2418
		 * different we would do if it failed */
2419
		e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
2420
		phy_data &= ~IGP02E1000_PM_SPD;
2421
		e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
2422
	}
2423
2424
	e1000_release_manageability(adapter);
2425
}
2426
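/*
 * Editor's illustration (values assumed, not from the patch): for a
 * 9018-byte jumbo frame the FIFO carve-up above works out to
 *
 *   min_tx_space = ALIGN((9018 + 16 - 4) * 2, 1024) >> 10 = 18 KB
 *   min_rx_space = ALIGN(9018, 1024) >> 10 = 9 KB
 *
 * so if the Tx half of the PBA is below 18 KB, the shortfall is taken
 * from the Rx allocation (clamped back to min_rx_space on parts without
 * early receive support).
 */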
2427
int e1000e_up(struct e1000_adapter *adapter)
2428
{
2429
	struct e1000_hw *hw = &adapter->hw;
2430
2431
	/* hardware has been reset, we need to reload some things */
2432
	e1000_configure(adapter);
2433
2434
	clear_bit(__E1000_DOWN, &adapter->state);
2435
2436
	napi_enable(&adapter->napi);
2437
	e1000_irq_enable(adapter);
2438
2439
	/* fire a link change interrupt to start the watchdog */
2440
	ew32(ICS, E1000_ICS_LSC);
2441
	return 0;
2442
}
2443
2444
void e1000e_down(struct e1000_adapter *adapter)
2445
{
2446
	struct net_device *netdev = adapter->netdev;
2447
	struct e1000_hw *hw = &adapter->hw;
2448
	u32 tctl, rctl;
2449
2450
	/* signal that we're down so the interrupt handler does not
2451
	 * reschedule our watchdog timer */
2452
	set_bit(__E1000_DOWN, &adapter->state);
2453
2454
	/* disable receives in the hardware */
2455
	rctl = er32(RCTL);
2456
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
2457
	/* flush and sleep below */
2458
2459
	netif_stop_queue(netdev);
2460
2461
	/* disable transmits in the hardware */
2462
	tctl = er32(TCTL);
2463
	tctl &= ~E1000_TCTL_EN;
2464
	ew32(TCTL, tctl);
2465
	/* flush both disables and wait for them to finish */
2466
	e1e_flush();
2467
	msleep(10);
2468
2469
	napi_disable(&adapter->napi);
2470
	e1000_irq_disable(adapter);
2471
2472
	del_timer_sync(&adapter->watchdog_timer);
2473
	del_timer_sync(&adapter->phy_info_timer);
2474
2475
	netdev->tx_queue_len = adapter->tx_queue_len;
2476
	netif_carrier_off(netdev);
2477
	adapter->link_speed = 0;
2478
	adapter->link_duplex = 0;
2479
2480
	e1000e_reset(adapter);
2481
	e1000_clean_tx_ring(adapter);
2482
	e1000_clean_rx_ring(adapter);
2483
2484
	/*
2485
	 * TODO: for power management, we could drop the link and
2486
	 * pci_disable_device here.
2487
	 */
2488
}
2489
2490
void e1000e_reinit_locked(struct e1000_adapter *adapter)
2491
{
2492
	might_sleep();
2493
	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
2494
		msleep(1);
2495
	e1000e_down(adapter);
2496
	e1000e_up(adapter);
2497
	clear_bit(__E1000_RESETTING, &adapter->state);
2498
}
2499
2500
/**
2501
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
2502
 * @adapter: board private structure to initialize
2503
 *
2504
 * e1000_sw_init initializes the Adapter private data structure.
2505
 * Fields are initialized based on PCI device information and
2506
 * OS network device settings (MTU size).
2507
 **/
2508
static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
2509
{
2510
	struct e1000_hw *hw = &adapter->hw;
2511
	struct net_device *netdev = adapter->netdev;
2512
2513
	adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
2514
	adapter->rx_ps_bsize0 = 128;
2515
	hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2516
	hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
2517
2518
	adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
2519
	if (!adapter->tx_ring)
2520
		goto err;
2521
2522
	adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
2523
	if (!adapter->rx_ring)
2524
		goto err;
2525
2526
	spin_lock_init(&adapter->tx_queue_lock);
2527
2528
	/* Explicitly disable IRQ since the NIC can be in any state. */
2529
	atomic_set(&adapter->irq_sem, 0);
2530
	e1000_irq_disable(adapter);
2531
2532
	spin_lock_init(&adapter->stats_lock);
2533
2534
	set_bit(__E1000_DOWN, &adapter->state);
2535
	return 0;
2536
2537
err:
2538
	ndev_err(netdev, "Unable to allocate memory for queues\n");
2539
	kfree(adapter->rx_ring);
2540
	kfree(adapter->tx_ring);
2541
	return -ENOMEM;
2542
}
2543
2544
/**
2545
 * e1000_open - Called when a network interface is made active
2546
 * @netdev: network interface device structure
2547
 *
2548
 * Returns 0 on success, negative value on failure
2549
 *
2550
 * The open entry point is called when a network interface is made
2551
 * active by the system (IFF_UP).  At this point all resources needed
2552
 * for transmit and receive operations are allocated, the interrupt
2553
 * handler is registered with the OS, the watchdog timer is started,
2554
 * and the stack is notified that the interface is ready.
2555
 **/
2556
static int e1000_open(struct net_device *netdev)
2557
{
2558
	struct e1000_adapter *adapter = netdev_priv(netdev);
2559
	struct e1000_hw *hw = &adapter->hw;
2560
	int err;
2561
2562
	/* disallow open during test */
2563
	if (test_bit(__E1000_TESTING, &adapter->state))
2564
		return -EBUSY;
2565
2566
	/* allocate transmit descriptors */
2567
	err = e1000e_setup_tx_resources(adapter);
2568
	if (err)
2569
		goto err_setup_tx;
2570
2571
	/* allocate receive descriptors */
2572
	err = e1000e_setup_rx_resources(adapter);
2573
	if (err)
2574
		goto err_setup_rx;
2575
2576
	e1000e_power_up_phy(adapter);
2577
2578
	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2579
	if ((adapter->hw.mng_cookie.status &
2580
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
2581
		e1000_update_mng_vlan(adapter);
2582
2583
	/* If AMT is enabled, let the firmware know that the network
2584
	 * interface is now open */
2585
	if ((adapter->flags & FLAG_HAS_AMT) &&
2586
	    e1000e_check_mng_mode(&adapter->hw))
2587
		e1000_get_hw_control(adapter);
2588
2589
	/* before we allocate an interrupt, we must be ready to handle it.
2590
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2591
	 * as soon as we call pci_request_irq, so we have to set up our
2592
	 * clean_rx handler before we do so.  */
2593
	e1000_configure(adapter);
2594
2595
	err = e1000_request_irq(adapter);
2596
	if (err)
2597
		goto err_req_irq;
2598
2599
	/* From here on the code is the same as e1000e_up() */
2600
	clear_bit(__E1000_DOWN, &adapter->state);
2601
2602
	napi_enable(&adapter->napi);
2603
2604
	e1000_irq_enable(adapter);
2605
2606
	/* fire a link status change interrupt to start the watchdog */
2607
	ew32(ICS, E1000_ICS_LSC);
2608
2609
	return 0;
2610
2611
err_req_irq:
2612
	e1000_release_hw_control(adapter);
2613
	e1000_power_down_phy(adapter);
2614
	e1000e_free_rx_resources(adapter);
2615
err_setup_rx:
2616
	e1000e_free_tx_resources(adapter);
2617
err_setup_tx:
2618
	e1000e_reset(adapter);
2619
2620
	return err;
2621
}
2622
2623
/**
2624
 * e1000_close - Disables a network interface
2625
 * @netdev: network interface device structure
2626
 *
2627
 * Returns 0, this is not allowed to fail
2628
 *
2629
 * The close entry point is called when an interface is de-activated
2630
 * by the OS.  The hardware is still under the drivers control, but
2631
 * needs to be disabled.  A global MAC reset is issued to stop the
2632
 * hardware, and all transmit and receive resources are freed.
2633
 **/
2634
static int e1000_close(struct net_device *netdev)
2635
{
2636
	struct e1000_adapter *adapter = netdev_priv(netdev);
2637
2638
	WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
2639
	e1000e_down(adapter);
2640
	e1000_power_down_phy(adapter);
2641
	e1000_free_irq(adapter);
2642
2643
	e1000e_free_tx_resources(adapter);
2644
	e1000e_free_rx_resources(adapter);
2645
2646
	/* kill manageability vlan ID if supported, but not if a vlan with
2647
	 * the same ID is registered on the host OS (let 8021q kill it) */
2648
	if ((adapter->hw.mng_cookie.status &
2649
			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2650
	     !(adapter->vlgrp &&
2651
	       vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
2652
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
2653
2654
	/* If AMT is enabled, let the firmware know that the network
2655
	 * interface is now closed */
2656
	if ((adapter->flags & FLAG_HAS_AMT) &&
2657
	    e1000e_check_mng_mode(&adapter->hw))
2658
		e1000_release_hw_control(adapter);
2659
2660
	return 0;
2661
}
2662
/**
2663
 * e1000_set_mac - Change the Ethernet Address of the NIC
2664
 * @netdev: network interface device structure
2665
 * @p: pointer to an address structure
2666
 *
2667
 * Returns 0 on success, negative on failure
2668
 **/
2669
static int e1000_set_mac(struct net_device *netdev, void *p)
2670
{
2671
	struct e1000_adapter *adapter = netdev_priv(netdev);
2672
	struct sockaddr *addr = p;
2673
2674
	if (!is_valid_ether_addr(addr->sa_data))
2675
		return -EADDRNOTAVAIL;
2676
2677
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2678
	memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
2679
2680
	e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2681
2682
	if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
2683
		/* activate the work around */
2684
		e1000e_set_laa_state_82571(&adapter->hw, 1);
2685
2686
		/* Hold a copy of the LAA in RAR[14].  This is done so that
2687
		 * between the time RAR[0] gets clobbered and the time it
2688
		 * gets fixed (in e1000_watchdog), the actual LAA is in one
2689
		 * of the RARs and no incoming packets directed to this port
2690
		 * are dropped. Eventually the LAA will be in RAR[0] and
2691
		 * RAR[14] */
2692
		e1000e_rar_set(&adapter->hw,
2693
			      adapter->hw.mac.addr,
2694
			      adapter->hw.mac.rar_entry_count - 1);
2695
	}
2696
2697
	return 0;
2698
}
2699
2700
/* Need to wait a few seconds after link up to get diagnostic information from
2701
 * the phy */
2702
static void e1000_update_phy_info(unsigned long data)
2703
{
2704
	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
2705
	e1000_get_phy_info(&adapter->hw);
2706
}
2707
2708
/**
2709
 * e1000e_update_stats - Update the board statistics counters
2710
 * @adapter: board private structure
2711
 **/
2712
void e1000e_update_stats(struct e1000_adapter *adapter)
2713
{
2714
	struct e1000_hw *hw = &adapter->hw;
2715
	struct pci_dev *pdev = adapter->pdev;
2716
	unsigned long irq_flags;
2717
	u16 phy_tmp;
2718
2719
#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
2720
2721
	/*
2722
	 * Prevent stats update while adapter is being reset, or if the pci
2723
	 * connection is down.
2724
	 */
2725
	if (adapter->link_speed == 0)
2726
		return;
2727
	if (pci_channel_offline(pdev))
2728
		return;
2729
2730
	spin_lock_irqsave(&adapter->stats_lock, irq_flags);
2731
2732
	/* these counters are modified from e1000_adjust_tbi_stats,
2733
	 * called from the interrupt context, so they must only
2734
	 * be written while holding adapter->stats_lock
2735
	 */
2736
2737
	adapter->stats.crcerrs += er32(CRCERRS);
2738
	adapter->stats.gprc += er32(GPRC);
2739
	adapter->stats.gorcl += er32(GORCL);
2740
	adapter->stats.gorch += er32(GORCH);
2741
	adapter->stats.bprc += er32(BPRC);
2742
	adapter->stats.mprc += er32(MPRC);
2743
	adapter->stats.roc += er32(ROC);
2744
2745
	if (adapter->flags & FLAG_HAS_STATS_PTC_PRC) {
2746
		adapter->stats.prc64 += er32(PRC64);
2747
		adapter->stats.prc127 += er32(PRC127);
2748
		adapter->stats.prc255 += er32(PRC255);
2749
		adapter->stats.prc511 += er32(PRC511);
2750
		adapter->stats.prc1023 += er32(PRC1023);
2751
		adapter->stats.prc1522 += er32(PRC1522);
2752
		adapter->stats.symerrs += er32(SYMERRS);
2753
		adapter->stats.sec += er32(SEC);
2754
	}
2755
2756
	adapter->stats.mpc += er32(MPC);
2757
	adapter->stats.scc += er32(SCC);
2758
	adapter->stats.ecol += er32(ECOL);
2759
	adapter->stats.mcc += er32(MCC);
2760
	adapter->stats.latecol += er32(LATECOL);
2761
	adapter->stats.dc += er32(DC);
2762
	adapter->stats.rlec += er32(RLEC);
2763
	adapter->stats.xonrxc += er32(XONRXC);
2764
	adapter->stats.xontxc += er32(XONTXC);
2765
	adapter->stats.xoffrxc += er32(XOFFRXC);
2766
	adapter->stats.xofftxc += er32(XOFFTXC);
2767
	adapter->stats.fcruc += er32(FCRUC);
2768
	adapter->stats.gptc += er32(GPTC);
2769
	adapter->stats.gotcl += er32(GOTCL);
2770
	adapter->stats.gotch += er32(GOTCH);
2771
	adapter->stats.rnbc += er32(RNBC);
2772
	adapter->stats.ruc += er32(RUC);
2773
	adapter->stats.rfc += er32(RFC);
2774
	adapter->stats.rjc += er32(RJC);
2775
	adapter->stats.torl += er32(TORL);
2776
	adapter->stats.torh += er32(TORH);
2777
	adapter->stats.totl += er32(TOTL);
2778
	adapter->stats.toth += er32(TOTH);
2779
	adapter->stats.tpr += er32(TPR);
2780
2781
	if (adapter->flags & FLAG_HAS_STATS_PTC_PRC) {
2782
		adapter->stats.ptc64 += er32(PTC64);
2783
		adapter->stats.ptc127 += er32(PTC127);
2784
		adapter->stats.ptc255 += er32(PTC255);
2785
		adapter->stats.ptc511 += er32(PTC511);
2786
		adapter->stats.ptc1023 += er32(PTC1023);
2787
		adapter->stats.ptc1522 += er32(PTC1522);
2788
	}
2789
2790
	adapter->stats.mptc += er32(MPTC);
2791
	adapter->stats.bptc += er32(BPTC);
2792
2793
	/* used for adaptive IFS */
2794
2795
	hw->mac.tx_packet_delta = er32(TPT);
2796
	adapter->stats.tpt += hw->mac.tx_packet_delta;
2797
	hw->mac.collision_delta = er32(COLC);
2798
	adapter->stats.colc += hw->mac.collision_delta;
2799
2800
	adapter->stats.algnerrc += er32(ALGNERRC);
2801
	adapter->stats.rxerrc += er32(RXERRC);
2802
	adapter->stats.tncrs += er32(TNCRS);
2803
	adapter->stats.cexterr += er32(CEXTERR);
2804
	adapter->stats.tsctc += er32(TSCTC);
2805
	adapter->stats.tsctfc += er32(TSCTFC);
2806
2807
	adapter->stats.iac += er32(IAC);
2808
2809
	if (adapter->flags & FLAG_HAS_STATS_ICR_ICT) {
2810
		adapter->stats.icrxoc += er32(ICRXOC);
2811
		adapter->stats.icrxptc += er32(ICRXPTC);
2812
		adapter->stats.icrxatc += er32(ICRXATC);
2813
		adapter->stats.ictxptc += er32(ICTXPTC);
2814
		adapter->stats.ictxatc += er32(ICTXATC);
2815
		adapter->stats.ictxqec += er32(ICTXQEC);
2816
		adapter->stats.ictxqmtc += er32(ICTXQMTC);
2817
		adapter->stats.icrxdmtc += er32(ICRXDMTC);
2818
	}
2819
2820
	/* Fill out the OS statistics structure */
2821
	adapter->net_stats.rx_packets = adapter->stats.gprc;
2822
	adapter->net_stats.tx_packets = adapter->stats.gptc;
2823
	adapter->net_stats.rx_bytes = adapter->stats.gorcl;
2824
	adapter->net_stats.tx_bytes = adapter->stats.gotcl;
2825
	adapter->net_stats.multicast = adapter->stats.mprc;
2826
	adapter->net_stats.collisions = adapter->stats.colc;
2827
2828
	/* Rx Errors */
2829
2830
	/* RLEC on some newer hardware can be incorrect so build
2831
	 * our own version based on RUC and ROC */
2832
	adapter->net_stats.rx_errors = adapter->stats.rxerrc +
2833
		adapter->stats.crcerrs + adapter->stats.algnerrc +
2834
		adapter->stats.ruc + adapter->stats.roc +
2835
		adapter->stats.cexterr;
2836
	adapter->net_stats.rx_length_errors = adapter->stats.ruc +
2837
					      adapter->stats.roc;
2838
	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
2839
	adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
2840
	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
2841
2842
	/* Tx Errors */
2843
	adapter->net_stats.tx_errors = adapter->stats.ecol +
2844
				       adapter->stats.latecol;
2845
	adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
2846
	adapter->net_stats.tx_window_errors = adapter->stats.latecol;
2847
	adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
2848
2849
	/* Tx Dropped needs to be maintained elsewhere */
2850
2851
	/* Phy Stats */
2852
	if (hw->media_type == e1000_media_type_copper) {
2853
		if ((adapter->link_speed == SPEED_1000) &&
2854
		   (!e1e_rphy(hw, PHY_1000T_STATUS, &phy_tmp))) {
2855
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
2856
			adapter->phy_stats.idle_errors += phy_tmp;
2857
		}
2858
	}
2859
2860
	/* Management Stats */
2861
	adapter->stats.mgptc += er32(MGTPTC);
2862
	adapter->stats.mgprc += er32(MGTPRC);
2863
	adapter->stats.mgpdc += er32(MGTPDC);
2864
2865
	spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
2866
}
2867
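/* Illustrative note, not part of the original patch: the statistics
 * registers read above are clear-on-read, so each er32() returns the
 * delta since the last read and the driver accumulates it with "+=".
 * Octet counts are kept by the hardware as a low/high register pair
 * (GORCL/GORCH, GOTCL/GOTCH, TORL/TORH, ...); this driver accumulates
 * the two words separately and feeds only the low-register total into
 * net_stats.rx_bytes/tx_bytes.  A sketch of how one pair could instead
 * be folded into a single 64-bit delta, assuming the same er32()
 * accessor:
 *
 *	u32 lo = er32(GORCL);		// clear-on-read, low 32 bits
 *	u32 hi = er32(GORCH);		// clear-on-read, high 32 bits
 *	u64 delta = ((u64)hi << 32) | lo;
 */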
2868
static void e1000_print_link_info(struct e1000_adapter *adapter)
2869
{
2870
	struct net_device *netdev = adapter->netdev;
2871
	struct e1000_hw *hw = &adapter->hw;
2872
	u32 ctrl = er32(CTRL);
2873
2874
	ndev_info(netdev,
2875
		"Link is Up %d Mbps %s, Flow Control: %s\n",
2876
		adapter->link_speed,
2877
		(adapter->link_duplex == FULL_DUPLEX) ?
2878
				"Full Duplex" : "Half Duplex",
2879
		((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
2880
				"RX/TX" :
2881
		((ctrl & E1000_CTRL_RFCE) ? "RX" :
2882
		((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
2883
}
2884
2885
/**
2886
 * e1000_watchdog - Timer Call-back
2887
 * @data: pointer to adapter cast into an unsigned long
2888
 **/
2889
static void e1000_watchdog(unsigned long data)
2890
{
2891
	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
2892
2893
	/* Do the rest outside of interrupt context */
2894
	schedule_work(&adapter->watchdog_task);
2895
2896
	/* TODO: make this use queue_delayed_work() */
2897
}
2898
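/* Illustrative sketch only: the timer above exists purely to bounce the
 * real work into process context, since timer callbacks run in softirq
 * context where the watchdog's register polling and sleeping are
 * unwelcome.  The TODO could be satisfied by dropping the timer and
 * letting the work requeue itself; assuming a hypothetical
 * struct delayed_work member named watchdog_dwork:
 *
 *	INIT_DELAYED_WORK(&adapter->watchdog_dwork, e1000_watchdog_task);
 *	schedule_delayed_work(&adapter->watchdog_dwork, 2 * HZ);
 *
 * with the handler recovering the adapter via
 * container_of(work, struct e1000_adapter, watchdog_dwork.work) and
 * rescheduling itself at the end instead of re-arming a timer.
 */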
2899
static void e1000_watchdog_task(struct work_struct *work)
2900
{
2901
	struct e1000_adapter *adapter = container_of(work,
2902
					struct e1000_adapter, watchdog_task);
2903
2904
	struct net_device *netdev = adapter->netdev;
2905
	struct e1000_mac_info *mac = &adapter->hw.mac;
2906
	struct e1000_ring *tx_ring = adapter->tx_ring;
2907
	struct e1000_hw *hw = &adapter->hw;
2908
	u32 link, tctl;
2909
	s32 ret_val;
2910
	int tx_pending = 0;
2911
2912
	if ((netif_carrier_ok(netdev)) &&
2913
	    (er32(STATUS) & E1000_STATUS_LU))
2914
		goto link_up;
2915
2916
	ret_val = mac->ops.check_for_link(hw);
2917
	if ((ret_val == E1000_ERR_PHY) &&
2918
	    (adapter->hw.phy.type == e1000_phy_igp_3) &&
2919
	    (er32(CTRL) &
2920
	     E1000_PHY_CTRL_GBE_DISABLE)) {
2921
		/* See e1000_kmrn_lock_loss_workaround_ich8lan() */
2922
		ndev_info(netdev,
2923
			"Gigabit has been disabled, downgrading speed\n");
2924
	}
2925
2926
	if ((e1000e_enable_tx_pkt_filtering(hw)) &&
2927
	    (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
2928
		e1000_update_mng_vlan(adapter);
2929
2930
	if ((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
2931
	   !(er32(TXCW) & E1000_TXCW_ANE))
2932
		link = adapter->hw.mac.serdes_has_link;
2933
	else
2934
		link = er32(STATUS) & E1000_STATUS_LU;
2935
2936
	if (link) {
2937
		if (!netif_carrier_ok(netdev)) {
2938
			bool txb2b = true;
2939
			mac->ops.get_link_up_info(&adapter->hw,
2940
						   &adapter->link_speed,
2941
						   &adapter->link_duplex);
2942
			e1000_print_link_info(adapter);
2943
			/* tweak tx_queue_len according to speed/duplex
2944
			 * and adjust the timeout factor */
2945
			netdev->tx_queue_len = adapter->tx_queue_len;
2946
			adapter->tx_timeout_factor = 1;
2947
			switch (adapter->link_speed) {
2948
			case SPEED_10:
2949
				txb2b = false;
2950
				netdev->tx_queue_len = 10;
2951
				adapter->tx_timeout_factor = 14;
2952
				break;
2953
			case SPEED_100:
2954
				txb2b = false;
2955
				netdev->tx_queue_len = 100;
2956
				/* maybe add some timeout factor ? */
2957
				break;
2958
			}
2959
2960
			/* workaround: re-program speed mode bit after
2961
			 * link-up event */
2962
			if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
2963
			    !txb2b) {
2964
				u32 tarc0;
2965
				tarc0 = er32(TARC0);
2966
				tarc0 &= ~SPEED_MODE_BIT;
2967
				ew32(TARC0, tarc0);
2968
			}
2969
2970
			/* disable TSO for pcie and 10/100 speeds, to avoid
2971
			 * some hardware issues */
2972
			if (!(adapter->flags & FLAG_TSO_FORCE)) {
2973
				switch (adapter->link_speed) {
2974
				case SPEED_10:
2975
				case SPEED_100:
2976
					ndev_info(netdev,
2977
					"10/100 speed: disabling TSO\n");
2978
					netdev->features &= ~NETIF_F_TSO;
2979
					netdev->features &= ~NETIF_F_TSO6;
2980
					break;
2981
				case SPEED_1000:
2982
					netdev->features |= NETIF_F_TSO;
2983
					netdev->features |= NETIF_F_TSO6;
2984
					break;
2985
				default:
2986
					/* unexpected link speed:
					 * leave TSO flags unchanged */
2987
					break;
2988
				}
2989
			}
2990
2991
			/* enable transmits in the hardware, need to do this
2992
			 * after setting TARC0 */
2993
			tctl = er32(TCTL);
2994
			tctl |= E1000_TCTL_EN;
2995
			ew32(TCTL, tctl);
2996
2997
			netif_carrier_on(netdev);
2998
			netif_wake_queue(netdev);
2999
3000
			if (!test_bit(__E1000_DOWN, &adapter->state))
3001
				mod_timer(&adapter->phy_info_timer,
3002
					  round_jiffies(jiffies + 2 * HZ));
3003
		} else {
3004
			/* make sure the receive unit is started */
3005
			if (adapter->flags & FLAG_RX_NEEDS_RESTART) {
3006
				u32 rctl = er32(RCTL);
3007
				ew32(RCTL, rctl |
3008
						E1000_RCTL_EN);
3009
			}
3010
		}
3011
	} else {
3012
		if (netif_carrier_ok(netdev)) {
3013
			adapter->link_speed = 0;
3014
			adapter->link_duplex = 0;
3015
			ndev_info(netdev, "Link is Down\n");
3016
			netif_carrier_off(netdev);
3017
			netif_stop_queue(netdev);
3018
			if (!test_bit(__E1000_DOWN, &adapter->state))
3019
				mod_timer(&adapter->phy_info_timer,
3020
					  round_jiffies(jiffies + 2 * HZ));
3021
3022
			if (adapter->flags & FLAG_RX_NEEDS_RESTART)
3023
				schedule_work(&adapter->reset_task);
3024
		}
3025
	}
3026
3027
link_up:
3028
	e1000e_update_stats(adapter);
3029
3030
	mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
3031
	adapter->tpt_old = adapter->stats.tpt;
3032
	mac->collision_delta = adapter->stats.colc - adapter->colc_old;
3033
	adapter->colc_old = adapter->stats.colc;
3034
3035
	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
3036
	adapter->gorcl_old = adapter->stats.gorcl;
3037
	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
3038
	adapter->gotcl_old = adapter->stats.gotcl;
3039
3040
	e1000e_update_adaptive(&adapter->hw);
3041
3042
	if (!netif_carrier_ok(netdev)) {
3043
		tx_pending = (e1000_desc_unused(tx_ring) + 1 <
3044
			       tx_ring->count);
3045
		if (tx_pending) {
3046
			/* We've lost link, so the controller stops DMA,
3047
			 * but we've got queued Tx work that's never going
3048
			 * to get done, so reset controller to flush Tx.
3049
			 * (Do the reset outside of interrupt context). */
3050
			adapter->tx_timeout_count++;
3051
			schedule_work(&adapter->reset_task);
3052
		}
3053
	}
3054
3055
	/* Cause software interrupt to ensure rx ring is cleaned */
3056
	ew32(ICS, E1000_ICS_RXDMT0);
3057
3058
	/* Force detection of hung controller every watchdog period */
3059
	adapter->detect_tx_hung = 1;
3060
3061
	/* With 82571 controllers, LAA may be overwritten due to controller
3062
	 * reset from the other port. Set the appropriate LAA in RAR[0] */
3063
	if (e1000e_get_laa_state_82571(hw))
3064
		e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
3065
3066
	/* Reset the timer */
3067
	if (!test_bit(__E1000_DOWN, &adapter->state))
3068
		mod_timer(&adapter->watchdog_timer,
3069
			  round_jiffies(jiffies + 2 * HZ));
3070
}
3071
3072
#define E1000_TX_FLAGS_CSUM		0x00000001
3073
#define E1000_TX_FLAGS_VLAN		0x00000002
3074
#define E1000_TX_FLAGS_TSO		0x00000004
3075
#define E1000_TX_FLAGS_IPV4		0x00000008
3076
#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
3077
#define E1000_TX_FLAGS_VLAN_SHIFT	16
3078
3079
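/* Illustrative note, not part of the original patch: tx_flags packs the
 * boolean offload indicators into the low bits and the 16-bit 802.1Q tag
 * into the upper half, per the mask/shift above.  For example, VLAN ID
 * 100 (0x0064) with default priority ends up as:
 *
 *	tx_flags |= E1000_TX_FLAGS_VLAN;
 *	tx_flags |= 0x0064 << E1000_TX_FLAGS_VLAN_SHIFT;   // 0x00640002
 *
 * and e1000_tx_queue() later copies (tx_flags & E1000_TX_FLAGS_VLAN_MASK)
 * straight into the descriptor's upper dword.
 */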
static int e1000_tso(struct e1000_adapter *adapter,
3080
		     struct sk_buff *skb)
3081
{
3082
	struct e1000_ring *tx_ring = adapter->tx_ring;
3083
	struct e1000_context_desc *context_desc;
3084
	struct e1000_buffer *buffer_info;
3085
	unsigned int i;
3086
	u32 cmd_length = 0;
3087
	u16 ipcse = 0, tucse, mss;
3088
	u8 ipcss, ipcso, tucss, tucso, hdr_len;
3089
	int err;
3090
3091
	if (skb_is_gso(skb)) {
3092
		if (skb_header_cloned(skb)) {
3093
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3094
			if (err)
3095
				return err;
3096
		}
3097
3098
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3099
		mss = skb_shinfo(skb)->gso_size;
3100
		if (skb->protocol == htons(ETH_P_IP)) {
3101
			struct iphdr *iph = ip_hdr(skb);
3102
			iph->tot_len = 0;
3103
			iph->check = 0;
3104
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3105
								 iph->daddr, 0,
3106
								 IPPROTO_TCP,
3107
								 0);
3108
			cmd_length = E1000_TXD_CMD_IP;
3109
			ipcse = skb_transport_offset(skb) - 1;
3110
		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
3111
			ipv6_hdr(skb)->payload_len = 0;
3112
			tcp_hdr(skb)->check =
3113
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3114
						 &ipv6_hdr(skb)->daddr,
3115
						 0, IPPROTO_TCP, 0);
3116
			ipcse = 0;
3117
		}
3118
		ipcss = skb_network_offset(skb);
3119
		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
3120
		tucss = skb_transport_offset(skb);
3121
		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
3122
		tucse = 0;
3123
3124
		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
3125
			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
3126
3127
		i = tx_ring->next_to_use;
3128
		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
3129
		buffer_info = &tx_ring->buffer_info[i];
3130
3131
		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
3132
		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
3133
		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
3134
		context_desc->upper_setup.tcp_fields.tucss = tucss;
3135
		context_desc->upper_setup.tcp_fields.tucso = tucso;
3136
		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
3137
		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
3138
		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
3139
		context_desc->cmd_and_length = cpu_to_le32(cmd_length);
3140
3141
		buffer_info->time_stamp = jiffies;
3142
		buffer_info->next_to_watch = i;
3143
3144
		i++;
3145
		if (i == tx_ring->count)
3146
			i = 0;
3147
		tx_ring->next_to_use = i;
3148
3149
		return 1;
3150
	}
3151
3152
	return 0;
3153
}
3154
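/* Illustrative note, not part of the original patch: the context
 * descriptor fields written above are byte offsets into the frame.  For
 * a plain Ethernet/IPv4/TCP packet with no VLAN tag and no IP options
 * they work out to:
 *
 *	ipcss = 14	// IP header starts after the 14-byte Ethernet header
 *	ipcso = 24	// IP checksum: 14 + offsetof(struct iphdr, check)
 *	ipcse = 33	// last byte of the 20-byte IP header (14 + 20 - 1)
 *	tucss = 34	// TCP header start
 *	tucso = 50	// TCP checksum: 34 + offsetof(struct tcphdr, check)
 *
 * so the hardware knows exactly which checksum fields to rewrite in each
 * segment it cuts out of the large send.
 */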
3155
static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
3156
{
3157
	struct e1000_ring *tx_ring = adapter->tx_ring;
3158
	struct e1000_context_desc *context_desc;
3159
	struct e1000_buffer *buffer_info;
3160
	unsigned int i;
3161
	u8 css;
3162
3163
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
3164
		css = skb_transport_offset(skb);
3165
3166
		i = tx_ring->next_to_use;
3167
		buffer_info = &tx_ring->buffer_info[i];
3168
		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
3169
3170
		context_desc->lower_setup.ip_config = 0;
3171
		context_desc->upper_setup.tcp_fields.tucss = css;
3172
		context_desc->upper_setup.tcp_fields.tucso =
3173
					css + skb->csum_offset;
3174
		context_desc->upper_setup.tcp_fields.tucse = 0;
3175
		context_desc->tcp_seg_setup.data = 0;
3176
		context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
3177
3178
		buffer_info->time_stamp = jiffies;
3179
		buffer_info->next_to_watch = i;
3180
3181
		i++;
3182
		if (i == tx_ring->count)
3183
			i = 0;
3184
		tx_ring->next_to_use = i;
3185
3186
		return 1;
3187
	}
3188
3189
	return 0;
3190
}
3191
3192
#define E1000_MAX_PER_TXD	8192
3193
#define E1000_MAX_TXD_PWR	12
3194
3195
static int e1000_tx_map(struct e1000_adapter *adapter,
3196
			struct sk_buff *skb, unsigned int first,
3197
			unsigned int max_per_txd, unsigned int nr_frags,
3198
			unsigned int mss)
3199
{
3200
	struct e1000_ring *tx_ring = adapter->tx_ring;
3201
	struct e1000_buffer *buffer_info;
3202
	unsigned int len = skb->len - skb->data_len;
3203
	unsigned int offset = 0, size, count = 0, i;
3204
	unsigned int f;
3205
3206
	i = tx_ring->next_to_use;
3207
3208
	while (len) {
3209
		buffer_info = &tx_ring->buffer_info[i];
3210
		size = min(len, max_per_txd);
3211
3212
		/* Workaround for premature desc write-backs
3213
		 * in TSO mode.  Append 4-byte sentinel desc */
3214
		if (mss && !nr_frags && size == len && size > 8)
3215
			size -= 4;
3216
3217
		buffer_info->length = size;
3218
		/* set time_stamp *before* dma to help avoid a possible race */
3219
		buffer_info->time_stamp = jiffies;
3220
		buffer_info->dma =
3221
			pci_map_single(adapter->pdev,
3222
				skb->data + offset,
3223
				size,
3224
				PCI_DMA_TODEVICE);
3225
		if (pci_dma_mapping_error(buffer_info->dma)) {
3226
			dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
3227
			adapter->tx_dma_failed++;
3228
			return -1;
3229
		}
3230
		buffer_info->next_to_watch = i;
3231
3232
		len -= size;
3233
		offset += size;
3234
		count++;
3235
		i++;
3236
		if (i == tx_ring->count)
3237
			i = 0;
3238
	}
3239
3240
	for (f = 0; f < nr_frags; f++) {
3241
		struct skb_frag_struct *frag;
3242
3243
		frag = &skb_shinfo(skb)->frags[f];
3244
		len = frag->size;
3245
		offset = frag->page_offset;
3246
3247
		while (len) {
3248
			buffer_info = &tx_ring->buffer_info[i];
3249
			size = min(len, max_per_txd);
3250
			/* Workaround for premature desc write-backs
3251
			 * in TSO mode.  Append 4-byte sentinel desc */
3252
			if (mss && f == (nr_frags-1) && size == len && size > 8)
3253
				size -= 4;
3254
3255
			buffer_info->length = size;
3256
			buffer_info->time_stamp = jiffies;
3257
			buffer_info->dma =
3258
				pci_map_page(adapter->pdev,
3259
					frag->page,
3260
					offset,
3261
					size,
3262
					PCI_DMA_TODEVICE);
3263
			if (pci_dma_mapping_error(buffer_info->dma)) {
3264
				dev_err(&adapter->pdev->dev,
3265
					"TX DMA page map failed\n");
3266
				adapter->tx_dma_failed++;
3267
				return -1;
3268
			}
3269
3270
			buffer_info->next_to_watch = i;
3271
3272
			len -= size;
3273
			offset += size;
3274
			count++;
3275
3276
			i++;
3277
			if (i == tx_ring->count)
3278
				i = 0;
3279
		}
3280
	}
3281
3282
	if (i == 0)
3283
		i = tx_ring->count - 1;
3284
	else
3285
		i--;
3286
3287
	tx_ring->buffer_info[i].skb = skb;
3288
	tx_ring->buffer_info[first].next_to_watch = i;
3289
3290
	return count;
3291
}
3292
3293
static void e1000_tx_queue(struct e1000_adapter *adapter,
3294
			   int tx_flags, int count)
3295
{
3296
	struct e1000_ring *tx_ring = adapter->tx_ring;
3297
	struct e1000_tx_desc *tx_desc = NULL;
3298
	struct e1000_buffer *buffer_info;
3299
	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
3300
	unsigned int i;
3301
3302
	if (tx_flags & E1000_TX_FLAGS_TSO) {
3303
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
3304
			     E1000_TXD_CMD_TSE;
3305
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3306
3307
		if (tx_flags & E1000_TX_FLAGS_IPV4)
3308
			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3309
	}
3310
3311
	if (tx_flags & E1000_TX_FLAGS_CSUM) {
3312
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3313
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3314
	}
3315
3316
	if (tx_flags & E1000_TX_FLAGS_VLAN) {
3317
		txd_lower |= E1000_TXD_CMD_VLE;
3318
		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3319
	}
3320
3321
	i = tx_ring->next_to_use;
3322
3323
	while (count--) {
3324
		buffer_info = &tx_ring->buffer_info[i];
3325
		tx_desc = E1000_TX_DESC(*tx_ring, i);
3326
		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3327
		tx_desc->lower.data =
3328
			cpu_to_le32(txd_lower | buffer_info->length);
3329
		tx_desc->upper.data = cpu_to_le32(txd_upper);
3330
3331
		i++;
3332
		if (i == tx_ring->count)
3333
			i = 0;
3334
	}
3335
3336
	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3337
3338
	/* Force memory writes to complete before letting h/w
3339
	 * know there are new descriptors to fetch.  (Only
3340
	 * applicable for weak-ordered memory model archs,
3341
	 * such as IA-64). */
3342
	wmb();
3343
3344
	tx_ring->next_to_use = i;
3345
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
3346
	/* we need this if more than one processor can write to our tail
3347
	 * at a time, it synchronizes IO on IA64/Altix systems */
3348
	mmiowb();
3349
}
3350
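/* Illustrative note, not part of the original patch: each data
 * descriptor written above carries the buffer length in the low bits of
 * the lower dword with the command/type bits (IFCS, DEXT, TSE, VLE, ...)
 * ORed above it, while the upper dword carries the checksum-offload
 * POPTS bits and, for tagged frames, the 802.1Q tag in its top 16 bits.
 * When checksum or TSO offload is in play, the DEXT bit is what switches
 * the hardware from the legacy descriptor layout to this extended one.
 * The tail write is the producer side of the ring protocol: wmb() makes
 * the descriptors globally visible before the doorbell, so the NIC never
 * fetches a half-written descriptor.
 */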
3351
#define MINIMUM_DHCP_PACKET_SIZE 282
3352
static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
3353
				    struct sk_buff *skb)
3354
{
3355
	struct e1000_hw *hw =  &adapter->hw;
3356
	u16 length, offset;
3357
3358
	if (vlan_tx_tag_present(skb)) {
3359
		if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id)
3360
		    && (adapter->hw.mng_cookie.status &
3361
			E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
3362
			return 0;
3363
	}
3364
3365
	if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
3366
		return 0;
3367
3368
	if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP))
3369
		return 0;
3370
3371
	{
3372
		const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
3373
		struct udphdr *udp;
3374
3375
		if (ip->protocol != IPPROTO_UDP)
3376
			return 0;
3377
3378
		udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
3379
		if (ntohs(udp->dest) != 67)
3380
			return 0;
3381
3382
		offset = (u8 *)udp + 8 - skb->data;
3383
		length = skb->len - offset;
3384
		return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
3385
	}
3386
3387
	return 0;
3388
}
3389
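/* Illustrative note, not part of the original patch: the "+ 8" above
 * skips the fixed 8-byte UDP header (sizeof(struct udphdr)), so what is
 * handed to the manageability firmware is the DHCP payload itself; port
 * 67 is the BOOTP/DHCP server port, i.e. only client requests headed for
 * a DHCP server are snooped.  Equivalent, more self-documenting
 * arithmetic:
 *
 *	u8 *dhcp = (u8 *)udp + sizeof(struct udphdr);
 *	length = skb->len - (dhcp - skb->data);
 */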
3390
static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3391
{
3392
	struct e1000_adapter *adapter = netdev_priv(netdev);
3393
3394
	netif_stop_queue(netdev);
3395
	/* Herbert's original patch had:
3396
	 *  smp_mb__after_netif_stop_queue();
3397
	 * but since that doesn't exist yet, just open code it. */
3398
	smp_mb();
3399
3400
	/* We need to check again in a case another CPU has just
3401
	 * made room available. */
3402
	if (e1000_desc_unused(adapter->tx_ring) < size)
3403
		return -EBUSY;
3404
3405
	/* A reprieve! */
3406
	netif_start_queue(netdev);
3407
	++adapter->restart_queue;
3408
	return 0;
3409
}
3410
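/* Illustrative note, not part of the original patch: the stop-then-
 * recheck dance above closes a race against the Tx cleanup path.
 * Without it, this interleaving loses a wakeup forever:
 *
 *	xmit: sees too few free descriptors
 *	clean: frees descriptors, sees queue still running,
 *	       so it never calls netif_wake_queue()
 *	xmit: netif_stop_queue()  ->  queue stopped, nobody left to wake
 *
 * Stopping first, issuing smp_mb(), and then re-reading
 * e1000_desc_unused() guarantees this CPU either observes the room the
 * cleaner just made (and restarts itself) or the cleaner observes the
 * stopped queue and wakes it.
 */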
3411
static int e1000_maybe_stop_tx(struct net_device *netdev, int size)
3412
{
3413
	struct e1000_adapter *adapter = netdev_priv(netdev);
3414
3415
	if (e1000_desc_unused(adapter->tx_ring) >= size)
3416
		return 0;
3417
	return __e1000_maybe_stop_tx(netdev, size);
3418
}
3419
3420
#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
3421
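/* Illustrative note, not part of the original patch: TXD_USE_COUNT() is
 * a cheap over-estimate of how many descriptors a buffer of S bytes
 * needs when each descriptor carries at most 2^X bytes.  With
 * X = E1000_MAX_TXD_PWR (12):
 *
 *	TXD_USE_COUNT(1500, 12) == (1500 >> 12) + 1 == 1
 *	TXD_USE_COUNT(8192, 12) == (8192 >> 12) + 1 == 3	// one spare
 *
 * Rounding up (and occasionally over) keeps the "is there room" check in
 * e1000_xmit_frame() conservative.
 */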
static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3422
{
3423
	struct e1000_adapter *adapter = netdev_priv(netdev);
3424
	struct e1000_ring *tx_ring = adapter->tx_ring;
3425
	unsigned int first;
3426
	unsigned int max_per_txd = E1000_MAX_PER_TXD;
3427
	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3428
	unsigned int tx_flags = 0;
3429
	unsigned int len = skb->len;
3430
	unsigned long irq_flags;
3431
	unsigned int nr_frags = 0;
3432
	unsigned int mss = 0;
3433
	int count = 0;
3434
	int tso;
3435
	unsigned int f;
3436
	len -= skb->data_len;
3437
3438
	if (test_bit(__E1000_DOWN, &adapter->state)) {
3439
		dev_kfree_skb_any(skb);
3440
		return NETDEV_TX_OK;
3441
	}
3442
3443
	if (skb->len <= 0) {
3444
		dev_kfree_skb_any(skb);
3445
		return NETDEV_TX_OK;
3446
	}
3447
3448
	mss = skb_shinfo(skb)->gso_size;
3449
	/* The controller does a simple calculation to
3450
	 * make sure there is enough room in the FIFO before
3451
	 * initiating the DMA for each buffer.  The calc is:
3452
	 * ceil(buffer len/mss) <= 4.  To make sure we don't
3453
	 * overrun the FIFO, adjust the max buffer len if mss
3454
	 * drops. */
3455
	if (mss) {
3456
		u8 hdr_len;
3457
		max_per_txd = min(mss << 2, max_per_txd);
3458
		max_txd_pwr = fls(max_per_txd) - 1;
3459
3460
		/* TSO Workaround for 82571/2/3 Controllers -- if skb->data
3461
		 * points to just header, pull a few bytes of payload from
3462
		 * frags into skb->data */
3463
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3464
		if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) {
3465
			unsigned int pull_size;
3466
3467
			pull_size = min((unsigned int)4, skb->data_len);
3468
			if (!__pskb_pull_tail(skb, pull_size)) {
3469
				ndev_err(netdev,
3470
					 "__pskb_pull_tail failed.\n");
3471
				dev_kfree_skb_any(skb);
3472
				return NETDEV_TX_OK;
3473
			}
3474
			len = skb->len - skb->data_len;
3475
		}
3476
	}
3477
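	/* Illustrative note, not part of the original patch: for a typical
	 * mss of 1460 the clamp above gives
	 *   max_per_txd = min(1460 << 2, 8192) = 5840
	 *   max_txd_pwr = fls(5840) - 1        = 12
	 * i.e. the standard case keeps the full 2^12-bytes-per-descriptor
	 * budget; only unusually small mss values shrink it.
	 */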
3478
	/* reserve a descriptor for the offload context */
3479
	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3480
		count++;
3481
	count++;
3482
3483
	count += TXD_USE_COUNT(len, max_txd_pwr);
3484
3485
	nr_frags = skb_shinfo(skb)->nr_frags;
3486
	for (f = 0; f < nr_frags; f++)
3487
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
3488
				       max_txd_pwr);
3489
3490
	if (adapter->hw.mac.tx_pkt_filtering)
3491
		e1000_transfer_dhcp_info(adapter, skb);
3492
3493
	if (!spin_trylock_irqsave(&adapter->tx_queue_lock, irq_flags))
3494
		/* Collision - tell upper layer to requeue */
3495
		return NETDEV_TX_LOCKED;
3496
3497
	/* need: count + 2 desc gap to keep tail from touching
3498
	 * head, otherwise try next time */
3499
	if (e1000_maybe_stop_tx(netdev, count + 2)) {
3500
		spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
3501
		return NETDEV_TX_BUSY;
3502
	}
3503
3504
	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
3505
		tx_flags |= E1000_TX_FLAGS_VLAN;
3506
		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
3507
	}
3508
3509
	first = tx_ring->next_to_use;
3510
3511
	tso = e1000_tso(adapter, skb);
3512
	if (tso < 0) {
3513
		dev_kfree_skb_any(skb);
3514
		spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
3515
		return NETDEV_TX_OK;
3516
	}
3517
3518
	if (tso)
3519
		tx_flags |= E1000_TX_FLAGS_TSO;
3520
	else if (e1000_tx_csum(adapter, skb))
3521
		tx_flags |= E1000_TX_FLAGS_CSUM;
3522
3523
	/* Old method was to assume IPv4 packet by default if TSO was enabled.
3524
	 * 82571 hardware supports TSO capabilities for IPv6 as well...
3525
	 * so we can no longer assume IPv4 by default. */
3526
	if (skb->protocol == htons(ETH_P_IP))
3527
		tx_flags |= E1000_TX_FLAGS_IPV4;
3528
3529
	count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
3530
	if (count < 0) {
3531
		/* handle pci_map_single() error in e1000_tx_map */
3532
		dev_kfree_skb_any(skb);
3533
		spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
3534
		return NETDEV_TX_BUSY;
3535
	}
3536
3537
	e1000_tx_queue(adapter, tx_flags, count);
3538
3539
	netdev->trans_start = jiffies;
3540
3541
	/* Make sure there is space in the ring for the next send. */
3542
	e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
3543
3544
	spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
3545
	return NETDEV_TX_OK;
3546
}
3547
3548
/**
3549
 * e1000_tx_timeout - Respond to a Tx Hang
3550
 * @netdev: network interface device structure
3551
 **/
3552
static void e1000_tx_timeout(struct net_device *netdev)
3553
{
3554
	struct e1000_adapter *adapter = netdev_priv(netdev);
3555
3556
	/* Do the reset outside of interrupt context */
3557
	adapter->tx_timeout_count++;
3558
	schedule_work(&adapter->reset_task);
3559
}
3560
3561
static void e1000_reset_task(struct work_struct *work)
3562
{
3563
	struct e1000_adapter *adapter;
3564
	adapter = container_of(work, struct e1000_adapter, reset_task);
3565
3566
	e1000e_reinit_locked(adapter);
3567
}
3568
3569
/**
3570
 * e1000_get_stats - Get System Network Statistics
3571
 * @netdev: network interface device structure
3572
 *
3573
 * Returns the address of the device statistics structure.
3574
 * The statistics are actually updated from the timer callback.
3575
 **/
3576
static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3577
{
3578
	struct e1000_adapter *adapter = netdev_priv(netdev);
3579
3580
	/* only return the current stats */
3581
	return &adapter->net_stats;
3582
}
3583
3584
/**
3585
 * e1000_change_mtu - Change the Maximum Transfer Unit
3586
 * @netdev: network interface device structure
3587
 * @new_mtu: new value for maximum frame size
3588
 *
3589
 * Returns 0 on success, negative on failure
3590
 **/
3591
static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3592
{
3593
	struct e1000_adapter *adapter = netdev_priv(netdev);
3594
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3595
3596
	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
3597
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3598
		ndev_err(netdev, "Invalid MTU setting\n");
3599
		return -EINVAL;
3600
	}
3601
3602
	/* Jumbo frame size limits */
3603
	if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
3604
		if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
3605
			ndev_err(netdev, "Jumbo Frames not supported.\n");
3606
			return -EINVAL;
3607
		}
3608
		if (adapter->hw.phy.type == e1000_phy_ife) {
3609
			ndev_err(netdev, "Jumbo Frames not supported.\n");
3610
			return -EINVAL;
3611
		}
3612
	}
3613
3614
#define MAX_STD_JUMBO_FRAME_SIZE 9234
3615
	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3616
		ndev_err(netdev, "MTU > 9216 not supported.\n");
3617
		return -EINVAL;
3618
	}
3619
3620
	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
3621
		msleep(1);
3622
	/* e1000e_down has a dependency on max_frame_size */
3623
	adapter->hw.mac.max_frame_size = max_frame;
3624
	if (netif_running(netdev))
3625
		e1000e_down(adapter);
3626
3627
	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3628
	 * means we reserve 2 more, this pushes us to allocate from the next
3629
	 * larger slab size.
3630
	 * i.e. RXBUFFER_2048 --> size-4096 slab
3631
	 *  however with the new *_jumbo* routines, jumbo receives will use
3632
	 *  fragmented skbs */
3633
3634
	if (max_frame <= 256)
3635
		adapter->rx_buffer_len = 256;
3636
	else if (max_frame <= 512)
3637
		adapter->rx_buffer_len = 512;
3638
	else if (max_frame <= 1024)
3639
		adapter->rx_buffer_len = 1024;
3640
	else if (max_frame <= 2048)
3641
		adapter->rx_buffer_len = 2048;
3642
	else
3643
		adapter->rx_buffer_len = 4096;
3644
3645
	/* adjust allocation if LPE protects us, and we aren't using SBP */
3646
	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
3647
	     (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
3648
		adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
3649
					 + ETH_FCS_LEN;
3650
3651
	ndev_info(netdev, "changing MTU from %d to %d\n",
3652
		netdev->mtu, new_mtu);
3653
	netdev->mtu = new_mtu;
3654
3655
	if (netif_running(netdev))
3656
		e1000e_up(adapter);
3657
	else
3658
		e1000e_reset(adapter);
3659
3660
	clear_bit(__E1000_RESETTING, &adapter->state);
3661
3662
	return 0;
3663
}
3664
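/* Illustrative note, not part of the original patch: walking the buffer
 * buckets above for the default MTU of 1500:
 *
 *	max_frame      = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518
 *	first match    = "max_frame <= 2048"  ->  rx_buffer_len = 2048
 *	LPE adjustment = 1518 == ETH_FRAME_LEN + ETH_FCS_LEN
 *	                 ->  rx_buffer_len = 1514 + 4 + 4 = 1522
 *
 * so standard-MTU receives end up in a tight 1522-byte buffer rather
 * than a 2 KiB one.
 */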
3665
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
3666
			   int cmd)
3667
{
3668
	struct e1000_adapter *adapter = netdev_priv(netdev);
3669
	struct mii_ioctl_data *data = if_mii(ifr);
3670
	unsigned long irq_flags;
3671
3672
	if (adapter->hw.media_type != e1000_media_type_copper)
3673
		return -EOPNOTSUPP;
3674
3675
	switch (cmd) {
3676
	case SIOCGMIIPHY:
3677
		data->phy_id = adapter->hw.phy.addr;
3678
		break;
3679
	case SIOCGMIIREG:
3680
		if (!capable(CAP_NET_ADMIN))
3681
			return -EPERM;
3682
		spin_lock_irqsave(&adapter->stats_lock, irq_flags);
3683
		if (e1e_rphy(&adapter->hw, data->reg_num & 0x1F,
3684
				   &data->val_out)) {
3685
			spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
3686
			return -EIO;
3687
		}
3688
		spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
3689
		break;
3690
	case SIOCSMIIREG:
3691
	default:
3692
		return -EOPNOTSUPP;
3693
	}
3694
	return 0;
3695
}
3696
3697
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
3698
{
3699
	switch (cmd) {
3700
	case SIOCGMIIPHY:
3701
	case SIOCGMIIREG:
3702
	case SIOCSMIIREG:
3703
		return e1000_mii_ioctl(netdev, ifr, cmd);
3704
	default:
3705
		return -EOPNOTSUPP;
3706
	}
3707
}
3708
3709
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
3710
{
3711
	struct net_device *netdev = pci_get_drvdata(pdev);
3712
	struct e1000_adapter *adapter = netdev_priv(netdev);
3713
	struct e1000_hw *hw = &adapter->hw;
3714
	u32 ctrl, ctrl_ext, rctl, status;
3715
	u32 wufc = adapter->wol;
3716
	int retval = 0;
3717
3718
	netif_device_detach(netdev);
3719
3720
	if (netif_running(netdev)) {
3721
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
3722
		e1000e_down(adapter);
3723
		e1000_free_irq(adapter);
3724
	}
3725
3726
	retval = pci_save_state(pdev);
3727
	if (retval)
3728
		return retval;
3729
3730
	status = er32(STATUS);
3731
	if (status & E1000_STATUS_LU)
3732
		wufc &= ~E1000_WUFC_LNKC;
3733
3734
	if (wufc) {
3735
		e1000_setup_rctl(adapter);
3736
		e1000_set_multi(netdev);
3737
3738
		/* turn on all-multi mode if wake on multicast is enabled */
3739
		if (wufc & E1000_WUFC_MC) {
3740
			rctl = er32(RCTL);
3741
			rctl |= E1000_RCTL_MPE;
3742
			ew32(RCTL, rctl);
3743
		}
3744
3745
		ctrl = er32(CTRL);
3746
		/* advertise wake from D3Cold */
3747
		#define E1000_CTRL_ADVD3WUC 0x00100000
3748
		/* phy power management enable */
3749
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
3750
		ctrl |= E1000_CTRL_ADVD3WUC |
3751
			E1000_CTRL_EN_PHY_PWR_MGMT;
3752
		ew32(CTRL, ctrl);
3753
3754
		if (adapter->hw.media_type == e1000_media_type_fiber ||
3755
		   adapter->hw.media_type == e1000_media_type_internal_serdes) {
3756
			/* keep the laser running in D3 */
3757
			ctrl_ext = er32(CTRL_EXT);
3758
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
3759
			ew32(CTRL_EXT, ctrl_ext);
3760
		}
3761
3762
		/* Allow time for pending master requests to run */
3763
		e1000e_disable_pcie_master(&adapter->hw);
3764
3765
		ew32(WUC, E1000_WUC_PME_EN);
3766
		ew32(WUFC, wufc);
3767
		pci_enable_wake(pdev, PCI_D3hot, 1);
3768
		pci_enable_wake(pdev, PCI_D3cold, 1);
3769
	} else {
3770
		ew32(WUC, 0);
3771
		ew32(WUFC, 0);
3772
		pci_enable_wake(pdev, PCI_D3hot, 0);
3773
		pci_enable_wake(pdev, PCI_D3cold, 0);
3774
	}
3775
3776
	e1000_release_manageability(adapter);
3777
3778
	/* make sure adapter isn't asleep if manageability is enabled */
3779
	if (adapter->flags & FLAG_MNG_PT_ENABLED) {
3780
		pci_enable_wake(pdev, PCI_D3hot, 1);
3781
		pci_enable_wake(pdev, PCI_D3cold, 1);
3782
	}
3783
3784
	if (adapter->hw.phy.type == e1000_phy_igp_3)
3785
		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
3786
3787
	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
3788
	 * would have already happened in close and is redundant. */
3789
	e1000_release_hw_control(adapter);
3790
3791
	pci_disable_device(pdev);
3792
3793
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
3794
3795
	return 0;
3796
}
3797
3798
#ifdef CONFIG_PM
3799
static int e1000_resume(struct pci_dev *pdev)
3800
{
3801
	struct net_device *netdev = pci_get_drvdata(pdev);
3802
	struct e1000_adapter *adapter = netdev_priv(netdev);
3803
	struct e1000_hw *hw = &adapter->hw;
3804
	int err;
3805
3806
	pci_set_power_state(pdev, PCI_D0);
3807
	pci_restore_state(pdev);
3808
	err = pci_enable_device(pdev);
3809
	if (err) {
3810
		dev_err(&pdev->dev,
3811
			"Cannot enable PCI device from suspend\n");
3812
		return err;
3813
	}
3814
3815
	pci_set_master(pdev);
3816
3817
	pci_enable_wake(pdev, PCI_D3hot, 0);
3818
	pci_enable_wake(pdev, PCI_D3cold, 0);
3819
3820
	if (netif_running(netdev)) {
3821
		err = e1000_request_irq(adapter);
3822
		if (err)
3823
			return err;
3824
	}
3825
3826
	e1000e_power_up_phy(adapter);
3827
	e1000e_reset(adapter);
3828
	ew32(WUS, ~0);
3829
3830
	e1000_init_manageability(adapter);
3831
3832
	if (netif_running(netdev))
3833
		e1000e_up(adapter);
3834
3835
	netif_device_attach(netdev);
3836
3837
	/* If the controller has AMT, do not set DRV_LOAD until the interface
3838
	 * is up.  For all other cases, let the f/w know that the h/w is now
3839
	 * under the control of the driver. */
3840
	if (!(adapter->flags & FLAG_HAS_AMT) || !e1000e_check_mng_mode(&adapter->hw))
3841
		e1000_get_hw_control(adapter);
3842
3843
	return 0;
3844
}
3845
#endif
3846
3847
static void e1000_shutdown(struct pci_dev *pdev)
3848
{
3849
	e1000_suspend(pdev, PMSG_SUSPEND);
3850
}
3851
3852
#ifdef CONFIG_NET_POLL_CONTROLLER
3853
/*
3854
 * Polling 'interrupt' - used by things like netconsole to send skbs
3855
 * without having to re-enable interrupts. It's not called while
3856
 * the interrupt routine is executing.
3857
 */
3858
static void e1000_netpoll(struct net_device *netdev)
3859
{
3860
	struct e1000_adapter *adapter = netdev_priv(netdev);
3861
3862
	disable_irq(adapter->pdev->irq);
3863
	e1000_intr(adapter->pdev->irq, netdev);
3864
3865
	e1000_clean_tx_irq(adapter);
3866
3867
	enable_irq(adapter->pdev->irq);
3868
}
3869
#endif
3870
3871
/**
3872
 * e1000_io_error_detected - called when PCI error is detected
3873
 * @pdev: Pointer to PCI device
3874
 * @state: The current pci connection state
3875
 *
3876
 * This function is called after a PCI bus error affecting
3877
 * this device has been detected.
3878
 */
3879
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
3880
						pci_channel_state_t state)
3881
{
3882
	struct net_device *netdev = pci_get_drvdata(pdev);
3883
	struct e1000_adapter *adapter = netdev_priv(netdev);
3884
3885
	netif_device_detach(netdev);
3886
3887
	if (netif_running(netdev))
3888
		e1000e_down(adapter);
3889
	pci_disable_device(pdev);
3890
3891
	/* Request a slot reset. */
3892
	return PCI_ERS_RESULT_NEED_RESET;
3893
}
3894
3895
/**
3896
 * e1000_io_slot_reset - called after the pci bus has been reset.
3897
 * @pdev: Pointer to PCI device
3898
 *
3899
 * Restart the card from scratch, as if from a cold-boot. Implementation
3900
 * resembles the first-half of the e1000_resume routine.
3901
 */
3902
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
3903
{
3904
	struct net_device *netdev = pci_get_drvdata(pdev);
3905
	struct e1000_adapter *adapter = netdev_priv(netdev);
3906
	struct e1000_hw *hw = &adapter->hw;
3907
3908
	if (pci_enable_device(pdev)) {
3909
		dev_err(&pdev->dev,
3910
			"Cannot re-enable PCI device after reset.\n");
3911
		return PCI_ERS_RESULT_DISCONNECT;
3912
	}
3913
	pci_set_master(pdev);
3914
3915
	pci_enable_wake(pdev, PCI_D3hot, 0);
3916
	pci_enable_wake(pdev, PCI_D3cold, 0);
3917
3918
	e1000e_reset(adapter);
3919
	ew32(WUS, ~0);
3920
3921
	return PCI_ERS_RESULT_RECOVERED;
3922
}
3923
3924
/**
3925
 * e1000_io_resume - called when traffic can start flowing again.
3926
 * @pdev: Pointer to PCI device
3927
 *
3928
 * This callback is called when the error recovery driver tells us that
3929
 * it's OK to resume normal operation. Implementation resembles the
3930
 * second-half of the e1000_resume routine.
3931
 */
3932
static void e1000_io_resume(struct pci_dev *pdev)
3933
{
3934
	struct net_device *netdev = pci_get_drvdata(pdev);
3935
	struct e1000_adapter *adapter = netdev_priv(netdev);
3936
3937
	e1000_init_manageability(adapter);
3938
3939
	if (netif_running(netdev)) {
3940
		if (e1000e_up(adapter)) {
3941
			dev_err(&pdev->dev,
3942
				"can't bring device back up after reset\n");
3943
			return;
3944
		}
3945
	}
3946
3947
	netif_device_attach(netdev);
3948
3949
	/* If the controller has AMT, do not set DRV_LOAD until the interface
3950
	 * is up.  For all other cases, let the f/w know that the h/w is now
3951
	 * under the control of the driver. */
3952
	if (!(adapter->flags & FLAG_HAS_AMT) ||
3953
	    !e1000e_check_mng_mode(&adapter->hw))
3954
		e1000_get_hw_control(adapter);
3955
3956
}
3957
3958
static void e1000_print_device_info(struct e1000_adapter *adapter)
3959
{
3960
	struct e1000_hw *hw = &adapter->hw;
3961
	struct net_device *netdev = adapter->netdev;
3962
	u32 part_num;
3963
3964
	/* print bus type/speed/width info */
3965
	ndev_info(netdev, "(PCI Express:2.5GB/s:%s) "
3966
		  "%02x:%02x:%02x:%02x:%02x:%02x\n",
3967
		  /* bus width */
3968
		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
3969
		  "Width x1"),
3970
		  /* MAC address */
3971
		  netdev->dev_addr[0], netdev->dev_addr[1],
3972
		  netdev->dev_addr[2], netdev->dev_addr[3],
3973
		  netdev->dev_addr[4], netdev->dev_addr[5]);
3974
	ndev_info(netdev, "Intel(R) PRO/%s Network Connection\n",
3975
		  (hw->phy.type == e1000_phy_ife)
3976
		   ? "10/100" : "1000");
3977
	e1000e_read_part_num(hw, &part_num);
3978
	ndev_info(netdev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
3979
		  hw->mac.type, hw->phy.type,
3980
		  (part_num >> 8), (part_num & 0xff));
3981
}
3982
3983
/**
3984
 * e1000_probe - Device Initialization Routine
3985
 * @pdev: PCI device information struct
3986
 * @ent: entry in e1000_pci_tbl
3987
 *
3988
 * Returns 0 on success, negative on failure
3989
 *
3990
 * e1000_probe initializes an adapter identified by a pci_dev structure.
3991
 * The OS initialization, configuring of the adapter private structure,
3992
 * and a hardware reset occur.
3993
 **/
3994
static int __devinit e1000_probe(struct pci_dev *pdev,
3995
				 const struct pci_device_id *ent)
3996
{
3997
	struct net_device *netdev;
3998
	struct e1000_adapter *adapter;
3999
	struct e1000_hw *hw;
4000
	const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
4001
	unsigned long mmio_start, mmio_len;
4002
	unsigned long flash_start, flash_len;
4003
4004
	static int cards_found;
4005
	int i, err, pci_using_dac;
4006
	u16 eeprom_data = 0;
4007
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
4008
4009
	err = pci_enable_device(pdev);
4010
	if (err)
4011
		return err;
4012
4013
	pci_using_dac = 0;
4014
	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
4015
	if (!err) {
4016
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
4017
		if (!err)
4018
			pci_using_dac = 1;
4019
	} else {
4020
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
4021
		if (err) {
4022
			err = pci_set_consistent_dma_mask(pdev,
4023
							  DMA_32BIT_MASK);
4024
			if (err) {
4025
				dev_err(&pdev->dev, "No usable DMA "
4026
					"configuration, aborting\n");
4027
				goto err_dma;
4028
			}
4029
		}
4030
	}
4031
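	/* Illustrative note, not part of the original patch: the ladder
	 * above prefers a full 64-bit DMA mask and only falls back to
	 * 32-bit if the platform refuses; pci_using_dac records which one
	 * stuck, and later gates NETIF_F_HIGHDMA so the stack only hands
	 * us buffers above 4 GB when the device can actually reach them.
	 */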
4032
	err = pci_request_regions(pdev, e1000e_driver_name);
4033
	if (err)
4034
		goto err_pci_reg;
4035
4036
	pci_set_master(pdev);
4037
4038
	err = -ENOMEM;
4039
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
4040
	if (!netdev)
4041
		goto err_alloc_etherdev;
4042
4043
	SET_MODULE_OWNER(netdev);
4044
	SET_NETDEV_DEV(netdev, &pdev->dev);
4045
4046
	pci_set_drvdata(pdev, netdev);
4047
	adapter = netdev_priv(netdev);
4048
	hw = &adapter->hw;
4049
	adapter->netdev = netdev;
4050
	adapter->pdev = pdev;
4051
	adapter->ei = ei;
4052
	adapter->pba = ei->pba;
4053
	adapter->flags = ei->flags;
4054
	adapter->hw.adapter = adapter;
4055
	adapter->hw.mac.type = ei->mac;
4056
	adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
4057
4058
	mmio_start = pci_resource_start(pdev, 0);
4059
	mmio_len = pci_resource_len(pdev, 0);
4060
4061
	err = -EIO;
4062
	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
4063
	if (!adapter->hw.hw_addr)
4064
		goto err_ioremap;
4065
4066
	if ((adapter->flags & FLAG_HAS_FLASH) &&
4067
	    (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
4068
		flash_start = pci_resource_start(pdev, 1);
4069
		flash_len = pci_resource_len(pdev, 1);
4070
		adapter->hw.flash_address = ioremap(flash_start, flash_len);
4071
		if (!adapter->hw.flash_address)
4072
			goto err_flashmap;
4073
	}
4074
4075
	/* construct the net_device struct */
4076
	netdev->open			= &e1000_open;
4077
	netdev->stop			= &e1000_close;
4078
	netdev->hard_start_xmit		= &e1000_xmit_frame;
4079
	netdev->get_stats		= &e1000_get_stats;
4080
	netdev->set_multicast_list	= &e1000_set_multi;
4081
	netdev->set_mac_address		= &e1000_set_mac;
4082
	netdev->change_mtu		= &e1000_change_mtu;
4083
	netdev->do_ioctl		= &e1000_ioctl;
4084
	e1000e_set_ethtool_ops(netdev);
4085
	netdev->tx_timeout		= &e1000_tx_timeout;
4086
	netdev->watchdog_timeo		= 5 * HZ;
4087
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
4088
	netdev->vlan_rx_register	= e1000_vlan_rx_register;
4089
	netdev->vlan_rx_add_vid		= e1000_vlan_rx_add_vid;
4090
	netdev->vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid;
4091
#ifdef CONFIG_NET_POLL_CONTROLLER
4092
	netdev->poll_controller		= e1000_netpoll;
4093
#endif
4094
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
4095
4096
	netdev->mem_start = mmio_start;
4097
	netdev->mem_end = mmio_start + mmio_len;
4098
4099
	adapter->bd_number = cards_found++;
4100
4101
	/* setup adapter struct */
4102
	err = e1000_sw_init(adapter);
4103
	if (err)
4104
		goto err_sw_init;
4105
4106
	err = -EIO;
4107
4108
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
4109
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
4110
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
4111
4112
	err = ei->get_invariants(adapter);
4113
	if (err)
4114
		goto err_hw_init;
4115
4116
	hw->mac.ops.get_bus_info(&adapter->hw);
4117
4118
	adapter->hw.phy.wait_for_link = 0;
4119
4120
	/* Copper options */
4121
	if (adapter->hw.media_type == e1000_media_type_copper) {
4122
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
4123
		adapter->hw.phy.disable_polarity_correction = 0;
4124
		adapter->hw.phy.ms_type = e1000_ms_hw_default;
4125
	}
4126
4127
	if (e1000_check_reset_block(&adapter->hw))
4128
		ndev_info(netdev,
4129
			  "PHY reset is blocked due to SOL/IDER session.\n");
4130
4131
	netdev->features = NETIF_F_SG |
4132
			   NETIF_F_HW_CSUM |
4133
			   NETIF_F_HW_VLAN_TX |
4134
			   NETIF_F_HW_VLAN_RX;
4135
4136
	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
4137
		netdev->features |= NETIF_F_HW_VLAN_FILTER;
4138
4139
	netdev->features |= NETIF_F_TSO;
4140
	netdev->features |= NETIF_F_TSO6;
4141
4142
	if (pci_using_dac)
4143
		netdev->features |= NETIF_F_HIGHDMA;
4144
4145
	/* We should not be using LLTX anymore, but TX is still faster with
4146
	 * it. */
4147
	netdev->features |= NETIF_F_LLTX;
4148
4149
	if (e1000e_enable_mng_pass_thru(&adapter->hw))
4150
		adapter->flags |= FLAG_MNG_PT_ENABLED;
4151
4152
	/* before reading the NVM, reset the controller to
4153
	 * put the device in a known good starting state */
4154
	adapter->hw.mac.ops.reset_hw(&adapter->hw);
4155
4156
	/*
4157
	 * systems with ASPM and others may see the checksum fail on the first
4158
	 * attempt. Let's give it a few tries
4159
	 */
4160
	for (i = 0;; i++) {
4161
		if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
4162
			break;
4163
		if (i == 2) {
4164
			ndev_err(netdev, "The NVM Checksum Is Not Valid\n");
4165
			err = -EIO;
4166
			goto err_eeprom;
4167
		}
4168
	}
4169
4170
	/* copy the MAC address out of the NVM */
4171
	if (e1000e_read_mac_addr(&adapter->hw))
4172
		ndev_err(netdev, "NVM Read Error while reading MAC address\n");
4173
4174
	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
4175
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
4176
4177
	if (!is_valid_ether_addr(netdev->perm_addr)) {
4178
		ndev_err(netdev, "Invalid MAC Address: "
4179
			 "%02x:%02x:%02x:%02x:%02x:%02x\n",
4180
			 netdev->perm_addr[0], netdev->perm_addr[1],
4181
			 netdev->perm_addr[2], netdev->perm_addr[3],
4182
			 netdev->perm_addr[4], netdev->perm_addr[5]);
4183
		err = -EIO;
4184
		goto err_eeprom;
4185
	}
4186
4187
	init_timer(&adapter->watchdog_timer);
4188
	adapter->watchdog_timer.function = &e1000_watchdog;
4189
	adapter->watchdog_timer.data = (unsigned long) adapter;
4190
4191
	init_timer(&adapter->phy_info_timer);
4192
	adapter->phy_info_timer.function = &e1000_update_phy_info;
4193
	adapter->phy_info_timer.data = (unsigned long) adapter;
4194
4195
	INIT_WORK(&adapter->reset_task, e1000_reset_task);
4196
	INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
4197
4198
	e1000e_check_options(adapter);
4199
4200
	/* Initialize link parameters. User can change them with ethtool */
4201
	adapter->hw.mac.autoneg = 1;
4202
	adapter->hw.mac.original_fc = e1000_fc_default;
4203
	adapter->hw.mac.fc = e1000_fc_default;
4204
	adapter->hw.phy.autoneg_advertised = 0x2f;
4205
4206
	/* ring size defaults */
4207
	adapter->rx_ring->count = 256;
4208
	adapter->tx_ring->count = 256;
4209
4210
	/*
4211
	 * Initial Wake on LAN setting - If APM wake is enabled in
4212
	 * the EEPROM, enable the ACPI Magic Packet filter
4213
	 */
4214
	if (adapter->flags & FLAG_APME_IN_WUC) {
4215
		/* APME bit in EEPROM is mapped to WUC.APME */
4216
		eeprom_data = er32(WUC);
4217
		eeprom_apme_mask = E1000_WUC_APME;
4218
	} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
4219
		if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
4220
		    (adapter->hw.bus.func == 1))
4221
			e1000_read_nvm(&adapter->hw,
4222
				NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
4223
		else
4224
			e1000_read_nvm(&adapter->hw,
4225
				NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
4226
	}
4227
4228
	/* fetch WoL from EEPROM */
4229
	if (eeprom_data & eeprom_apme_mask)
4230
		adapter->eeprom_wol |= E1000_WUFC_MAG;
4231
4232
	/*
4233
	 * now that we have the eeprom settings, apply the special cases
4234
	 * where the eeprom may be wrong or the board simply won't support
4235
	 * wake on lan on a particular port
4236
	 */
4237
	if (!(adapter->flags & FLAG_HAS_WOL))
4238
		adapter->eeprom_wol = 0;
4239
4240
	/* initialize the wol settings based on the eeprom settings */
4241
	adapter->wol = adapter->eeprom_wol;
4242
4243
	/* reset the hardware with the new settings */
4244
	e1000e_reset(adapter);
4245
4246
	/* If the controller has AMT, do not set DRV_LOAD until the interface
4247
	 * is up.  For all other cases, let the f/w know that the h/w is now
4248
	 * under the control of the driver. */
4249
	if (!(adapter->flags & FLAG_HAS_AMT) ||
4250
	    !e1000e_check_mng_mode(&adapter->hw))
4251
		e1000_get_hw_control(adapter);
4252
4253
	/* tell the stack to leave us alone until e1000_open() is called */
4254
	netif_carrier_off(netdev);
4255
	netif_stop_queue(netdev);
4256
4257
	strcpy(netdev->name, "eth%d");
4258
	err = register_netdev(netdev);
4259
	if (err)
4260
		goto err_register;
4261
4262
	e1000_print_device_info(adapter);
4263
4264
	return 0;
4265
4266
err_register:
4267
err_hw_init:
4268
	e1000_release_hw_control(adapter);
4269
err_eeprom:
4270
	if (!e1000_check_reset_block(&adapter->hw))
4271
		e1000_phy_hw_reset(&adapter->hw);
4272
4273
	if (adapter->hw.flash_address)
4274
		iounmap(adapter->hw.flash_address);
4275
4276
err_flashmap:
4277
	kfree(adapter->tx_ring);
4278
	kfree(adapter->rx_ring);
4279
err_sw_init:
4280
	iounmap(adapter->hw.hw_addr);
4281
err_ioremap:
4282
	free_netdev(netdev);
4283
err_alloc_etherdev:
4284
	pci_release_regions(pdev);
4285
err_pci_reg:
4286
err_dma:
4287
	pci_disable_device(pdev);
4288
	return err;
4289
}
4290
4291
/**
4292
 * e1000_remove - Device Removal Routine
4293
 * @pdev: PCI device information struct
4294
 *
4295
 * e1000_remove is called by the PCI subsystem to alert the driver
4296
 * that it should release a PCI device.  This could be caused by a
4297
 * Hot-Plug event, or because the driver is going to be removed from
4298
 * memory.
4299
 **/
4300
static void __devexit e1000_remove(struct pci_dev *pdev)
4301
{
4302
	struct net_device *netdev = pci_get_drvdata(pdev);
4303
	struct e1000_adapter *adapter = netdev_priv(netdev);
4304
4305
	/* flush_scheduled_work() may reschedule our watchdog task, so
4306
	 * explicitly disable watchdog tasks from being rescheduled  */
4307
	set_bit(__E1000_DOWN, &adapter->state);
4308
	del_timer_sync(&adapter->watchdog_timer);
4309
	del_timer_sync(&adapter->phy_info_timer);
4310
4311
	flush_scheduled_work();
4312
4313
	e1000_release_manageability(adapter);
4314
4315
	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
4316
	 * would have already happened in close and is redundant. */
4317
	e1000_release_hw_control(adapter);
4318
4319
	unregister_netdev(netdev);
4320
4321
	if (!e1000_check_reset_block(&adapter->hw))
4322
		e1000_phy_hw_reset(&adapter->hw);
4323
4324
	kfree(adapter->tx_ring);
4325
	kfree(adapter->rx_ring);
4326
4327
	iounmap(adapter->hw.hw_addr);
4328
	if (adapter->hw.flash_address)
4329
		iounmap(adapter->hw.flash_address);
4330
	pci_release_regions(pdev);
4331
4332
	free_netdev(netdev);
4333
4334
	pci_disable_device(pdev);
4335
}
4336
4337
/* PCI Error Recovery (ERS) */
4338
static struct pci_error_handlers e1000_err_handler = {
4339
	.error_detected = e1000_io_error_detected,
4340
	.slot_reset = e1000_io_slot_reset,
4341
	.resume = e1000_io_resume,
4342
};
4343
4344
static struct pci_device_id e1000_pci_tbl[] = {
4345
	/*
4346
	 * Support for 82571/2/3, es2lan and ich8 will be phased in
4347
	 * stepwise.
4348
4349
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
4350
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
4351
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
4352
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
4353
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
4354
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
4355
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
4356
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
4357
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
4358
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
4359
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
4360
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
4361
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
4362
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
4363
	  board_80003es2lan },
4364
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
4365
	  board_80003es2lan },
4366
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
4367
	  board_80003es2lan },
4368
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
4369
	  board_80003es2lan },
4370
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
4371
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
4372
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
4373
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
4374
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
4375
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
4376
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
4377
	*/
4378
4379
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
4380
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
4381
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
4382
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
4383
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
4384
4385
	{ }	/* terminate list */
4386
};
4387
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
4388
4389
/* PCI Device API Driver */
4390
static struct pci_driver e1000_driver = {
4391
	.name     = e1000e_driver_name,
4392
	.id_table = e1000_pci_tbl,
4393
	.probe    = e1000_probe,
4394
	.remove   = __devexit_p(e1000_remove),
4395
#ifdef CONFIG_PM
4396
	/* Power Management Hooks */
4397
	.suspend  = e1000_suspend,
4398
	.resume   = e1000_resume,
4399
#endif
4400
	.shutdown = e1000_shutdown,
4401
	.err_handler = &e1000_err_handler
4402
};
4403
4404
/**
4405
 * e1000_init_module - Driver Registration Routine
4406
 *
4407
 * e1000_init_module is the first routine called when the driver is
4408
 * loaded. All it does is register with the PCI subsystem.
4409
 **/
4410
static int __init e1000_init_module(void)
4411
{
4412
	int ret;
4413
	printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n",
4414
	       e1000e_driver_name, e1000e_driver_version);
4415
	printk(KERN_INFO "%s: Copyright (c) 1999-2007 Intel Corporation.\n",
4416
	       e1000e_driver_name);
4417
	ret = pci_register_driver(&e1000_driver);
4418
4419
	return ret;
4420
}
4421
module_init(e1000_init_module);
4422
4423
/**
4424
 * e1000_exit_module - Driver Exit Cleanup Routine
4425
 *
4426
 * e1000_exit_module is called just before the driver is removed
4427
 * from memory.
4428
 **/
4429
static void __exit e1000_exit_module(void)
4430
{
4431
	pci_unregister_driver(&e1000_driver);
4432
}
4433
module_exit(e1000_exit_module);
4434
4435
4436
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
4437
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
4438
MODULE_LICENSE("GPL");
4439
MODULE_VERSION(DRV_VERSION);
4440
4441
/* e1000_main.c */
(-)a/drivers/net/e1000e/param.c (+382 lines)
Line 0 Link Here
1
/*******************************************************************************
2
3
  Intel PRO/1000 Linux driver
4
  Copyright(c) 1999 - 2007 Intel Corporation.
5
6
  This program is free software; you can redistribute it and/or modify it
7
  under the terms and conditions of the GNU General Public License,
8
  version 2, as published by the Free Software Foundation.
9
10
  This program is distributed in the hope it will be useful, but WITHOUT
11
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13
  more details.
14
15
  You should have received a copy of the GNU General Public License along with
16
  this program; if not, write to the Free Software Foundation, Inc.,
17
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19
  The full GNU General Public License is included in this distribution in
20
  the file called "COPYING".
21
22
  Contact Information:
23
  Linux NICS <linux.nics@intel.com>
24
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27
*******************************************************************************/
28
29
#include <linux/netdevice.h>
30
31
#include "e1000.h"
32
33
/* This is the only thing that needs to be changed to adjust the
34
 * maximum number of ports that the driver can manage.
35
 */
36
37
#define E1000_MAX_NIC 32
38
39
#define OPTION_UNSET   -1
40
#define OPTION_DISABLED 0
41
#define OPTION_ENABLED  1
42
43
#define COPYBREAK_DEFAULT 256
44
unsigned int copybreak = COPYBREAK_DEFAULT;
45
module_param(copybreak, uint, 0644);
46
MODULE_PARM_DESC(copybreak,
47
	"Maximum size of packet that is copied to a new buffer on receive");
48
49
/* All parameters are treated the same, as an integer array of values.
 * This macro just reduces the need to repeat the same declaration code
 * over and over (plus this helps to avoid typo bugs).
 */

#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
#define E1000_PARAM(X, desc) \
	static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
	static int num_##X; \
	module_param_array_named(X, X, int, &num_##X, 0); \
	MODULE_PARM_DESC(X, desc);

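/*
 * For illustration (editorial, not in the original patch):
 * E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay") expands to roughly
 * the following, giving each port its own slot in the array plus a count
 * of how many values were supplied on the module command line:
 *
 *	static int __devinitdata TxIntDelay[E1000_MAX_NIC+1] =
 *		{ [0 ... E1000_MAX_NIC] = OPTION_UNSET };
 *	static int num_TxIntDelay;
 *	module_param_array_named(TxIntDelay, TxIntDelay, int,
 *				 &num_TxIntDelay, 0);
 *	MODULE_PARM_DESC(TxIntDelay, "Transmit Interrupt Delay");
 *
 * so a user can pass per-port values, e.g.:
 *
 *	modprobe e1000e TxIntDelay=8,16
 */
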
/* Transmit Interrupt Delay in units of 1.024 microseconds
 *  Tx interrupt delay typically needs to be set to something non-zero
 *
 * Valid Range: 0-65535
 */
E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
#define DEFAULT_TIDV 8
#define MAX_TXDELAY 0xFFFF
#define MIN_TXDELAY 0

/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
 *
 * Valid Range: 0-65535
 */
E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
#define DEFAULT_TADV 32
#define MAX_TXABSDELAY 0xFFFF
#define MIN_TXABSDELAY 0

/* Receive Interrupt Delay in units of 1.024 microseconds
 *   The hardware will likely hang if you set this to anything but zero.
 *
 * Valid Range: 0-65535
 */
E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
#define DEFAULT_RDTR 0
#define MAX_RXDELAY 0xFFFF
#define MIN_RXDELAY 0

/* Receive Absolute Interrupt Delay in units of 1.024 microseconds
 *
 * Valid Range: 0-65535
 */
E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
#define DEFAULT_RADV 8
#define MAX_RXABSDELAY 0xFFFF
#define MIN_RXABSDELAY 0

/* Interrupt Throttle Rate (interrupts/sec)
 *
 * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
 */
E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
#define DEFAULT_ITR 3
#define MAX_ITR 100000
#define MIN_ITR 100

/* Enable Smart Power Down of the PHY
 *
 * Valid Range: 0, 1
 *
 * Default Value: 0 (disabled)
 */
E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");

/* Enable Kumeran Lock Loss workaround
 *
 * Valid Range: 0, 1
 *
 * Default Value: 1 (enabled)
 */
E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");

struct e1000_option {
	enum { enable_option, range_option, list_option } type;
	char *name;
	char *err;
	int  def;
	union {
		struct { /* range_option info */
			int min;
			int max;
		} r;
		struct { /* list_option info */
			int nr;
			struct e1000_opt_list { int i; char *str; } *p;
		} l;
	} arg;
};

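/*
 * Illustrative sketch (editorial): no option in this file uses
 * list_option, but one accepting just 10 and 100 would be declared
 * along these lines:
 *
 *	static struct e1000_opt_list speed_list[] = {
 *		{  10, "Speed set to 10 Mbps" },
 *		{ 100, "Speed set to 100 Mbps" }
 *	};
 *	struct e1000_option opt = {
 *		.type = list_option,
 *		.name = "Speed",
 *		.err  = "using default of 0 (auto-negotiate)",
 *		.def  = 0,
 *		.arg  = { .l = { .nr = ARRAY_SIZE(speed_list),
 *				 .p  = speed_list } }
 *	};
 */
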
static int __devinit e1000_validate_option(int *value,
					   struct e1000_option *opt,
					   struct e1000_adapter *adapter)
{
	if (*value == OPTION_UNSET) {
		*value = opt->def;
		return 0;
	}

	switch (opt->type) {
	case enable_option:
		switch (*value) {
		case OPTION_ENABLED:
			ndev_info(adapter->netdev, "%s Enabled\n", opt->name);
			return 0;
		case OPTION_DISABLED:
			ndev_info(adapter->netdev, "%s Disabled\n", opt->name);
			return 0;
		}
		break;
	case range_option:
		if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
			ndev_info(adapter->netdev,
					"%s set to %i\n", opt->name, *value);
			return 0;
		}
		break;
	case list_option: {
		int i;
		struct e1000_opt_list *ent;

		for (i = 0; i < opt->arg.l.nr; i++) {
			ent = &opt->arg.l.p[i];
			if (*value == ent->i) {
				if (ent->str[0] != '\0')
					ndev_info(adapter->netdev, "%s\n",
						  ent->str);
				return 0;
			}
		}
	}
		break;
	default:
		BUG();
	}

	ndev_info(adapter->netdev, "Invalid %s value specified (%i) %s\n",
	       opt->name, *value, opt->err);
	*value = opt->def;
	return -1;
}

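/*
 * Sketch (editorial, not in the original patch): callers may also check
 * the return value; 0 means the value was accepted (or defaulted from
 * OPTION_UNSET), -1 means it was invalid and has been overwritten with
 * opt->def:
 *
 *	if (e1000_validate_option(&val, &opt, adapter))
 *		ndev_info(adapter->netdev, "falling back to %i\n", val);
 */
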
/**
 * e1000e_check_options - Range Checking for Command Line Parameters
 * @adapter: board private structure
 *
 * This routine checks all command line parameters for valid user
 * input.  If an invalid value is given, or if no user specified
 * value exists, a default value is used.  The final value is stored
 * in a variable in the adapter structure.
 **/
void __devinit e1000e_check_options(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int bd = adapter->bd_number;

	if (bd >= E1000_MAX_NIC) {
		ndev_notice(netdev,
		       "Warning: no configuration for board #%i\n", bd);
		ndev_notice(netdev, "Using defaults for all values\n");
	}

	{ /* Transmit Interrupt Delay */
		struct e1000_option opt = {
			.type = range_option,
			.name = "Transmit Interrupt Delay",
			.err  = "using default of "
				__MODULE_STRING(DEFAULT_TIDV),
			.def  = DEFAULT_TIDV,
			.arg  = { .r = { .min = MIN_TXDELAY,
					 .max = MAX_TXDELAY } }
		};

		if (num_TxIntDelay > bd) {
			adapter->tx_int_delay = TxIntDelay[bd];
			e1000_validate_option(&adapter->tx_int_delay, &opt,
					      adapter);
		} else {
			adapter->tx_int_delay = opt.def;
		}
	}
	{ /* Transmit Absolute Interrupt Delay */
		struct e1000_option opt = {
			.type = range_option,
			.name = "Transmit Absolute Interrupt Delay",
			.err  = "using default of "
				__MODULE_STRING(DEFAULT_TADV),
			.def  = DEFAULT_TADV,
			.arg  = { .r = { .min = MIN_TXABSDELAY,
					 .max = MAX_TXABSDELAY } }
		};

		if (num_TxAbsIntDelay > bd) {
			adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
			e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
					      adapter);
		} else {
			adapter->tx_abs_int_delay = opt.def;
		}
	}
	{ /* Receive Interrupt Delay */
		struct e1000_option opt = {
			.type = range_option,
			.name = "Receive Interrupt Delay",
			.err  = "using default of "
				__MODULE_STRING(DEFAULT_RDTR),
			.def  = DEFAULT_RDTR,
			.arg  = { .r = { .min = MIN_RXDELAY,
					 .max = MAX_RXDELAY } }
		};

		/* Modify min and default for the 82573 slow-ping
		 * workaround; RDTR must be set to a value greater than 8 */
		if (adapter->flags & FLAG_HAS_ASPM) {
			opt.def = 32;
			opt.arg.r.min = 8;
		}

		if (num_RxIntDelay > bd) {
			adapter->rx_int_delay = RxIntDelay[bd];
			e1000_validate_option(&adapter->rx_int_delay, &opt,
					      adapter);
		} else {
			adapter->rx_int_delay = opt.def;
		}
	}
	{ /* Receive Absolute Interrupt Delay */
		struct e1000_option opt = {
			.type = range_option,
			.name = "Receive Absolute Interrupt Delay",
			.err  = "using default of "
				__MODULE_STRING(DEFAULT_RADV),
			.def  = DEFAULT_RADV,
			.arg  = { .r = { .min = MIN_RXABSDELAY,
					 .max = MAX_RXABSDELAY } }
		};

		if (num_RxAbsIntDelay > bd) {
			adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
			e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
					      adapter);
		} else {
			adapter->rx_abs_int_delay = opt.def;
		}
	}
	{ /* Interrupt Throttling Rate */
		struct e1000_option opt = {
			.type = range_option,
			.name = "Interrupt Throttling Rate (ints/sec)",
			.err  = "using default of "
				__MODULE_STRING(DEFAULT_ITR),
			.def  = DEFAULT_ITR,
			.arg  = { .r = { .min = MIN_ITR,
					 .max = MAX_ITR } }
		};

		if (num_InterruptThrottleRate > bd) {
			adapter->itr = InterruptThrottleRate[bd];
			switch (adapter->itr) {
			case 0:
				ndev_info(netdev, "%s turned off\n",
					opt.name);
				break;
			case 1:
				ndev_info(netdev,
					  "%s set to dynamic mode\n",
					  opt.name);
				adapter->itr_setting = adapter->itr;
				adapter->itr = 20000;
				break;
			case 3:
				ndev_info(netdev,
					"%s set to dynamic conservative mode\n",
					opt.name);
				adapter->itr_setting = adapter->itr;
				adapter->itr = 20000;
				break;
			default:
				e1000_validate_option(&adapter->itr, &opt,
					adapter);
				/*
				 * Save the setting, because the dynamic bits
				 * change itr.  Clear the lower two bits
				 * because they are used as control.
				 */
				adapter->itr_setting = adapter->itr & ~3;
				break;
			}
		} else {
			adapter->itr_setting = opt.def;
			adapter->itr = 20000;
		}
	}
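	/*
	 * Worked example (editorial): loading the driver with
	 *
	 *	modprobe e1000e InterruptThrottleRate=3,0
	 *
	 * leaves port 0 in dynamic conservative mode (itr_setting is 3,
	 * with an initial rate of 20000 ints/sec) and turns throttling
	 * off entirely on port 1.
	 */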
	{ /* Smart Power Down */
		struct e1000_option opt = {
			.type = enable_option,
			.name = "PHY Smart Power Down",
			.err  = "defaulting to Disabled",
			.def  = OPTION_DISABLED
		};

		if (num_SmartPowerDownEnable > bd) {
			int spd = SmartPowerDownEnable[bd];
			e1000_validate_option(&spd, &opt, adapter);
			if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN)
			    && spd)
				adapter->flags |= FLAG_SMART_POWER_DOWN;
		}
	}
	{ /* Kumeran Lock Loss Workaround */
		struct e1000_option opt = {
			.type = enable_option,
			.name = "Kumeran Lock Loss Workaround",
			.err  = "defaulting to Enabled",
			.def  = OPTION_ENABLED
		};

		if (num_KumeranLockLoss > bd) {
			int kmrn_lock_loss = KumeranLockLoss[bd];
			e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
			if (hw->mac.type == e1000_ich8lan)
				e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
								kmrn_lock_loss);
		} else {
			if (hw->mac.type == e1000_ich8lan)
				e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
								       opt.def);
		}
	}
}
(-)a/drivers/net/e1000e/phy.c (+1773 lines)
/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2007 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/delay.h>

#include "e1000.h"

static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw);
static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw);
static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
static s32 e1000_wait_autoneg(struct e1000_hw *hw);

/* Cable length tables */
static const u16 e1000_m88_cable_length_table[] =
	{ 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };

static const u16 e1000_igp_2_cable_length_table[] =
	{ 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3,
	  6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22,
	  26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40,
	  44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61,
	  66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82,
	  87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95,
	  100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121,
	  124};
#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
		(sizeof(e1000_igp_2_cable_length_table) / \
		 sizeof(e1000_igp_2_cable_length_table[0]))

/**
 *  e1000e_check_reset_block_generic - Check if PHY reset is blocked
 *  @hw: pointer to the HW structure
 *
 *  Read the PHY management control register and check whether a PHY reset
 *  is blocked.  If a reset is not blocked return 0, otherwise
 *  return E1000_BLK_PHY_RESET (12).
 **/
s32 e1000e_check_reset_block_generic(struct e1000_hw *hw)
{
	u32 manc;

	manc = er32(MANC);

	return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
	       E1000_BLK_PHY_RESET : 0;
}

/**
 *  e1000e_get_phy_id - Retrieve the PHY ID and revision
 *  @hw: pointer to the HW structure
 *
 *  Reads the PHY registers and stores the PHY ID and possibly the PHY
 *  revision in the hardware structure.
 **/
s32 e1000e_get_phy_id(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_id;

	ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
	if (ret_val)
		return ret_val;

	phy->id = (u32)(phy_id << 16);
	udelay(20);
	ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
	if (ret_val)
		return ret_val;

	phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
	phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);

	return 0;
}

/**
 *  e1000e_phy_reset_dsp - Reset PHY DSP
 *  @hw: pointer to the HW structure
 *
 *  Reset the digital signal processor.
 **/
s32 e1000e_phy_reset_dsp(struct e1000_hw *hw)
{
	s32 ret_val;

	ret_val = e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
	if (ret_val)
		return ret_val;

	return e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0);
}

/**
 *  e1000_read_phy_reg_mdic - Read MDI control register
 *  @hw: pointer to the HW structure
 *  @offset: register offset to be read
 *  @data: pointer to the read data
 *
 *  Reads the MDI control register in the PHY at offset and stores the
 *  information read to data.
 **/
static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 i, mdic = 0;

	if (offset > MAX_PHY_REG_ADDRESS) {
		hw_dbg(hw, "PHY Address %d is out of range\n", offset);
		return -E1000_ERR_PARAM;
	}

	/* Set up Op-code, Phy Address, and register offset in the MDI
	 * Control register.  The MAC will take care of interfacing with the
	 * PHY to retrieve the desired data.
	 */
	mdic = ((offset << E1000_MDIC_REG_SHIFT) |
		(phy->addr << E1000_MDIC_PHY_SHIFT) |
		(E1000_MDIC_OP_READ));

	ew32(MDIC, mdic);

	/* Poll the ready bit to see if the MDI read completed */
	for (i = 0; i < 64; i++) {
		udelay(50);
		mdic = er32(MDIC);
		if (mdic & E1000_MDIC_READY)
			break;
	}
	if (!(mdic & E1000_MDIC_READY)) {
		hw_dbg(hw, "MDI Read did not complete\n");
		return -E1000_ERR_PHY;
	}
	if (mdic & E1000_MDIC_ERROR) {
		hw_dbg(hw, "MDI Error\n");
		return -E1000_ERR_PHY;
	}
	*data = (u16) mdic;

	return 0;
}

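/*
 * Timing note (editorial): the read poll above allows 64 iterations at
 * 50 usec each, i.e. roughly 3.2 ms worst case before the read is
 * declared failed; the write path below instead polls
 * E1000_GEN_POLL_TIMEOUT times at 5 usec per iteration.
 */
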
/**
 *  e1000_write_phy_reg_mdic - Write MDI control register
 *  @hw: pointer to the HW structure
 *  @offset: register offset to write to
 *  @data: data to write to register at offset
 *
 *  Writes data to MDI control register in the PHY at offset.
 **/
static s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 i, mdic = 0;

	if (offset > MAX_PHY_REG_ADDRESS) {
		hw_dbg(hw, "PHY Address %d is out of range\n", offset);
		return -E1000_ERR_PARAM;
	}

	/* Set up Op-code, Phy Address, and register offset in the MDI
	 * Control register.  The MAC will take care of interfacing with the
	 * PHY to write the desired data.
	 */
	mdic = (((u32)data) |
		(offset << E1000_MDIC_REG_SHIFT) |
		(phy->addr << E1000_MDIC_PHY_SHIFT) |
		(E1000_MDIC_OP_WRITE));

	ew32(MDIC, mdic);

	/* Poll the ready bit to see if the MDI write completed */
	for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
		udelay(5);
		mdic = er32(MDIC);
		if (mdic & E1000_MDIC_READY)
			break;
	}
	if (!(mdic & E1000_MDIC_READY)) {
		hw_dbg(hw, "MDI Write did not complete\n");
		return -E1000_ERR_PHY;
	}

	return 0;
}

/**
 *  e1000e_read_phy_reg_m88 - Read m88 PHY register
 *  @hw: pointer to the HW structure
 *  @offset: register offset to be read
 *  @data: pointer to the read data
 *
 *  Acquires the semaphore, if necessary, then reads the PHY register at
 *  offset and stores the retrieved information in data.  Releases any
 *  acquired semaphores before exiting.
 **/
s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data)
{
	s32 ret_val;

	ret_val = hw->phy.ops.acquire_phy(hw);
	if (ret_val)
		return ret_val;

	ret_val = e1000_read_phy_reg_mdic(hw,
					  MAX_PHY_REG_ADDRESS & offset,
					  data);

	hw->phy.ops.release_phy(hw);

	return ret_val;
}

/**
 *  e1000e_write_phy_reg_m88 - Write m88 PHY register
 *  @hw: pointer to the HW structure
 *  @offset: register offset to write to
 *  @data: data to write at register offset
 *
 *  Acquires the semaphore, if necessary, then writes the data to the PHY
 *  register at the offset.  Releases any acquired semaphores before exiting.
 **/
s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
{
	s32 ret_val;

	ret_val = hw->phy.ops.acquire_phy(hw);
	if (ret_val)
		return ret_val;

	ret_val = e1000_write_phy_reg_mdic(hw,
					   MAX_PHY_REG_ADDRESS & offset,
					   data);

	hw->phy.ops.release_phy(hw);

	return ret_val;
}

/**
 *  e1000e_read_phy_reg_igp - Read igp PHY register
 *  @hw: pointer to the HW structure
 *  @offset: register offset to be read
 *  @data: pointer to the read data
 *
 *  Acquires the semaphore, if necessary, then reads the PHY register at
 *  offset and stores the retrieved information in data.  Releases any
 *  acquired semaphores before exiting.
 **/
s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
{
	s32 ret_val;

	ret_val = hw->phy.ops.acquire_phy(hw);
	if (ret_val)
		return ret_val;

	if (offset > MAX_PHY_MULTI_PAGE_REG) {
		ret_val = e1000_write_phy_reg_mdic(hw,
						   IGP01E1000_PHY_PAGE_SELECT,
						   (u16)offset);
		if (ret_val) {
			hw->phy.ops.release_phy(hw);
			return ret_val;
		}
	}

	ret_val = e1000_read_phy_reg_mdic(hw,
					  MAX_PHY_REG_ADDRESS & offset,
					  data);

	hw->phy.ops.release_phy(hw);

	return ret_val;
}

/**
 *  e1000e_write_phy_reg_igp - Write igp PHY register
 *  @hw: pointer to the HW structure
 *  @offset: register offset to write to
 *  @data: data to write at register offset
 *
 *  Acquires the semaphore, if necessary, then writes the data to the PHY
 *  register at the offset.  Releases any acquired semaphores before exiting.
 **/
s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
{
	s32 ret_val;

	ret_val = hw->phy.ops.acquire_phy(hw);
	if (ret_val)
		return ret_val;

	if (offset > MAX_PHY_MULTI_PAGE_REG) {
		ret_val = e1000_write_phy_reg_mdic(hw,
						   IGP01E1000_PHY_PAGE_SELECT,
						   (u16)offset);
		if (ret_val) {
			hw->phy.ops.release_phy(hw);
			return ret_val;
		}
	}

	ret_val = e1000_write_phy_reg_mdic(hw,
					   MAX_PHY_REG_ADDRESS & offset,
					   data);

	hw->phy.ops.release_phy(hw);

	return ret_val;
}

/**
 *  e1000e_read_kmrn_reg - Read kumeran register
 *  @hw: pointer to the HW structure
 *  @offset: register offset to be read
 *  @data: pointer to the read data
 *
 *  Acquires the semaphore, if necessary.  Then reads the PHY register at
 *  offset using the kumeran interface.  The information retrieved is stored
 *  in data.  Releases any acquired semaphores before exiting.
 **/
s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
{
	u32 kmrnctrlsta;
	s32 ret_val;

	ret_val = hw->phy.ops.acquire_phy(hw);
	if (ret_val)
		return ret_val;

	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
		       E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
	ew32(KMRNCTRLSTA, kmrnctrlsta);

	udelay(2);

	kmrnctrlsta = er32(KMRNCTRLSTA);
	*data = (u16)kmrnctrlsta;

	hw->phy.ops.release_phy(hw);

	return ret_val;
}

/**
 *  e1000e_write_kmrn_reg - Write kumeran register
 *  @hw: pointer to the HW structure
 *  @offset: register offset to write to
 *  @data: data to write at register offset
 *
 *  Acquires the semaphore, if necessary.  Then writes the data to the PHY
 *  register at the offset using the kumeran interface.  Releases any
 *  acquired semaphores before exiting.
 **/
s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
{
	u32 kmrnctrlsta;
	s32 ret_val;

	ret_val = hw->phy.ops.acquire_phy(hw);
	if (ret_val)
		return ret_val;

	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
		       E1000_KMRNCTRLSTA_OFFSET) | data;
	ew32(KMRNCTRLSTA, kmrnctrlsta);

	udelay(2);
	hw->phy.ops.release_phy(hw);

	return ret_val;
}

/**
 *  e1000e_copper_link_setup_m88 - Setup m88 PHYs for copper link
 *  @hw: pointer to the HW structure
 *
 *  Sets up MDI/MDI-X and polarity for m88 PHYs.  If necessary, transmit
 *  clock and downshift values are also set.
 **/
s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data;

	/* Enable CRS on TX. This must be set for half-duplex operation. */
	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
	if (ret_val)
		return ret_val;

	phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;

	/* Options:
	 *   MDI/MDI-X = 0 (default)
	 *   0 - Auto for all speeds
	 *   1 - MDI mode
	 *   2 - MDI-X mode
	 *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
	 */
	phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;

	switch (phy->mdix) {
	case 1:
		phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
		break;
	case 2:
		phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
		break;
	case 3:
		phy_data |= M88E1000_PSCR_AUTO_X_1000T;
		break;
	case 0:
	default:
		phy_data |= M88E1000_PSCR_AUTO_X_MODE;
		break;
	}

	/* Options:
	 *   disable_polarity_correction = 0 (default)
	 *       Automatic Correction for Reversed Cable Polarity
	 *   0 - Disabled
	 *   1 - Enabled
	 */
	phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
	if (phy->disable_polarity_correction == 1)
		phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;

	ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
	if (ret_val)
		return ret_val;

	if (phy->revision < 4) {
		/* Force TX_CLK in the Extended PHY Specific Control Register
		 * to 25MHz clock.
		 */
		ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
		if (ret_val)
			return ret_val;

		phy_data |= M88E1000_EPSCR_TX_CLK_25;

		if ((phy->revision == 2) &&
		    (phy->id == M88E1111_I_PHY_ID)) {
			/* 82573L PHY - set the downshift counter to 5x. */
			phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
			phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
		} else {
			/* Configure Master and Slave downshift values */
			phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
				      M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
			phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
				     M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
		}
		ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
		if (ret_val)
			return ret_val;
	}

	/* Commit the changes. */
	ret_val = e1000e_commit_phy(hw);
	if (ret_val)
		hw_dbg(hw, "Error committing the PHY changes\n");

	return ret_val;
}

/**
 *  e1000e_copper_link_setup_igp - Setup igp PHYs for copper link
 *  @hw: pointer to the HW structure
 *
 *  Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for
 *  igp PHYs.
 **/
s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;

	ret_val = e1000_phy_hw_reset(hw);
	if (ret_val) {
		hw_dbg(hw, "Error resetting the PHY.\n");
		return ret_val;
	}

	/* Wait 15ms for MAC to configure PHY from NVM settings. */
	msleep(15);

	/* Disable LPLU D0 during driver init */
	ret_val = e1000_set_d0_lplu_state(hw, 0);
	if (ret_val) {
		hw_dbg(hw, "Error Disabling LPLU D0\n");
		return ret_val;
	}
	/* Configure mdi-mdix settings */
	ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &data);
	if (ret_val)
		return ret_val;

	data &= ~IGP01E1000_PSCR_AUTO_MDIX;

	switch (phy->mdix) {
	case 1:
		data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
		break;
	case 2:
		data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
		break;
	case 0:
	default:
		data |= IGP01E1000_PSCR_AUTO_MDIX;
		break;
	}
	ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, data);
	if (ret_val)
		return ret_val;

	/* Set auto-master/slave resolution settings */
	if (hw->mac.autoneg) {
		/* When the autonegotiation advertisement is 1000Mbps only,
		 * we should disable SmartSpeed and enable Auto MasterSlave
		 * resolution as the hardware default. */
		if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
			/* Disable SmartSpeed */
			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   &data);
			if (ret_val)
				return ret_val;

			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   data);
			if (ret_val)
				return ret_val;

			/* Set auto Master/Slave resolution process */
			ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data);
			if (ret_val)
				return ret_val;

			data &= ~CR_1000T_MS_ENABLE;
			ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data);
			if (ret_val)
				return ret_val;
		}

		ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data);
		if (ret_val)
			return ret_val;

		/* Load defaults for future use */
		phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ?
			((data & CR_1000T_MS_VALUE) ?
			e1000_ms_force_master :
			e1000_ms_force_slave) :
			e1000_ms_auto;

		switch (phy->ms_type) {
		case e1000_ms_force_master:
			data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
			break;
		case e1000_ms_force_slave:
			data |= CR_1000T_MS_ENABLE;
			data &= ~(CR_1000T_MS_VALUE);
			break;
		case e1000_ms_auto:
			data &= ~CR_1000T_MS_ENABLE;
			/* fall through */
		default:
			break;
		}
		ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data);
	}

	return ret_val;
}

/**
 *  e1000_phy_setup_autoneg - Configure PHY for auto-negotiation
 *  @hw: pointer to the HW structure
 *
 *  Reads the MII auto-neg advertisement register and/or the 1000T control
 *  register and, if the PHY is already set up for auto-negotiation, returns
 *  successfully.  Otherwise, sets up advertisement and flow control to
 *  the appropriate values for the desired auto-negotiation.
 **/
static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 mii_autoneg_adv_reg;
	u16 mii_1000t_ctrl_reg = 0;

	phy->autoneg_advertised &= phy->autoneg_mask;

	/* Read the MII Auto-Neg Advertisement Register (Address 4). */
	ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
	if (ret_val)
		return ret_val;

	if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
		/* Read the MII 1000Base-T Control Register (Address 9). */
		ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
		if (ret_val)
			return ret_val;
	}

	/* Need to parse both autoneg_advertised and fc and set up
	 * the appropriate PHY registers.  First we will parse for
	 * autoneg_advertised software override.  Since we can advertise
	 * a plethora of combinations, we need to check each bit
	 * individually.
	 */

	/* First we clear all the 10/100 mb speed bits in the Auto-Neg
	 * Advertisement Register (Address 4) and the 1000 mb speed bits in
	 * the 1000Base-T Control Register (Address 9).
	 */
	mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
				 NWAY_AR_100TX_HD_CAPS |
				 NWAY_AR_10T_FD_CAPS   |
				 NWAY_AR_10T_HD_CAPS);
	mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);

	hw_dbg(hw, "autoneg_advertised %x\n", phy->autoneg_advertised);

	/* Do we want to advertise 10 Mb Half Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
		hw_dbg(hw, "Advertise 10mb Half duplex\n");
		mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
	}

	/* Do we want to advertise 10 Mb Full Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
		hw_dbg(hw, "Advertise 10mb Full duplex\n");
		mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
	}

	/* Do we want to advertise 100 Mb Half Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
		hw_dbg(hw, "Advertise 100mb Half duplex\n");
		mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
	}

	/* Do we want to advertise 100 Mb Full Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
		hw_dbg(hw, "Advertise 100mb Full duplex\n");
		mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
	}

	/* We do not allow the Phy to advertise 1000 Mb Half Duplex */
	if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
		hw_dbg(hw, "Advertise 1000mb Half duplex request denied!\n");

	/* Do we want to advertise 1000 Mb Full Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
		hw_dbg(hw, "Advertise 1000mb Full duplex\n");
		mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
	}

	/* Check for a software override of the flow control settings, and
	 * set up the PHY advertisement registers accordingly.  If
	 * auto-negotiation is enabled, then software will have to set the
	 * "PAUSE" bits to the correct value in the Auto-Negotiation
	 * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
	 * negotiation.
	 *
	 * The possible values of the "fc" parameter are:
	 *      0:  Flow control is completely disabled
	 *      1:  Rx flow control is enabled (we can receive pause frames
	 *	  but not send pause frames).
	 *      2:  Tx flow control is enabled (we can send pause frames
	 *	  but we do not support receiving pause frames).
	 *      3:  Both Rx and Tx flow control (symmetric) are enabled.
	 *  other:  No software override.  The flow control configuration
	 *	  in the EEPROM is used.
	 */
	switch (hw->mac.fc) {
	case e1000_fc_none:
		/* Flow control (Rx & Tx) is completely disabled by a
		 * software over-ride.
		 */
		mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
		break;
	case e1000_fc_rx_pause:
		/* Rx Flow control is enabled, and Tx Flow control is
		 * disabled, by a software over-ride.
		 */
		/* Since there really isn't a way to advertise that we are
		 * capable of Rx Pause ONLY, we will advertise that we
		 * support both symmetric and asymmetric Rx PAUSE.  Later
		 * (in e1000e_config_fc_after_link_up) we will disable the
		 * hw's ability to send PAUSE frames.
		 */
		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
		break;
	case e1000_fc_tx_pause:
		/* Tx Flow control is enabled, and Rx Flow control is
		 * disabled, by a software over-ride.
		 */
		mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
		mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
		break;
	case e1000_fc_full:
		/* Flow control (both Rx and Tx) is enabled by a software
		 * over-ride.
		 */
		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
		break;
	default:
		hw_dbg(hw, "Flow control param set incorrectly\n");
		ret_val = -E1000_ERR_CONFIG;
		return ret_val;
	}

	ret_val = e1e_wphy(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
	if (ret_val)
		return ret_val;

	hw_dbg(hw, "Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);

	if (phy->autoneg_mask & ADVERTISE_1000_FULL)
		ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);

	return ret_val;
}

/**
 *  e1000_copper_link_autoneg - Setup/Enable autoneg for copper link
 *  @hw: pointer to the HW structure
 *
 *  Performs initial bounds checking on the autoneg advertisement parameter,
 *  then configures the PHY to advertise the full capability if nothing was
 *  requested.  Sets up the PHY to autonegotiate and restarts the negotiation
 *  process with the link partner.  If wait_for_link is set, waits for
 *  autoneg to complete before exiting.
 **/
static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_ctrl;

	/* Perform some bounds checking on the autoneg advertisement
	 * parameter.
	 */
	phy->autoneg_advertised &= phy->autoneg_mask;

	/* If autoneg_advertised is zero, we assume it was not defaulted
	 * by the calling code so we set to advertise full capability.
	 */
	if (phy->autoneg_advertised == 0)
		phy->autoneg_advertised = phy->autoneg_mask;

	hw_dbg(hw, "Reconfiguring auto-neg advertisement params\n");
	ret_val = e1000_phy_setup_autoneg(hw);
	if (ret_val) {
		hw_dbg(hw, "Error Setting up Auto-Negotiation\n");
		return ret_val;
	}
	hw_dbg(hw, "Restarting Auto-Neg\n");

	/* Restart auto-negotiation by setting the Auto Neg Enable bit and
	 * the Auto Neg Restart bit in the PHY control register.
	 */
	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
	if (ret_val)
		return ret_val;

	phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl);
	if (ret_val)
		return ret_val;

	/* Does the user want to wait for Auto-Neg to complete here, or
	 * check at a later time (for example, callback routine).
	 */
	if (phy->wait_for_link) {
		ret_val = e1000_wait_autoneg(hw);
		if (ret_val) {
			hw_dbg(hw, "Error while waiting for "
				 "autoneg to complete\n");
			return ret_val;
		}
	}

	hw->mac.get_link_status = 1;

	return ret_val;
}

/**
 *  e1000e_setup_copper_link - Configure copper link settings
 *  @hw: pointer to the HW structure
 *
 *  Calls the appropriate function to configure the link for auto-neg or
 *  forced speed and duplex.  Then we check for link; once link is
 *  established, collision distance and flow control are configured.
 *  If link is not established, we return -E1000_ERR_PHY (-2).
 **/
s32 e1000e_setup_copper_link(struct e1000_hw *hw)
{
	s32 ret_val;
	bool link;

	if (hw->mac.autoneg) {
		/* Setup autoneg and flow control advertisement and perform
		 * autonegotiation. */
		ret_val = e1000_copper_link_autoneg(hw);
		if (ret_val)
			return ret_val;
	} else {
		/* PHY will be set to 10H, 10F, 100H or 100F
		 * depending on user settings. */
		hw_dbg(hw, "Forcing Speed and Duplex\n");
		ret_val = e1000_phy_force_speed_duplex(hw);
		if (ret_val) {
			hw_dbg(hw, "Error Forcing Speed and Duplex\n");
			return ret_val;
		}
	}

	/* Check link status.  Wait up to 100 microseconds for link to become
	 * valid.
	 */
	ret_val = e1000e_phy_has_link_generic(hw,
					     COPPER_LINK_UP_LIMIT,
					     10,
					     &link);
	if (ret_val)
		return ret_val;

	if (link) {
		hw_dbg(hw, "Valid link established!!!\n");
		e1000e_config_collision_dist(hw);
		ret_val = e1000e_config_fc_after_link_up(hw);
	} else {
		hw_dbg(hw, "Unable to establish link!!!\n");
	}

	return ret_val;
}

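/*
 * Sketch (editorial, not part of the patch): forcing a link setting
 * through this path amounts to clearing hw->mac.autoneg and loading
 * hw->mac.forced_speed_duplex before the call; the setup helper below
 * then tests that field against the E1000_ALL_HALF_DUPLEX and
 * E1000_ALL_100_SPEED masks to pick the CTRL and PHY_CONTROL bits:
 *
 *	hw->mac.autoneg = 0;
 *	hw->mac.forced_speed_duplex = <one of the ADVERTISE_* values>;
 *	ret_val = e1000e_setup_copper_link(hw);
 */
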
/**
 *  e1000e_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
 *  @hw: pointer to the HW structure
 *
 *  Calls the PHY setup function to force speed and duplex.  Clears the
 *  auto-crossover to force MDI manually.  Waits for link and returns
 *  success if link comes up, else -E1000_ERR_PHY (-2).
 **/
s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data;
	bool link;

	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
	if (ret_val)
		return ret_val;

	e1000e_phy_force_speed_duplex_setup(hw, &phy_data);

	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
	if (ret_val)
		return ret_val;

	/* Clear Auto-Crossover to force MDI manually.  IGP requires MDI
	 * forced whenever speed and duplex are forced.
	 */
	ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
	if (ret_val)
		return ret_val;

	phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
	phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;

	ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
	if (ret_val)
		return ret_val;

	hw_dbg(hw, "IGP PSCR: %X\n", phy_data);

	udelay(1);

	if (phy->wait_for_link) {
		hw_dbg(hw, "Waiting for forced speed/duplex link on IGP phy.\n");

		ret_val = e1000e_phy_has_link_generic(hw,
						     PHY_FORCE_LIMIT,
						     100000,
						     &link);
		if (ret_val)
			return ret_val;

		if (!link)
			hw_dbg(hw, "Link taking longer than expected.\n");

		/* Try once more */
		ret_val = e1000e_phy_has_link_generic(hw,
						     PHY_FORCE_LIMIT,
						     100000,
						     &link);
		if (ret_val)
			return ret_val;
	}

	return ret_val;
}

/**
 *  e1000e_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
 *  @hw: pointer to the HW structure
 *
 *  Calls the PHY setup function to force speed and duplex.  Clears the
 *  auto-crossover to force MDI manually.  Resets the PHY to commit the
 *  changes.  If time expires while waiting for link up, we reset the DSP.
 *  After reset, TX_CLK and CRS on TX must be set.  Returns success upon
 *  successful completion, else the corresponding error code.
 **/
s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data;
	bool link;

	/* Clear Auto-Crossover to force MDI manually.  M88E1000 requires MDI
	 * forced whenever speed and duplex are forced.
	 */
	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
	if (ret_val)
		return ret_val;

	phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
	ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
	if (ret_val)
		return ret_val;

	hw_dbg(hw, "M88E1000 PSCR: %X\n", phy_data);

	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
	if (ret_val)
		return ret_val;

	e1000e_phy_force_speed_duplex_setup(hw, &phy_data);

	/* Reset the phy to commit changes. */
	phy_data |= MII_CR_RESET;

	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
	if (ret_val)
		return ret_val;

	udelay(1);

	if (phy->wait_for_link) {
		hw_dbg(hw, "Waiting for forced speed/duplex link on M88 phy.\n");

		ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
						     100000, &link);
		if (ret_val)
			return ret_val;

		if (!link) {
			/* We didn't get link.
			 * Reset the DSP and cross our fingers.
			 */
			ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT, 0x001d);
			if (ret_val)
				return ret_val;
			ret_val = e1000e_phy_reset_dsp(hw);
			if (ret_val)
				return ret_val;
		}

		/* Try once more */
		ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
						     100000, &link);
		if (ret_val)
			return ret_val;
	}

	ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
	if (ret_val)
		return ret_val;

	/* Resetting the phy means we need to re-force TX_CLK in the
	 * Extended PHY Specific Control Register to 25MHz clock from
	 * the reset value of 2.5MHz.
	 */
	phy_data |= M88E1000_EPSCR_TX_CLK_25;
	ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
	if (ret_val)
		return ret_val;

	/* In addition, we must re-enable CRS on Tx for both half and full
	 * duplex.
	 */
	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
	if (ret_val)
		return ret_val;

	phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
	ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);

	return ret_val;
}

/**
 *  e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
 *  @hw: pointer to the HW structure
 *  @phy_ctrl: pointer to current value of PHY_CONTROL
 *
 *  Forces speed and duplex on the PHY by doing the following: disable flow
 *  control, force speed/duplex on the MAC, disable auto speed detection,
 *  disable auto-negotiation, configure duplex, configure speed, configure
 *  the collision distance, write configuration to CTRL register.  The
 *  caller must write to the PHY_CONTROL register for these settings to
 *  take effect.
 **/
void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 ctrl;

	/* Turn off flow control when forcing speed/duplex */
	mac->fc = e1000_fc_none;

	/* Force speed/duplex on the mac */
	ctrl = er32(CTRL);
	ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	ctrl &= ~E1000_CTRL_SPD_SEL;

	/* Disable Auto Speed Detection */
	ctrl &= ~E1000_CTRL_ASDE;

	/* Disable autoneg on the phy */
	*phy_ctrl &= ~MII_CR_AUTO_NEG_EN;

	/* Forcing Full or Half Duplex? */
	if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
		ctrl &= ~E1000_CTRL_FD;
		*phy_ctrl &= ~MII_CR_FULL_DUPLEX;
		hw_dbg(hw, "Half Duplex\n");
	} else {
		ctrl |= E1000_CTRL_FD;
		*phy_ctrl |= MII_CR_FULL_DUPLEX;
		hw_dbg(hw, "Full Duplex\n");
	}

	/* Forcing 10mb or 100mb? */
	if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
		ctrl |= E1000_CTRL_SPD_100;
		*phy_ctrl |= MII_CR_SPEED_100;
		*phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
		hw_dbg(hw, "Forcing 100mb\n");
	} else {
		ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
		*phy_ctrl |= MII_CR_SPEED_10;
		*phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
		hw_dbg(hw, "Forcing 10mb\n");
	}

	e1000e_config_collision_dist(hw);

	ew32(CTRL, ctrl);
}

/**
 *  e1000e_set_d3_lplu_state - Sets low power link up state for D3
 *  @hw: pointer to the HW structure
 *  @active: boolean used to enable/disable lplu
 *
 *  Success returns 0, failure returns a negative error code.
 *
 *  The low power link up (lplu) state is set to the power management level D3
 *  and SmartSpeed is disabled when active is true, else clear lplu for D3
 *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
 *  is used during Dx states where the power conservation is most important.
 *  During driver activity, SmartSpeed should be enabled so performance is
 *  maintained.
 **/
s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;

	ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data);
	if (ret_val)
		return ret_val;

	if (!active) {
		data &= ~IGP02E1000_PM_D3_LPLU;
		ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
		if (ret_val)
			return ret_val;
		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
		 * during Dx states where the power conservation is most
		 * important.  During driver activity we should enable
		 * SmartSpeed, so performance is maintained. */
		if (phy->smart_speed == e1000_smart_speed_on) {
			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   &data);
			if (ret_val)
				return ret_val;

			data |= IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   data);
			if (ret_val)
				return ret_val;
		} else if (phy->smart_speed == e1000_smart_speed_off) {
			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   &data);
			if (ret_val)
				return ret_val;

			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   data);
			if (ret_val)
				return ret_val;
		}
	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
		data |= IGP02E1000_PM_D3_LPLU;
		ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
		if (ret_val)
			return ret_val;

		/* When LPLU is enabled, we should disable SmartSpeed */
		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
		if (ret_val)
			return ret_val;

		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
		ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
	}

	return ret_val;
}

/**
 *  e1000e_check_downshift - Checks whether a downshift in speed occurred
 *  @hw: pointer to the HW structure
 *
 *  Success returns 0, failure returns a negative error code.
 *
 *  A downshift is detected by querying the PHY link health.
 **/
s32 e1000e_check_downshift(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data, offset, mask;

	switch (phy->type) {
	case e1000_phy_m88:
	case e1000_phy_gg82563:
		offset	= M88E1000_PHY_SPEC_STATUS;
		mask	= M88E1000_PSSR_DOWNSHIFT;
		break;
	case e1000_phy_igp_2:
	case e1000_phy_igp_3:
		offset	= IGP01E1000_PHY_LINK_HEALTH;
		mask	= IGP01E1000_PLHR_SS_DOWNGRADE;
		break;
	default:
		/* speed downshift not supported */
		phy->speed_downgraded = 0;
		return 0;
	}

	ret_val = e1e_rphy(hw, offset, &phy_data);

	if (!ret_val)
		phy->speed_downgraded = (phy_data & mask);

	return ret_val;
}

/**
 *  e1000_check_polarity_m88 - Checks the polarity.
 *  @hw: pointer to the HW structure
 *
 *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
 *
 *  Polarity is determined based on the PHY specific status register.
 **/
static s32 e1000_check_polarity_m88(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;

	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data);

	if (!ret_val)
		phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
				      ? e1000_rev_polarity_reversed
				      : e1000_rev_polarity_normal;

	return ret_val;
}

/**
 *  e1000_check_polarity_igp - Checks the polarity.
 *  @hw: pointer to the HW structure
 *
 *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
 *
 *  Polarity is determined based on the PHY port status register, and the
 *  current speed (since there is no polarity at 100Mbps).
 **/
static s32 e1000_check_polarity_igp(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data, offset, mask;

	/* Polarity is determined based on the speed of
	 * our connection. */
	ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data);
	if (ret_val)
		return ret_val;

	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
	    IGP01E1000_PSSR_SPEED_1000MBPS) {
		offset	= IGP01E1000_PHY_PCS_INIT_REG;
		mask	= IGP01E1000_PHY_POLARITY_MASK;
	} else {
		/* This really only applies to 10Mbps since
		 * there is no polarity for 100Mbps (always 0).
		 */
		offset	= IGP01E1000_PHY_PORT_STATUS;
		mask	= IGP01E1000_PSSR_POLARITY_REVERSED;
	}

	ret_val = e1e_rphy(hw, offset, &data);

	if (!ret_val)
		phy->cable_polarity = (data & mask)
				      ? e1000_rev_polarity_reversed
				      : e1000_rev_polarity_normal;

	return ret_val;
}

/**
 *  e1000_wait_autoneg - Wait for auto-neg completion
 *  @hw: pointer to the HW structure
 *
 *  Waits for auto-negotiation to complete or for the auto-negotiation time
 *  limit to expire, whichever happens first.
 **/
static s32 e1000_wait_autoneg(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 i, phy_status;

	/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
	for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
		/* Read the status register twice, likely for the same
		 * sticky-bit reason as in e1000e_phy_has_link_generic()
		 * below. */
		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		if (phy_status & MII_SR_AUTONEG_COMPLETE)
			break;
		msleep(100);
	}

	/* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
	 * has completed.
	 */
	return ret_val;
}

/**
 *  e1000e_phy_has_link_generic - Polls PHY for link
 *  @hw: pointer to the HW structure
 *  @iterations: number of times to poll for link
 *  @usec_interval: delay between polling attempts
 *  @success: pointer to whether polling was successful or not
 *
 *  Polls the PHY status register for link, 'iterations' number of times.
 **/
s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
			       u32 usec_interval, bool *success)
{
	s32 ret_val = 0;
	u16 i, phy_status;

	for (i = 0; i < iterations; i++) {
		/* Some PHYs require the PHY_STATUS register to be read
		 * twice due to the link bit being sticky.  No harm doing
		 * it across the board.
		 */
		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		if (phy_status & MII_SR_LINK_STATUS)
			break;
		if (usec_interval >= 1000)
			mdelay(usec_interval/1000);
		else
			udelay(usec_interval);
	}

	*success = (i < iterations);

	return ret_val;
}

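/*
 * Illustrative call (editorial), matching e1000e_setup_copper_link()
 * above: poll for link COPPER_LINK_UP_LIMIT times at 10 usec intervals
 * and only proceed if the link came up.
 *
 *	bool link;
 *
 *	ret_val = e1000e_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT,
 *					      10, &link);
 *	if (!ret_val && link)
 *		e1000e_config_collision_dist(hw);
 */
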
/**
 *  e1000e_get_cable_length_m88 - Determine cable length for m88 PHY
 *  @hw: pointer to the HW structure
 *
 *  Reads the PHY specific status register to retrieve the cable length
 *  information.  The cable length is determined by averaging the minimum and
 *  maximum values to get the "average" cable length.  The m88 PHY has four
 *  possible cable length values, which are:
 *	Register Value		Cable Length
 *	0			< 50 meters
 *	1			50 - 80 meters
 *	2			80 - 110 meters
 *	3			110 - 140 meters
 *	4			> 140 meters
 **/
s32 e1000e_get_cable_length_m88(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data, index;

	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
	if (ret_val)
		return ret_val;

	index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
		M88E1000_PSSR_CABLE_LENGTH_SHIFT;
	phy->min_cable_length = e1000_m88_cable_length_table[index];
	phy->max_cable_length = e1000_m88_cable_length_table[index+1];

	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;

	return ret_val;
}

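/*
 * Worked example (editorial): a register value of 2 indexes the m88
 * table at 2 and 3, giving min = 80 m and max = 110 m, so the reported
 * "average" cable length is (80 + 110) / 2 = 95 meters.
 */
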
/**
 *  e1000e_get_cable_length_igp_2 - Determine cable length for igp2 PHY
 *  @hw: pointer to the HW structure
 *
 *  The automatic gain control (agc) normalizes the amplitude of the
 *  received signal, adjusting for the attenuation produced by the
 *  cable.  By reading the AGC registers, which represent the
 *  combination of coarse and fine gain values, the value can be put
 *  into a lookup table to obtain the approximate cable length
 *  for each channel.
 **/
s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data, i, agc_value = 0;
	u16 cur_agc_index, max_agc_index = 0;
	u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
	u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
							 {IGP02E1000_PHY_AGC_A,
							  IGP02E1000_PHY_AGC_B,
							  IGP02E1000_PHY_AGC_C,
							  IGP02E1000_PHY_AGC_D};

	/* Read the AGC registers for all channels */
	for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
		ret_val = e1e_rphy(hw, agc_reg_array[i], &phy_data);
		if (ret_val)
			return ret_val;

		/* Getting bits 15:9, which represent the combination of
		 * coarse and fine gain values.  The result is a number
		 * that can be put into the lookup table to obtain the
		 * approximate cable length. */
		cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
				IGP02E1000_AGC_LENGTH_MASK;

		/* Array index bound check. */
		if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
		    (cur_agc_index == 0))
			return -E1000_ERR_PHY;

		/* Remove min & max AGC values from calculation. */
		if (e1000_igp_2_cable_length_table[min_agc_index] >
		    e1000_igp_2_cable_length_table[cur_agc_index])
			min_agc_index = cur_agc_index;
		if (e1000_igp_2_cable_length_table[max_agc_index] <
		    e1000_igp_2_cable_length_table[cur_agc_index])
			max_agc_index = cur_agc_index;

		agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
	}

	agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
		      e1000_igp_2_cable_length_table[max_agc_index]);
	agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);

	/* Calculate cable length with the error range of +/- 10 meters. */
	phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
				 (agc_value - IGP02E1000_AGC_RANGE) : 0;
	phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;

	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;

	return ret_val;
}

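/*
 * Worked example (editorial): if the four AGC channels map to table
 * values 40, 50, 60 and 110 meters, the sum is 260; dropping the min
 * (40) and max (110) leaves 110, and dividing by the two remaining
 * channels gives agc_value = 55.  The reported range is then
 * 55 +/- IGP02E1000_AGC_RANGE meters.
 */
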
/**
1454
 *  e1000e_get_phy_info_m88 - Retrieve PHY information
1455
 *  @hw: pointer to the HW structure
1456
 *
1457
 *  Valid for only copper links.  Read the PHY status register (sticky read)
1458
 *  to verify that link is up.  Read the PHY special control register to
1459
 *  determine the polarity and 10base-T extended distance.  Read the PHY
1460
 *  special status register to determine MDI/MDIx and current speed.  If
1461
 *  speed is 1000, then determine cable length, local and remote receiver.
1462
 **/
1463
s32 e1000e_get_phy_info_m88(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data;
	bool link;

	if (hw->media_type != e1000_media_type_copper) {
		hw_dbg(hw, "Phy info is only valid for copper media\n");
		return -E1000_ERR_CONFIG;
	}

	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		return ret_val;

	if (!link) {
		hw_dbg(hw, "Phy info is only valid if link is up\n");
		return -E1000_ERR_CONFIG;
	}

	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
	if (ret_val)
		return ret_val;

	phy->polarity_correction = (phy_data &
				    M88E1000_PSCR_POLARITY_REVERSAL);

	ret_val = e1000_check_polarity_m88(hw);
	if (ret_val)
		return ret_val;

	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
	if (ret_val)
		return ret_val;

	phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX);

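	/* Cable length and 1000BASE-T local/remote receiver status are only
	 * meaningful at 1000 Mb/s, so they are left undefined otherwise.
	 */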
	if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
		ret_val = e1000_get_cable_length(hw);
		if (ret_val)
			return ret_val;

		ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &phy_data);
		if (ret_val)
			return ret_val;

		phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
				? e1000_1000t_rx_status_ok
				: e1000_1000t_rx_status_not_ok;

		phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
				 ? e1000_1000t_rx_status_ok
				 : e1000_1000t_rx_status_not_ok;
	} else {
		/* Set values to "undefined" */
		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
		phy->local_rx = e1000_1000t_rx_status_undefined;
		phy->remote_rx = e1000_1000t_rx_status_undefined;
	}

	return ret_val;
}

/**
 *  e1000e_get_phy_info_igp - Retrieve igp PHY information
 *  @hw: pointer to the HW structure
 *
 *  Read PHY status to determine if link is up.  If link is up, then
 *  set/determine 10base-T extended distance and polarity correction.  Read
 *  PHY port status to determine MDI/MDIx and speed.  Based on the speed,
 *  determine the cable length, local and remote receiver.
 **/
s32 e1000e_get_phy_info_igp(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;
	bool link;

	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		return ret_val;

	if (!link) {
		hw_dbg(hw, "Phy info is only valid if link is up\n");
		return -E1000_ERR_CONFIG;
	}

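	/* Unlike the m88 path, polarity correction is reported as always
	 * enabled here, presumably because IGP PHYs do not allow polarity
	 * correction to be disabled (an assumption, not stated in this
	 * patch).
	 */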
	phy->polarity_correction = 1;

	ret_val = e1000_check_polarity_igp(hw);
	if (ret_val)
		return ret_val;

	ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data);
	if (ret_val)
		return ret_val;

	phy->is_mdix = (data & IGP01E1000_PSSR_MDIX);

	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
	    IGP01E1000_PSSR_SPEED_1000MBPS) {
		ret_val = e1000_get_cable_length(hw);
		if (ret_val)
			return ret_val;

		ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
		if (ret_val)
			return ret_val;

		phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
				? e1000_1000t_rx_status_ok
				: e1000_1000t_rx_status_not_ok;

		phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
				 ? e1000_1000t_rx_status_ok
				 : e1000_1000t_rx_status_not_ok;
	} else {
		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
		phy->local_rx = e1000_1000t_rx_status_undefined;
		phy->remote_rx = e1000_1000t_rx_status_undefined;
	}

	return ret_val;
}

/**
 *  e1000e_phy_sw_reset - PHY software reset
 *  @hw: pointer to the HW structure
 *
 *  Does a software reset of the PHY by reading the PHY control register and
 *  writing it back with the reset bit set.
 **/
s32 e1000e_phy_sw_reset(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 phy_ctrl;

	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
	if (ret_val)
		return ret_val;

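	/* MII_CR_RESET is the IEEE 802.3 BMCR reset bit; it is
	 * self-clearing, so the PHY deasserts it once the reset completes.
	 */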
	phy_ctrl |= MII_CR_RESET;
	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl);
	if (ret_val)
		return ret_val;

	udelay(1);

	return ret_val;
}

/**
 *  e1000e_phy_hw_reset_generic - PHY hardware reset
 *  @hw: pointer to the HW structure
 *
 *  Verify the reset block is not blocking us from resetting.  Acquire
 *  semaphore (if necessary) and read/set/write the device control reset
 *  bit in the PHY.  Wait the appropriate delay time for the device to
 *  reset and release the semaphore (if necessary).
 **/
s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u32 ctrl;

	ret_val = e1000_check_reset_block(hw);
	if (ret_val)
		return 0;

	ret_val = phy->ops.acquire_phy(hw);
	if (ret_val)
		return ret_val;

	ctrl = er32(CTRL);
	ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
	e1e_flush();

	udelay(phy->reset_delay_us);

	ew32(CTRL, ctrl);
	e1e_flush();

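	/* Pause briefly after deasserting PHY_RST so the PHY can come out
	 * of reset before any MDIO access; the 150 us figure appears to be
	 * the settle time this driver assumes, not a documented spec value.
	 */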
	udelay(150);

	phy->ops.release_phy(hw);

	return e1000_get_phy_cfg_done(hw);
}

/**
 *  e1000e_get_cfg_done - Generic configuration done
 *  @hw: pointer to the HW structure
 *
 *  Generic function to wait 10 milliseconds for configuration to complete
 *  and return success.
 **/
s32 e1000e_get_cfg_done(struct e1000_hw *hw)
{
	mdelay(10);
	return 0;
}

/* Internal function pointers */

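/* The helpers below dispatch through the per-family ops table when a
 * family-specific implementation is provided, and otherwise fall back to
 * a benign default so callers need not check for NULL function pointers.
 */
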
/**
 *  e1000_get_phy_cfg_done - Generic PHY configuration done
 *  @hw: pointer to the HW structure
 *
 *  Return success if the silicon family did not implement a family-specific
 *  get_cfg_done function.
 **/
static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
{
	if (hw->phy.ops.get_cfg_done)
		return hw->phy.ops.get_cfg_done(hw);

	return 0;
}

/**
 *  e1000_phy_force_speed_duplex - Generic force PHY speed/duplex
 *  @hw: pointer to the HW structure
 *
 *  When the silicon family has not implemented a forced speed/duplex
 *  function for the PHY, simply return 0.
 **/
static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
{
	if (hw->phy.ops.force_speed_duplex)
		return hw->phy.ops.force_speed_duplex(hw);

	return 0;
}

/**
 *  e1000e_get_phy_type_from_id - Get PHY type from id
 *  @phy_id: phy_id read from the phy
 *
 *  Returns the phy type from the id.
 **/
enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id)
{
	enum e1000_phy_type phy_type = e1000_phy_unknown;

	switch (phy_id) {
	case M88E1000_I_PHY_ID:
	case M88E1000_E_PHY_ID:
	case M88E1111_I_PHY_ID:
	case M88E1011_I_PHY_ID:
		phy_type = e1000_phy_m88;
		break;
	case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
		phy_type = e1000_phy_igp_2;
		break;
	case GG82563_E_PHY_ID:
		phy_type = e1000_phy_gg82563;
		break;
	case IGP03E1000_E_PHY_ID:
		phy_type = e1000_phy_igp_3;
		break;
	case IFE_E_PHY_ID:
	case IFE_PLUS_E_PHY_ID:
	case IFE_C_E_PHY_ID:
		phy_type = e1000_phy_ife;
		break;
	default:
		phy_type = e1000_phy_unknown;
		break;
	}
	return phy_type;
}

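/* Typical use (illustrative sketch, not part of this patch): after the PHY
 * ID registers have been read, a caller would classify the PHY with
 * hw->phy.type = e1000e_get_phy_type_from_id(hw->phy.id);
 */
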
/**
 *  e1000e_commit_phy - Soft PHY reset
 *  @hw: pointer to the HW structure
 *
 *  Performs a soft PHY reset on PHY families that require one.  This is a
 *  function pointer entry point called by drivers.
 **/
s32 e1000e_commit_phy(struct e1000_hw *hw)
{
	if (hw->phy.ops.commit_phy)
		return hw->phy.ops.commit_phy(hw);

	return 0;
}

/**
 *  e1000_set_d0_lplu_state - Sets low power link up state for D0
 *  @hw: pointer to the HW structure
 *  @active: boolean used to enable/disable lplu
 *
 *  Returns 0 on success, negative error code on failure.
 *
 *  The low power link up (lplu) state is set to the power management level D0
 *  and SmartSpeed is disabled when active is true, else clear lplu for D0
 *  and enable SmartSpeed.  LPLU and SmartSpeed are mutually exclusive.  LPLU
 *  is used during Dx states where power conservation is most important.
 *  During driver activity, SmartSpeed should be enabled so performance is
 *  maintained.  This is a function pointer entry point called by drivers.
 **/
static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
{
	if (hw->phy.ops.set_d0_lplu_state)
		return hw->phy.ops.set_d0_lplu_state(hw, active);

	return 0;
}
