/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2007 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include "e1000.h"

#define DRV_VERSION "0.2.0"
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;

static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571] = &e1000_82571_info,
	[board_82572] = &e1000_82572_info,
	[board_82573] = &e1000_82573_info,
	[board_80003es2lan] = &e1000_es2_info,
	[board_ich8lan] = &e1000_ich8_info,
	[board_ich9lan] = &e1000_ich9_info,
};

#ifdef DEBUG
/**
 * e1000e_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *e1000e_get_hw_dev_name(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	struct net_device *netdev = adapter->netdev;
	return netdev->name;
}
#endif
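
/*
 * One descriptor slot is always left unused so that next_to_use ==
 * next_to_clean can only mean "empty", never "full"; hence the "- 1"
 * in both branches below.
 */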
/**
 * e1000_desc_unused - calculate if we have unused descriptors
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}

/**
 * e1000_receive_skb - helper function to handle rx indications
 * @adapter: board private structure
 * @netdev: pointer to the netdev struct
 * @skb: pointer to sk_buff to be indicated to stack
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
			      struct net_device *netdev,
			      struct sk_buff *skb,
			      u8 status, u16 vlan)
{
	skb->protocol = eth_type_trans(skb, netdev);

	if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
		vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
					 le16_to_cpu(vlan) &
					 E1000_RXD_SPC_VLAN_MASK);
	else
		netif_receive_skb(skb);

	netdev->last_rx = jiffies;
}
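
/*
 * Callers of e1000_rx_checksum() pack the descriptor's status byte into
 * bits 7:0 and its errors byte into bits 31:24 of status_err; the two
 * casts at the top of the function unpack them again.
 */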

/**
 * e1000_rx_checksum - Receive Checksum Offload
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @csum: receive descriptor csum field
 * @skb: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      u32 csum, struct sk_buff *skb)
{
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;
	/* TCP/UDP checksum error bit is set */
	if (errors & E1000_RXD_ERR_TCPE) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status & E1000_RXD_STAT_TCPCS) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		/* IP fragment with UDP payload */
		/* Hardware complements the payload checksum, so we undo it
		 * and then put the value in host order for further stack use.
		 */
		csum = ntohl(csum ^ 0xFFFF);
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
	adapter->hw_csum_good++;
}
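
/*
 * In each of the three buffer-allocation routines below, the final tail
 * write is what hands the freshly initialized descriptors to hardware;
 * note that the value written lags next_to_use by one descriptor (see
 * the i-- just before each writel).
 */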

/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb(netdev, bufsz);
		if (!skb) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
map_skb:
		buffer_info->dma = pci_map_single(pdev, skb->data,
						  adapter->rx_buffer_len,
						  PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(buffer_info->dma)) {
			dev_err(&pdev->dev, "RX DMA map failed\n");
			adapter->rx_dma_failed++;
			break;
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
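
/*
 * Packet split receives use one small buffer (rx_ps_bsize0) for protocol
 * headers plus up to PS_PAGE_BUFFERS whole pages per descriptor for the
 * payload; page slots beyond adapter->rx_ps_pages are set to an all-ones
 * address so hardware will not use them.
 */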
226 |
|
227 |
/** |
228 |
* e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split |
229 |
* @adapter: address of board private structure |
230 |
**/ |
231 |
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, |
232 |
int cleaned_count) |
233 |
{ |
234 |
struct net_device *netdev = adapter->netdev; |
235 |
struct pci_dev *pdev = adapter->pdev; |
236 |
union e1000_rx_desc_packet_split *rx_desc; |
237 |
struct e1000_ring *rx_ring = adapter->rx_ring; |
238 |
struct e1000_buffer *buffer_info; |
239 |
struct e1000_ps_page *ps_page; |
240 |
struct sk_buff *skb; |
241 |
unsigned int i, j; |
242 |
|
243 |
i = rx_ring->next_to_use; |
244 |
buffer_info = &rx_ring->buffer_info[i]; |
245 |
|
246 |
while (cleaned_count--) { |
247 |
rx_desc = E1000_RX_DESC_PS(*rx_ring, i); |
248 |
|
249 |
for (j = 0; j < PS_PAGE_BUFFERS; j++) { |
250 |
ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS) |
251 |
+ j]; |
252 |
if (j < adapter->rx_ps_pages) { |
253 |
if (!ps_page->page) { |
254 |
ps_page->page = alloc_page(GFP_ATOMIC); |
255 |
if (!ps_page->page) { |
256 |
adapter->alloc_rx_buff_failed++; |
257 |
goto no_buffers; |
258 |
} |
259 |
ps_page->dma = pci_map_page(pdev, |
260 |
ps_page->page, |
261 |
0, PAGE_SIZE, |
262 |
PCI_DMA_FROMDEVICE); |
263 |
if (pci_dma_mapping_error( |
264 |
ps_page->dma)) { |
265 |
dev_err(&adapter->pdev->dev, |
266 |
"RX DMA page map failed\n"); |
267 |
adapter->rx_dma_failed++; |
268 |
goto no_buffers; |
269 |
} |
270 |
} |
271 |
/* |
272 |
* Refresh the desc even if buffer_addrs |
273 |
* didn't change because each write-back |
274 |
* erases this info. |
275 |
*/ |
276 |
rx_desc->read.buffer_addr[j+1] = |
277 |
cpu_to_le64(ps_page->dma); |
278 |
} else { |
279 |
rx_desc->read.buffer_addr[j+1] = ~0; |
280 |
} |
281 |
} |
282 |
|
283 |
skb = netdev_alloc_skb(netdev, |
284 |
adapter->rx_ps_bsize0 + NET_IP_ALIGN); |
285 |
|
286 |
if (!skb) { |
287 |
adapter->alloc_rx_buff_failed++; |
288 |
break; |
289 |
} |
290 |
|
291 |
/* Make buffer alignment 2 beyond a 16 byte boundary |
292 |
* this will result in a 16 byte aligned IP header after |
293 |
* the 14 byte MAC header is removed |
294 |
*/ |
295 |
skb_reserve(skb, NET_IP_ALIGN); |
296 |
|
297 |
buffer_info->skb = skb; |
298 |
buffer_info->dma = pci_map_single(pdev, skb->data, |
299 |
adapter->rx_ps_bsize0, |
300 |
PCI_DMA_FROMDEVICE); |
301 |
if (pci_dma_mapping_error(buffer_info->dma)) { |
302 |
dev_err(&pdev->dev, "RX DMA map failed\n"); |
303 |
adapter->rx_dma_failed++; |
304 |
/* cleanup skb */ |
305 |
dev_kfree_skb_any(skb); |
306 |
buffer_info->skb = NULL; |
307 |
break; |
308 |
} |
309 |
|
310 |
rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); |
311 |
|
312 |
i++; |
313 |
if (i == rx_ring->count) |
314 |
i = 0; |
315 |
buffer_info = &rx_ring->buffer_info[i]; |
316 |
} |
317 |
|
318 |
no_buffers: |
319 |
if (rx_ring->next_to_use != i) { |
320 |
rx_ring->next_to_use = i; |
321 |
|
322 |
if (!(i--)) |
323 |
i = (rx_ring->count - 1); |
324 |
|
325 |
/* Force memory writes to complete before letting h/w |
326 |
* know there are new descriptors to fetch. (Only |
327 |
* applicable for weak-ordered memory model archs, |
328 |
* such as IA-64). */ |
329 |
wmb(); |
330 |
/* Hardware increments by 16 bytes, but packet split |
331 |
* descriptors are 32 bytes...so we increment tail |
332 |
* twice as much. |
333 |
*/ |
334 |
writel(i<<1, adapter->hw.hw_addr + rx_ring->tail); |
335 |
} |
336 |
} |
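
/*
 * For jumbo frames the packet data lands in whole pages; the small skb
 * allocated here mainly provides tailroom for the copybreak fast path in
 * e1000_clean_rx_irq_jumbo().  The odd bufsz arithmetic likely keeps the
 * allocation, including the allocator's own overhead, within a 256 byte
 * slab object.
 */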
337 |
|
338 |
/** |
339 |
* e1000_alloc_rx_buffers_jumbo - Replace used jumbo receive buffers |
340 |
* |
341 |
* @adapter: address of board private structure |
342 |
* @cleaned_count: number of buffers to allocate this pass |
343 |
**/ |
344 |
static void e1000_alloc_rx_buffers_jumbo(struct e1000_adapter *adapter, |
345 |
int cleaned_count) |
346 |
{ |
347 |
struct net_device *netdev = adapter->netdev; |
348 |
struct pci_dev *pdev = adapter->pdev; |
349 |
struct e1000_ring *rx_ring = adapter->rx_ring; |
350 |
struct e1000_rx_desc *rx_desc; |
351 |
struct e1000_buffer *buffer_info; |
352 |
struct sk_buff *skb; |
353 |
unsigned int i; |
354 |
unsigned int bufsz = 256 - |
355 |
16 /*for skb_reserve */ - |
356 |
NET_IP_ALIGN; |
357 |
|
358 |
i = rx_ring->next_to_use; |
359 |
buffer_info = &rx_ring->buffer_info[i]; |
360 |
|
361 |
while (cleaned_count--) { |
362 |
skb = buffer_info->skb; |
363 |
if (skb) { |
364 |
skb_trim(skb, 0); |
365 |
goto check_page; |
366 |
} |
367 |
|
368 |
skb = netdev_alloc_skb(netdev, bufsz); |
369 |
if (!skb) { |
370 |
/* Better luck next round */ |
371 |
adapter->alloc_rx_buff_failed++; |
372 |
break; |
373 |
} |
374 |
|
375 |
/* Make buffer alignment 2 beyond a 16 byte boundary |
376 |
* this will result in a 16 byte aligned IP header after |
377 |
* the 14 byte MAC header is removed |
378 |
*/ |
379 |
skb_reserve(skb, NET_IP_ALIGN); |
380 |
|
381 |
buffer_info->skb = skb; |
382 |
check_page: |
383 |
/* allocate a new page if necessary */ |
384 |
if (!buffer_info->page) { |
385 |
buffer_info->page = alloc_page(GFP_ATOMIC); |
386 |
if (!buffer_info->page) { |
387 |
adapter->alloc_rx_buff_failed++; |
388 |
break; |
389 |
} |
390 |
} |
391 |
|
392 |
if (!buffer_info->dma) |
393 |
buffer_info->dma = pci_map_page(pdev, |
394 |
buffer_info->page, 0, |
395 |
PAGE_SIZE, |
396 |
PCI_DMA_FROMDEVICE); |
397 |
if (pci_dma_mapping_error(buffer_info->dma)) { |
398 |
dev_err(&adapter->pdev->dev, "RX DMA page map failed\n"); |
399 |
adapter->rx_dma_failed++; |
400 |
break; |
401 |
} |
402 |
|
403 |
rx_desc = E1000_RX_DESC(*rx_ring, i); |
404 |
rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); |
405 |
|
406 |
i++; |
407 |
if (i == rx_ring->count) |
408 |
i = 0; |
409 |
buffer_info = &rx_ring->buffer_info[i]; |
410 |
} |
411 |
|
412 |
if (rx_ring->next_to_use != i) { |
413 |
rx_ring->next_to_use = i; |
414 |
if (i-- == 0) |
415 |
i = (rx_ring->count - 1); |
416 |
|
417 |
/* Force memory writes to complete before letting h/w |
418 |
* know there are new descriptors to fetch. (Only |
419 |
* applicable for weak-ordered memory model archs, |
420 |
* such as IA-64). */ |
421 |
wmb(); |
422 |
writel(i, adapter->hw.hw_addr + rx_ring->tail); |
423 |
} |
424 |
} |
425 |
|
426 |
/** |
427 |
* e1000_clean_rx_irq - Send received data up the network stack; legacy |
428 |
* @adapter: board private structure |
429 |
* |
430 |
* the return value indicates whether actual cleaning was done, there |
431 |
* is no guarantee that everything was cleaned |
432 |
**/ |
433 |
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, |
434 |
int *work_done, int work_to_do) |
435 |
{ |
436 |
struct net_device *netdev = adapter->netdev; |
437 |
struct pci_dev *pdev = adapter->pdev; |
438 |
struct e1000_ring *rx_ring = adapter->rx_ring; |
439 |
struct e1000_rx_desc *rx_desc, *next_rxd; |
440 |
struct e1000_buffer *buffer_info, *next_buffer; |
441 |
u32 length; |
442 |
unsigned int i; |
443 |
int cleaned_count = 0; |
444 |
bool cleaned = 0; |
445 |
unsigned int total_rx_bytes = 0, total_rx_packets = 0; |
446 |
|
447 |
i = rx_ring->next_to_clean; |
448 |
rx_desc = E1000_RX_DESC(*rx_ring, i); |
449 |
buffer_info = &rx_ring->buffer_info[i]; |
450 |
|
451 |
while (rx_desc->status & E1000_RXD_STAT_DD) { |
452 |
struct sk_buff *skb; |
453 |
u8 status; |
454 |
|
455 |
if (*work_done >= work_to_do) |
456 |
break; |
457 |
(*work_done)++; |
458 |
|
459 |
status = rx_desc->status; |
460 |
skb = buffer_info->skb; |
461 |
buffer_info->skb = NULL; |
462 |
|
463 |
prefetch(skb->data - NET_IP_ALIGN); |
464 |
|
465 |
i++; |
466 |
if (i == rx_ring->count) |
467 |
i = 0; |
468 |
next_rxd = E1000_RX_DESC(*rx_ring, i); |
469 |
prefetch(next_rxd); |
470 |
|
471 |
next_buffer = &rx_ring->buffer_info[i]; |
472 |
|
473 |
cleaned = 1; |
474 |
cleaned_count++; |
475 |
pci_unmap_single(pdev, |
476 |
buffer_info->dma, |
477 |
adapter->rx_buffer_len, |
478 |
PCI_DMA_FROMDEVICE); |
479 |
buffer_info->dma = 0; |
480 |
|
481 |
length = le16_to_cpu(rx_desc->length); |
482 |
|
483 |
/* !EOP means multiple descriptors were used to store a single |
484 |
* packet, also make sure the frame isn't just CRC only */ |
485 |
if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) { |
486 |
/* All receives must fit into a single buffer */ |
487 |
ndev_dbg(netdev, "%s: Receive packet consumed " |
488 |
"multiple buffers\n", netdev->name); |
489 |
/* recycle */ |
490 |
buffer_info->skb = skb; |
491 |
goto next_desc; |
492 |
} |
493 |
|
494 |
if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) { |
495 |
/* recycle */ |
496 |
buffer_info->skb = skb; |
497 |
goto next_desc; |
498 |
} |
499 |
|
500 |
/* adjust length to remove Ethernet CRC */ |
501 |
length -= 4; |
502 |
|
503 |
/* probably a little skewed due to removing CRC */ |
504 |
total_rx_bytes += length; |
505 |
total_rx_packets++; |
506 |
|
507 |
/* code added for copybreak, this should improve |
508 |
* performance for small packets with large amounts |
509 |
* of reassembly being done in the stack */ |
510 |
if (length < copybreak) { |
511 |
struct sk_buff *new_skb = |
512 |
netdev_alloc_skb(netdev, length + NET_IP_ALIGN); |
513 |
if (new_skb) { |
514 |
skb_reserve(new_skb, NET_IP_ALIGN); |
515 |
memcpy(new_skb->data - NET_IP_ALIGN, |
516 |
skb->data - NET_IP_ALIGN, |
517 |
length + NET_IP_ALIGN); |
518 |
/* save the skb in buffer_info as good */ |
519 |
buffer_info->skb = skb; |
520 |
skb = new_skb; |
521 |
} |
522 |
/* else just continue with the old one */ |
523 |
} |
524 |
/* end copybreak code */ |
525 |
skb_put(skb, length); |
526 |
|
527 |
/* Receive Checksum Offload */ |
528 |
e1000_rx_checksum(adapter, |
529 |
(u32)(status) | |
530 |
((u32)(rx_desc->errors) << 24), |
531 |
le16_to_cpu(rx_desc->csum), skb); |
532 |
|
533 |
e1000_receive_skb(adapter, netdev, skb,status,rx_desc->special); |
534 |
|
535 |
next_desc: |
536 |
rx_desc->status = 0; |
537 |
|
538 |
/* return some buffers to hardware, one at a time is too slow */ |
539 |
if (cleaned_count >= E1000_RX_BUFFER_WRITE) { |
540 |
adapter->alloc_rx_buf(adapter, cleaned_count); |
541 |
cleaned_count = 0; |
542 |
} |
543 |
|
544 |
/* use prefetched values */ |
545 |
rx_desc = next_rxd; |
546 |
buffer_info = next_buffer; |
547 |
} |
548 |
rx_ring->next_to_clean = i; |
549 |
|
550 |
cleaned_count = e1000_desc_unused(rx_ring); |
551 |
if (cleaned_count) |
552 |
adapter->alloc_rx_buf(adapter, cleaned_count); |
553 |
|
554 |
adapter->total_rx_packets += total_rx_packets; |
555 |
adapter->total_rx_bytes += total_rx_bytes; |
556 |
return cleaned; |
557 |
} |
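
/*
 * e1000_consume_page() transfers ownership of a receive page to the skb
 * it was attached to: buffer_info forgets the page and the skb's size
 * accounting (len, data_len, truesize) grows by the amount consumed.
 */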

static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
}

static void e1000_put_txbuf(struct e1000_adapter *adapter,
			    struct e1000_buffer *buffer_info)
{
	if (buffer_info->dma) {
		pci_unmap_page(adapter->pdev, buffer_info->dma,
			       buffer_info->length, PCI_DMA_TODEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
}

static void e1000_print_tx_hang(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
	struct net_device *netdev = adapter->netdev;

	/* detected Tx unit hang */
	ndev_err(netdev,
		 "Detected Tx Unit Hang:\n"
		 "  TDH                  <%x>\n"
		 "  TDT                  <%x>\n"
		 "  next_to_use          <%x>\n"
		 "  next_to_clean        <%x>\n"
		 "buffer_info[next_to_clean]:\n"
		 "  time_stamp           <%lx>\n"
		 "  next_to_watch        <%x>\n"
		 "  jiffies              <%lx>\n"
		 "  next_to_watch.status <%x>\n",
		 readl(adapter->hw.hw_addr + tx_ring->head),
		 readl(adapter->hw.hw_addr + tx_ring->tail),
		 tx_ring->next_to_use,
		 tx_ring->next_to_clean,
		 tx_ring->buffer_info[eop].time_stamp,
		 eop,
		 jiffies,
		 eop_desc->upper.fields.status);
}

/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	bool cleaned = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
		for (cleaned = 0; !cleaned; ) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				struct sk_buff *skb = buffer_info->skb;
				unsigned int segs, bytecount;
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_tx_packets += segs;
				total_tx_bytes += bytecount;
			}

			e1000_put_txbuf(adapter, buffer_info);
			tx_desc->upper.data = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
#define E1000_TX_WEIGHT 64
		/* weight of a sort for tx, to avoid endless transmit cleanup */
		if (count++ == E1000_TX_WEIGHT)
			break;
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD 32
	if (cleaned && netif_carrier_ok(netdev) &&
	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		adapter->detect_tx_hung = 0;
		if (tx_ring->buffer_info[eop].dma &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp
			       + (adapter->tx_timeout_factor * HZ))
		    && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
			e1000_print_tx_hang(adapter);
			netif_stop_queue(netdev);
		}
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	return cleaned;
}
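
/*
 * Jumbo receives can span several page-sized buffers.  Partial packets
 * are accumulated on rx_ring->rx_skb_top until the descriptor carrying
 * EOP arrives, at which point the completed chain is passed to the stack.
 */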

/**
 * e1000_clean_rx_irq_jumbo - Send received data up the network stack; jumbo
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq_jumbo(struct e1000_adapter *adapter,
				     int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = 1;
		cleaned_count++;
		pci_unmap_page(pdev,
			       buffer_info->dma,
			       PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* errors is only valid for DD + EOP descriptors */
		if ((status & E1000_RXD_STAT_EOP) &&
		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
			/* recycle both page and skb */
			buffer_info->skb = skb;
			/* an error means any chain goes out the window too */
			if (rx_ring->rx_skb_top)
				dev_kfree_skb(rx_ring->rx_skb_top);
			rx_ring->rx_skb_top = NULL;
			goto next_desc;
		}

#define rxtop rx_ring->rx_skb_top
		if (!(status & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->page, 0,
						   length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->page, 0, length);
				/* re-use the current skb, we only consumed the
				 * page */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page,
							KM_SKB_DATA_SOFTIRQ);
					memcpy(skb_tail_pointer(skb),
					       vaddr, length);
					kunmap_atomic(vaddr,
						      KM_SKB_DATA_SOFTIRQ);
					/* re-use the page, so don't erase
					 * buffer_info->page */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
							   buffer_info->page, 0,
							   length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		pskb_trim(skb, skb->len - 4);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			ndev_err(netdev, "__pskb_pull_tail failed.\n");
			dev_kfree_skb(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, netdev, skb, status,
				  rx_desc->special);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	return cleaned;
}
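
/*
 * In the packet split path the header buffer and page buffers of a
 * descriptor are reassembled into one skb; payloads small enough for
 * copybreak are instead copied out of the first page so the page can be
 * handed straight back to hardware.
 */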

/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
				  int *work_done, int work_to_do)
{
	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info, *next_buffer;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;
	u32 length, staterr;
	int cleaned_count = 0;
	bool cleaned = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		skb = buffer_info->skb;

		/* in the packet split case this is header only */
		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = 1;
		cleaned_count++;
		pci_unmap_single(pdev, buffer_info->dma,
				 adapter->rx_ps_bsize0,
				 PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			ndev_dbg(netdev, "%s: Packet Split buffers didn't pick "
				 "up the full packet\n", netdev->name);
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		if (!length) {
			ndev_dbg(netdev, "%s: Last part of the packet spanning"
				 " multiple descriptors\n", netdev->name);
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

		{
			/* this looks ugly, but it seems compiler issues make it
			   more efficient than reusing j */
			int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

			/* page alloc/put takes too long and affects small packet
			 * throughput, so unsplit small packets and save the alloc/put*/
			if (l1 && (l1 <= copybreak) &&
			    ((length + l1) <= adapter->rx_ps_bsize0)) {
				u8 *vaddr;

				ps_page = &rx_ring->ps_pages[i * PS_PAGE_BUFFERS];

				/* there is no documentation about how to call
				 * kmap_atomic, so we can't hold the mapping
				 * very long */
				pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
					PAGE_SIZE, PCI_DMA_FROMDEVICE);
				vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
				memcpy(skb_tail_pointer(skb), vaddr, l1);
				kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
				pci_dma_sync_single_for_device(pdev, ps_page->dma,
					PAGE_SIZE, PCI_DMA_FROMDEVICE);
				/* remove the CRC */
				l1 -= 4;
				skb_put(skb, l1);
				goto copydone;
			} /* if */
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
			if (!length)
				break;

			ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS) + j];
			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
			ps_page->dma = 0;
			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
			ps_page->page = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

		/* strip the ethernet crc, problem is we're using pages now so
		 * this whole operation can get a little cpu intensive */
		pskb_trim(skb, skb->len - 4);

copydone:
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr, le16_to_cpu(
			rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);

		if (rx_desc->wb.upper.header_status &
		    cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
			adapter->rx_hdr_split++;

		e1000_receive_skb(adapter, netdev, skb,
				  staterr, rx_desc->wb.middle.vlan);

next_desc:
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	return cleaned;
}

/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 **/
static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->clean_rx == e1000_clean_rx_irq)
				pci_unmap_single(pdev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 PCI_DMA_FROMDEVICE);
			else if (adapter->clean_rx == e1000_clean_rx_irq_jumbo)
				pci_unmap_page(pdev, buffer_info->dma,
					       PAGE_SIZE, PCI_DMA_FROMDEVICE);
			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
				pci_unmap_single(pdev, buffer_info->dma,
						 adapter->rx_ps_bsize0,
						 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS)
						     + j];
			if (!ps_page->page)
				break;
			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
			ps_page->dma = 0;
			put_page(ps_page->page);
			ps_page->page = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	size = sizeof(struct e1000_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);
	size = sizeof(struct e1000_ps_page)
	       * (rx_ring->count * PS_PAGE_BUFFERS);
	memset(rx_ring->ps_pages, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	/* read ICR disables interrupts using IAM, so keep up with our
	 * enable/disable accounting */
	atomic_inc(&adapter->irq_sem);

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			e1000e_gig_downshift_workaround_ich8lan(hw);

		/* 80003ES2LAN workaround-- For packet buffer work-around on
		 * link down event; disable receives here in the ISR and reset
		 * adapter in watchdog */
		if (netif_carrier_ok(netdev) &&
		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
			/* disable receives */
			u32 rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__netif_rx_schedule(netdev, &adapter->napi);
	} else {
		atomic_dec(&adapter->irq_sem);
	}

	return IRQ_HANDLED;
}

/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	u32 rctl, icr = er32(ICR);
	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	/* Interrupt Auto-Mask...upon reading ICR,
	 * interrupts are masked.  No need for the
	 * IMC write, but it does mean we should
	 * account for it ASAP. */
	atomic_inc(&adapter->irq_sem);

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			e1000e_gig_downshift_workaround_ich8lan(hw);

		/* 80003ES2LAN workaround--
		 * For packet buffer work-around on link down event;
		 * disable receives here in the ISR and
		 * reset adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
			/* disable receives */
			rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__netif_rx_schedule(netdev, &adapter->napi);
	} else {
		atomic_dec(&adapter->irq_sem);
	}

	return IRQ_HANDLED;
}

static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	void (*handler) = &e1000_intr;
	int irq_flags = IRQF_SHARED;
	int err;

	err = pci_enable_msi(adapter->pdev);
	if (err) {
		ndev_warn(netdev,
			  "Unable to allocate MSI interrupt Error: %d\n", err);
	} else {
		adapter->flags |= FLAG_MSI_ENABLED;
		handler = &e1000_intr_msi;
		irq_flags = 0;
	}

	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
			  netdev);
	if (err) {
		if (adapter->flags & FLAG_MSI_ENABLED)
			pci_disable_msi(adapter->pdev);
		ndev_err(netdev,
			 "Unable to allocate interrupt Error: %d\n", err);
	}

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
	if (adapter->flags & FLAG_MSI_ENABLED) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~FLAG_MSI_ENABLED;
	}
}
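
/*
 * irq_sem counts outstanding interrupt disables: every path that masks
 * interrupts (including the implicit auto-mask on the ICR read in the
 * ISRs above) increments it, and e1000_irq_enable() only re-arms the
 * hardware once the count drops back to zero.
 */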

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	atomic_inc(&adapter->irq_sem);
	ew32(IMC, ~0);
	e1e_flush();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (atomic_dec_and_test(&adapter->irq_sem)) {
		ew32(IMS, IMS_ENABLE_MASK);
		e1e_flush();
	}
}

/**
 * e1000_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is open.
 **/
static void e1000_get_hw_control(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;
	u32 swsm;

	/* Let firmware know the driver has taken over */
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT,
		     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
	}
}

/**
 * e1000_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is closed.
 **/
static void e1000_release_hw_control(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;
	u32 swsm;

	/* Let firmware take over control of h/w */
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT,
		     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
	}
}

static void e1000_release_manageability(struct e1000_adapter *adapter)
{
	if (adapter->flags & FLAG_MNG_PT_ENABLED) {
		struct e1000_hw *hw = &adapter->hw;

		u32 manc = er32(MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;
		manc &= ~E1000_MANC_EN_MNG2HOST;

		/* don't explicitly have to mess with MANC2H since
		 * MANC has an enable/disable bit that gates MANC2H */
		ew32(MANC, manc);
	}
}

/**
 * e1000_alloc_ring_dma - allocate memory for a ring structure
 **/
static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
				struct e1000_ring *ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
					GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

/**
 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	int err = -ENOMEM, size;

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	tx_ring->buffer_info = vmalloc(size);
	if (!tx_ring->buffer_info)
		goto err;
	memset(tx_ring->buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	err = e1000_alloc_ring_dma(adapter, tx_ring);
	if (err)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	spin_lock_init(&adapter->tx_queue_lock);

	return 0;
err:
	vfree(tx_ring->buffer_info);
	ndev_err(adapter->netdev,
		 "Unable to allocate memory for the transmit descriptor ring\n");
	return err;
}

/**
 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;
	int size, desc_len, err = -ENOMEM;

	size = sizeof(struct e1000_buffer) * rx_ring->count;
	rx_ring->buffer_info = vmalloc(size);
	if (!rx_ring->buffer_info)
		goto err;
	memset(rx_ring->buffer_info, 0, size);

	rx_ring->ps_pages = kcalloc(rx_ring->count * PS_PAGE_BUFFERS,
				    sizeof(struct e1000_ps_page),
				    GFP_KERNEL);
	if (!rx_ring->ps_pages)
		goto err;

	desc_len = sizeof(union e1000_rx_desc_packet_split);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	err = e1000_alloc_ring_dma(adapter, rx_ring);
	if (err)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->rx_skb_top = NULL;

	return 0;
err:
	vfree(rx_ring->buffer_info);
	kfree(rx_ring->ps_pages);
	ndev_err(adapter->netdev,
		 "Unable to allocate memory for the receive descriptor ring\n");
	return err;
}

/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 **/
static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_put_txbuf(adapter, buffer_info);
	}

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * e1000e_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
void e1000e_free_tx_resources(struct e1000_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *tx_ring = adapter->tx_ring;

	e1000_clean_tx_ring(adapter);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);
	tx_ring->desc = NULL;
}

/**
 * e1000e_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
void e1000e_free_rx_resources(struct e1000_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;

	e1000_clean_rx_ring(adapter);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	kfree(rx_ring->ps_pages);
	rx_ring->ps_pages = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);
	rx_ring->desc = NULL;
}

/**
 * e1000_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see e1000_param.c)
 **/
static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
				     u16 itr_setting, int packets,
				     int bytes)
{
	unsigned int retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000) {
				retval = bulk_latency;
			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
				retval = bulk_latency;
			} else if (packets > 35) {
				retval = lowest_latency;
			}
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 6000) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}
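
/*
 * The ITR register is programmed in 256 ns units, so a target rate of
 * new_itr interrupts per second becomes an interval of
 * 10^9 / (new_itr * 256) register units, as written in e1000_set_itr().
 */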

static void e1000_set_itr(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 current_itr;
	u32 new_itr = adapter->itr;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	adapter->tx_itr = e1000_update_itr(adapter,
					   adapter->tx_itr,
					   adapter->total_tx_packets,
					   adapter->total_tx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
		adapter->tx_itr = low_latency;

	adapter->rx_itr = e1000_update_itr(adapter,
					   adapter->rx_itr,
					   adapter->total_rx_packets,
					   adapter->total_rx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
		adapter->rx_itr = low_latency;

	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 70000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 4000;
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != adapter->itr) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing */
		new_itr = new_itr > adapter->itr ?
			  min(adapter->itr + (new_itr >> 2), new_itr) :
			  new_itr;
		adapter->itr = new_itr;
		ew32(ITR, 1000000000 / (new_itr * 256));
	}
}
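
/*
 * NAPI contract: once there is no outstanding Tx work and fewer Rx
 * packets than the budget were processed, the poll is completed with
 * netif_rx_complete() and interrupts are re-enabled; otherwise the poll
 * remains scheduled.
 */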

/**
 * e1000_clean - NAPI Rx polling callback
 * @napi: struct associated with this polling callback
 * @budget: amount of packets driver is allowed to process this poll
 **/
static int e1000_clean(struct napi_struct *napi, int budget)
{
	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
	struct net_device *poll_dev = adapter->netdev;
	int tx_cleaned = 0, work_done = 0;

	/* Must NOT use netdev_priv macro here. */
	adapter = poll_dev->priv;

	/* Keep link state information with original netdev */
	if (!netif_carrier_ok(poll_dev))
		goto quit_polling;

	/* e1000_clean is called per-cpu.  This lock protects
	 * tx_ring from being cleaned by multiple cpus
	 * simultaneously.  A failure obtaining the lock means
	 * tx_ring is currently being cleaned anyway. */
	if (spin_trylock(&adapter->tx_queue_lock)) {
		tx_cleaned = e1000_clean_tx_irq(adapter);
		spin_unlock(&adapter->tx_queue_lock);
	}

	adapter->clean_rx(adapter, &work_done, budget);

	/* If no Tx and not enough Rx work done, exit the polling mode */
	if ((!tx_cleaned && (work_done < budget)) ||
	    !netif_running(poll_dev)) {
quit_polling:
		if (adapter->itr_setting & 3)
			e1000_set_itr(adapter);
		netif_rx_complete(poll_dev, napi);
		e1000_irq_enable(adapter);
	}

	return work_done;
}

static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	/* don't update vlan cookie if already programmed */
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id))
		return;
	/* add VID to filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000e_write_vfta(hw, index, vfta);
}
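
/*
 * VFTA indexing, for illustration: the 4096 possible VLAN IDs map onto
 * 128 32-bit dwords.  vid = 100 selects dword (100 >> 5) & 0x7F = 3 and
 * bit 100 & 0x1F = 4, so registering it sets bit 4 of VFTA[3]; the kill
 * path below clears the same bit.
 */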

static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	e1000_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);
	e1000_irq_enable(adapter);

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id)) {
		/* release control to f/w */
		e1000_release_hw_control(adapter);
		return;
	}

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000e_write_vfta(hw, index, vfta);
}

static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (!adapter->vlgrp)
		return;

	if (!vlan_group_get_device(adapter->vlgrp, vid)) {
		adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		if (adapter->hw.mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
			e1000_vlan_rx_add_vid(netdev, vid);
			adapter->mng_vlan_id = vid;
		}

		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !vlan_group_get_device(adapter->vlgrp, old_vid))
			e1000_vlan_rx_kill_vid(netdev, old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}

static void e1000_vlan_rx_register(struct net_device *netdev,
				   struct vlan_group *grp)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	e1000_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = er32(CTRL);
		ctrl |= E1000_CTRL_VME;
		ew32(CTRL, ctrl);

		if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
			/* enable VLAN receive filtering */
			rctl = er32(RCTL);
			rctl |= E1000_RCTL_VFE;
			rctl &= ~E1000_RCTL_CFIEN;
			ew32(RCTL, rctl);
			e1000_update_mng_vlan(adapter);
		}
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = er32(CTRL);
		ctrl &= ~E1000_CTRL_VME;
		ew32(CTRL, ctrl);

		if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
			/* disable VLAN filtering */
			rctl = er32(RCTL);
			rctl &= ~E1000_RCTL_VFE;
			ew32(RCTL, rctl);
			if (adapter->mng_vlan_id !=
			    (u16)E1000_MNG_VLAN_NONE) {
				e1000_vlan_rx_kill_vid(netdev,
						       adapter->mng_vlan_id);
				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
			}
		}
	}

	e1000_irq_enable(adapter);
}

static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	u16 vid;

	e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (!adapter->vlgrp)
		return;

	for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
		if (!vlan_group_get_device(adapter->vlgrp, vid))
			continue;
		e1000_vlan_rx_add_vid(adapter->netdev, vid);
	}
}

static void e1000_init_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 manc, manc2h;

	if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
		return;

	manc = er32(MANC);

	/* disable hardware interception of ARP */
	manc &= ~(E1000_MANC_ARP_EN);

	/* enable receiving management packets to the host. this will probably
	 * generate destination unreachable messages from the host OS, but
	 * the packets will be handled on SMBUS */
	manc |= E1000_MANC_EN_MNG2HOST;
	manc2h = er32(MANC2H);
#define E1000_MNG2HOST_PORT_623 (1 << 5)
#define E1000_MNG2HOST_PORT_664 (1 << 6)
	manc2h |= E1000_MNG2HOST_PORT_623;
	manc2h |= E1000_MNG2HOST_PORT_664;
	ew32(MANC2H, manc2h);
	ew32(MANC, manc);
}
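
/*
 * UDP ports 623 and 664 are the standard ASF/RMCP remote-management
 * ports, so the two MANC2H bits set above forward manageability traffic
 * for those ports up to the host.
 */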

/**
 * e1000_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	u64 tdba;
	u32 tdlen, tctl, tipg, tarc;
	u32 ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	tdba = tx_ring->dma;
	tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
	ew32(TDBAL, (tdba & DMA_32BIT_MASK));
	ew32(TDBAH, (tdba >> 32));
	ew32(TDLEN, tdlen);
	ew32(TDH, 0);
	ew32(TDT, 0);
	tx_ring->head = E1000_TDH;
	tx_ring->tail = E1000_TDT;

	/* Set the default values for the Tx Inter Packet Gap timer */
	tipg = DEFAULT_82543_TIPG_IPGT_COPPER;	/* 8 */
	ipgr1 = DEFAULT_82543_TIPG_IPGR1;	/* 8 */
	ipgr2 = DEFAULT_82543_TIPG_IPGR2;	/* 6 */

	if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN)
		ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;	/* 7 */

	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	ew32(TIPG, tipg);

	/* Set the Tx Interrupt Delay register */
	ew32(TIDV, adapter->tx_int_delay);
	/* tx irq moderation */
	ew32(TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
		tarc = er32(TARC0);
		/* set the speed mode bit, we'll clear it if we're not at
		 * gigabit link later */
#define SPEED_MODE_BIT (1 << 21)
		tarc |= SPEED_MODE_BIT;
		ew32(TARC0, tarc);
	}

	/* errata: program both queues to unweighted RR */
	if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
		tarc = er32(TARC0);
		tarc |= 1;
		ew32(TARC0, tarc);
		tarc = er32(TARC1);
		tarc |= 1;
		ew32(TARC1, tarc);
	}

	e1000e_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

	/* only set IDE if we are delaying interrupts using the timers */
	if (adapter->tx_int_delay)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	/* enable Report Status bit */
	adapter->txd_cmd |= E1000_TXD_CMD_RS;

	ew32(TCTL, tctl);

	adapter->tx_queue_len = adapter->netdev->tx_queue_len;
}
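
/*
 * With the copper defaults above (IPGT = 8, IPGR1 = 8, IPGR2 = 6) the
 * value written to TIPG is 8 | (8 << 10) | (6 << 20); the exact time base
 * of the three inter-packet-gap fields is hardware-defined, see the MAC
 * data sheet.
 */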

/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: board private structure
 **/
#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
			   (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, rfctl;
	u32 psrctl = 0;
	u32 pages = 0;

	/* Program MC offset vector base */
	rctl = er32(RCTL);
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
		(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Do not Store bad packets */
	rctl &= ~E1000_RCTL_SBP;

	/* Enable Long Packet receive */
	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
	switch (adapter->rx_buffer_len) {
	case 256:
		rctl |= E1000_RCTL_SZ_256;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case 512:
		rctl |= E1000_RCTL_SZ_512;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case 1024:
		rctl |= E1000_RCTL_SZ_1024;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case 2048:
	default:
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case 4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case 8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case 16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}

	/*
	 * 82571 and greater support packet-split where the protocol
	 * header is placed in skb->data and the packet data is
	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
	 * In the case of a non-split, skb->data is linearly filled,
	 * followed by the page buffers.  Therefore, skb->data is
	 * sized to hold the largest protocol header.
	 *
	 * allocations using alloc_page take too long for regular MTU
	 * so only enable packet split for jumbo frames
	 *
	 * Using pages when the page size is greater than 16k wastes
	 * a lot of memory, since we allocate 3 pages at all times
	 * per packet.
	 */
	adapter->rx_ps_pages = 0;
	pages = PAGE_USE_COUNT(adapter->netdev->mtu);
	if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
		adapter->rx_ps_pages = pages;

	if (adapter->rx_ps_pages) {
		/* Configure extra packet-split registers */
		rfctl = er32(RFCTL);
		rfctl |= E1000_RFCTL_EXTEN;
		/* disable packet split support for IPv6 extension headers,
		 * because some malformed IPv6 headers can hang the RX */
		rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
			  E1000_RFCTL_NEW_IPV6_EXT_DIS);

		ew32(RFCTL, rfctl);

		/* disable the stripping of CRC because it breaks
		 * BMC firmware connected over SMBUS */
		rctl |= E1000_RCTL_DTYP_PS /* | E1000_RCTL_SECRC */;

		psrctl |= adapter->rx_ps_bsize0 >>
			  E1000_PSRCTL_BSIZE0_SHIFT;

		switch (adapter->rx_ps_pages) {
		case 3:
			psrctl |= PAGE_SIZE <<
				  E1000_PSRCTL_BSIZE3_SHIFT;
		case 2:
			psrctl |= PAGE_SIZE <<
				  E1000_PSRCTL_BSIZE2_SHIFT;
		case 1:
			psrctl |= PAGE_SIZE >>
				  E1000_PSRCTL_BSIZE1_SHIFT;
			break;
		}

		ew32(PSRCTL, psrctl);
	}

	ew32(RCTL, rctl);
}
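
/*
 * Packet-split sizing, for illustration: with 4 KB pages and a 9000-byte
 * MTU, PAGE_USE_COUNT(9000) = 3, so rx_ps_pages = 3 and the switch above
 * (which deliberately falls through from case 3 down to case 1) programs
 * PSRCTL with buffer 0 sized rx_ps_bsize0 (128 bytes, for the header) and
 * buffers 1 through 3 at one page each.
 */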

/**
 * e1000_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void e1000_configure_rx(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	u64 rdba;
	u32 rdlen, rctl, rxcsum, ctrl_ext;

	if (adapter->rx_ps_pages) {
		/* this is a 32 byte descriptor */
		rdlen = rx_ring->count *
			sizeof(union e1000_rx_desc_packet_split);
		adapter->clean_rx = e1000_clean_rx_irq_ps;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
	} else if (adapter->netdev->mtu > ETH_FRAME_LEN + VLAN_HLEN + 4) {
		rdlen = rx_ring->count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_rx_irq_jumbo;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_jumbo;
	} else {
		rdlen = rx_ring->count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}

	/* disable receives while setting up the descriptors */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	e1e_flush();
	msleep(10);

	/* set the Receive Delay Timer Register */
	ew32(RDTR, adapter->rx_int_delay);

	/* irq moderation */
	ew32(RADV, adapter->rx_abs_int_delay);
	if (adapter->itr_setting != 0)
		ew32(ITR, 1000000000 / (adapter->itr * 256));

	ctrl_ext = er32(CTRL_EXT);
	/* Reset delay timers after every interrupt */
	ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
	/* Auto-Mask interrupts upon ICR access */
	ctrl_ext |= E1000_CTRL_EXT_IAME;
	ew32(IAM, 0xffffffff);
	ew32(CTRL_EXT, ctrl_ext);
	e1e_flush();

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	rdba = rx_ring->dma;
	ew32(RDBAL, (rdba & DMA_32BIT_MASK));
	ew32(RDBAH, (rdba >> 32));
	ew32(RDLEN, rdlen);
	ew32(RDH, 0);
	ew32(RDT, 0);
	rx_ring->head = E1000_RDH;
	rx_ring->tail = E1000_RDT;

	/* Enable Receive Checksum Offload for TCP and UDP */
	rxcsum = er32(RXCSUM);
	if (adapter->flags & FLAG_RX_CSUM_ENABLED) {
		rxcsum |= E1000_RXCSUM_TUOFL;

		/* IPv4 payload checksum for UDP fragments must be
		 * used in conjunction with packet-split. */
		if (adapter->rx_ps_pages)
			rxcsum |= E1000_RXCSUM_IPPCSE;
	} else {
		rxcsum &= ~E1000_RXCSUM_TUOFL;
		/* no need to clear IPPCSE as it defaults to 0 */
	}
	ew32(RXCSUM, rxcsum);

	/* Enable early receives on supported devices, only takes effect when
	 * packet size is equal or larger than the specified value (in 8 byte
	 * units), e.g. using jumbo frames when setting to E1000_ERT_2048 */
	if ((adapter->flags & FLAG_HAS_ERT) &&
	    (adapter->netdev->mtu > ETH_DATA_LEN))
		ew32(ERT, E1000_ERT_2048);

	/* Enable Receives */
	ew32(RCTL, rctl);
}
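
/*
 * The jumbo cutoff used above, ETH_FRAME_LEN + VLAN_HLEN + 4 = 1522 bytes,
 * is the largest standard frame including a VLAN tag and FCS; only MTUs
 * beyond that take the jumbo receive path, and packet split (checked
 * first) takes precedence when enabled.
 */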

/**
 * e1000_mc_addr_list_update - Update Multicast addresses
 * @hw: pointer to the HW structure
 * @mc_addr_list: array of multicast addresses to program
 * @mc_addr_count: number of multicast addresses to program
 * @rar_used_count: the first RAR register free to program
 * @rar_count: total number of supported Receive Address Registers
 *
 * Updates the Receive Address Registers and Multicast Table Array.
 * The caller must have a packed mc_addr_list of multicast addresses.
 * The parameter rar_count will usually be hw->mac.rar_entry_count
 * unless there are workarounds that change this.  Currently no func pointer
 * exists and all implementations are handled in the generic version of this
 * function.
 **/
static void e1000_mc_addr_list_update(struct e1000_hw *hw, u8 *mc_addr_list,
				      u32 mc_addr_count, u32 rar_used_count,
				      u32 rar_count)
{
	hw->mac.ops.mc_addr_list_update(hw, mc_addr_list, mc_addr_count,
					rar_used_count, rar_count);
}

/**
 * e1000_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void e1000_set_multi(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct dev_mc_list *mc_ptr;
	u8 *mta_list;
	u32 rctl;
	int i;

	/* Check for Promiscuous and All Multicast modes */

	rctl = er32(RCTL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
	} else if (netdev->flags & IFF_ALLMULTI) {
		rctl |= E1000_RCTL_MPE;
		rctl &= ~E1000_RCTL_UPE;
	} else {
		rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
	}

	ew32(RCTL, rctl);

	if (netdev->mc_count) {
		mta_list = kmalloc(netdev->mc_count * ETH_ALEN, GFP_ATOMIC);
		if (!mta_list)
			return;

		/* prepare a packed array of only addresses. */
		mc_ptr = netdev->mc_list;

		for (i = 0; i < netdev->mc_count; i++) {
			if (!mc_ptr)
				break;
			memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr,
			       ETH_ALEN);
			mc_ptr = mc_ptr->next;
		}

		e1000_mc_addr_list_update(hw, mta_list, i, 1,
					  mac->rar_entry_count);
		kfree(mta_list);
	} else {
		/*
		 * if we're called from probe, we might not have
		 * anything to do here, so clear out the list
		 */
		e1000_mc_addr_list_update(hw, NULL, 0, 1,
					  mac->rar_entry_count);
	}
}

/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	e1000_set_multi(adapter->netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	adapter->alloc_rx_buf(adapter,
			      e1000_desc_unused(adapter->rx_ring));
}

/**
 * e1000e_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000e_reset ***
 **/
void e1000e_power_up_phy(struct e1000_adapter *adapter)
{
	u16 mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (adapter->hw.media_type == e1000_media_type_copper) {
		/* according to the manual, the phy will retain its
		 * settings across a power-down/up cycle */
		e1e_rphy(&adapter->hw, PHY_CONTROL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1e_wphy(&adapter->hw, PHY_CONTROL, mii_reg);
	}

	adapter->hw.mac.ops.setup_link(&adapter->hw);
}

/**
 * e1000_power_down_phy - Power down the PHY
 *
 * Power down the PHY so no link is implied when interface is down.
 * The PHY cannot be powered down if management or WoL is active.
 */
static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 mii_reg;

	/* WoL is enabled */
	if (adapter->wol)
		return;

	/* non-copper PHY? */
	if (adapter->hw.media_type != e1000_media_type_copper)
		return;

	/* reset is blocked because manageability mode or a
	 * SoL/IDER session is active */
	if (e1000e_check_mng_mode(hw) ||
	    e1000_check_reset_block(hw))
		return;

	/* manageability (AMT) is enabled */
	if (er32(MANC) & E1000_MANC_SMBUS_EN)
		return;

	/* power down the PHY */
	e1e_rphy(hw, PHY_CONTROL, &mii_reg);
	mii_reg |= MII_CR_POWER_DOWN;
	e1e_wphy(hw, PHY_CONTROL, mii_reg);
	mdelay(1);
}

/**
 * e1000e_reset - bring the hardware into a known good state
 *
 * This function boots the hardware and enables some settings that
 * require a configuration cycle of the hardware - those cannot be
 * set/changed during runtime.  After reset the device needs to be
 * properly configured for Rx, Tx etc.
 */
void e1000e_reset(struct e1000_adapter *adapter)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct e1000_hw *hw = &adapter->hw;
	u32 tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	if (mac->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		adapter->pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = adapter->pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		adapter->pba &= 0xffff;
		/* the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it */
		min_tx_space = (mac->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = mac->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < adapter->pba)) {
			adapter->pba -= (min_tx_space - tx_space);

			/* if short on rx space, rx wins and must trump tx
			 * adjustment or use Early Receive if available */
			if ((adapter->pba < min_rx_space) &&
			    (!(adapter->flags & FLAG_HAS_ERT)))
				/* ERT enabled in e1000_configure_rx */
				adapter->pba = min_rx_space;
		}
	}

	ew32(PBA, adapter->pba);

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame */
	if (adapter->flags & FLAG_HAS_ERT)
		hwm = min(((adapter->pba << 10) * 9 / 10),
			  ((adapter->pba << 10) - (E1000_ERT_2048 << 3)));
	else
		hwm = min(((adapter->pba << 10) * 9 / 10),
			  ((adapter->pba << 10) - mac->max_frame_size));

	mac->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */
	mac->fc_low_water = mac->fc_high_water - 8;

	if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
		mac->fc_pause_time = 0xFFFF;
	else
		mac->fc_pause_time = E1000_FC_PAUSE_TIME;
	mac->fc = mac->original_fc;

	/* Allow time for pending master requests to run */
	mac->ops.reset_hw(hw);
	ew32(WUC, 0);

	if (mac->ops.init_hw(hw))
		ndev_err(adapter->netdev, "Hardware Error\n");

	e1000_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETH_P_8021Q);

	e1000e_reset_adaptive(hw);
	e1000_get_phy_info(hw);

	if (!(adapter->flags & FLAG_SMART_POWER_DOWN)) {
		u16 phy_data = 0;
		/* speed up time to link by disabling smart power down, ignore
		 * the return value of this function because there is nothing
		 * different we would do if it failed */
		e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
		phy_data &= ~IGP02E1000_PM_SPD;
		e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
	}

	e1000_release_manageability(adapter);
}
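
/*
 * Flow-control arithmetic, for illustration: with a 20 KB Rx allocation
 * and a 1522-byte max frame, hwm = min(20480 * 9 / 10, 20480 - 1522)
 * = 18432.  Masking with 0xFFF8 keeps the required 8-byte granularity,
 * and the low-water mark sits 8 bytes below it, so XOFF is signalled
 * while there is still room for one more full frame in the FIFO.
 */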

int e1000e_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->state);

	napi_enable(&adapter->napi);
	e1000_irq_enable(adapter);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}

void e1000e_down(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__E1000_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_stop_queue(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	e1e_flush();
	msleep(10);

	napi_disable(&adapter->napi);
	e1000_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);
	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	e1000e_reset(adapter);
	e1000_clean_tx_ring(adapter);
	e1000_clean_rx_ring(adapter);

	/*
	 * TODO: for power management, we could drop the link and
	 * pci_disable_device here.
	 */
}

void e1000e_reinit_locked(struct e1000_adapter *adapter)
{
	might_sleep();
	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
		msleep(1);
	e1000e_down(adapter);
	e1000e_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->state);
}

/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
	adapter->rx_ps_bsize0 = 128;
	hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err;

	adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err;

	spin_lock_init(&adapter->tx_queue_lock);

	/* Explicitly disable IRQ since the NIC can be in any state. */
	atomic_set(&adapter->irq_sem, 0);
	e1000_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);

	set_bit(__E1000_DOWN, &adapter->state);
	return 0;

err:
	ndev_err(netdev, "Unable to allocate memory for queues\n");
	kfree(adapter->rx_ring);
	kfree(adapter->tx_ring);
	return -ENOMEM;
}

/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->state))
		return -EBUSY;

	/* allocate transmit descriptors */
	err = e1000e_setup_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000e_setup_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000e_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
		e1000_update_mng_vlan(adapter);

	/* If AMT is enabled, let the firmware know that the network
	 * interface is now open */
	if ((adapter->flags & FLAG_HAS_AMT) &&
	    e1000e_check_mng_mode(&adapter->hw))
		e1000_get_hw_control(adapter);

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so. */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000e_up() */
	clear_bit(__E1000_DOWN, &adapter->state);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return 0;

err_req_irq:
	e1000_release_hw_control(adapter);
	e1000_power_down_phy(adapter);
	e1000e_free_rx_resources(adapter);
err_setup_rx:
	e1000e_free_tx_resources(adapter);
err_setup_tx:
	e1000e_reset(adapter);

	return err;
}

/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
	e1000e_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000e_free_tx_resources(adapter);
	e1000e_free_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it) */
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    !(adapter->vlgrp &&
	      vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);

	/* If AMT is enabled, let the firmware know that the network
	 * interface is now closed */
	if ((adapter->flags & FLAG_HAS_AMT) &&
	    e1000e_check_mng_mode(&adapter->hw))
		e1000_release_hw_control(adapter);

	return 0;
}

/**
 * e1000_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_set_mac(struct net_device *netdev, void *p)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);

	e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
		/* activate the work around */
		e1000e_set_laa_state_82571(&adapter->hw, 1);

		/* Hold a copy of the LAA in RAR[14].  This is done so that
		 * between the time RAR[0] gets clobbered and the time it
		 * gets fixed (in e1000_watchdog), the actual LAA is in one
		 * of the RARs and no incoming packets directed to this port
		 * are dropped.  Eventually the LAA will be in RAR[0] and
		 * RAR[14] */
		e1000e_rar_set(&adapter->hw,
			       adapter->hw.mac.addr,
			       adapter->hw.mac.rar_entry_count - 1);
	}

	return 0;
}

/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy */
static void e1000_update_phy_info(unsigned long data)
{
	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
	e1000_get_phy_info(&adapter->hw);
}

/**
 * e1000e_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void e1000e_update_stats(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long irq_flags;
	u16 phy_tmp;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	spin_lock_irqsave(&adapter->stats_lock, irq_flags);

	/* these counters are modified from e1000_adjust_tbi_stats,
	 * called from the interrupt context, so they must only
	 * be written while holding adapter->stats_lock
	 */

	adapter->stats.crcerrs += er32(CRCERRS);
	adapter->stats.gprc += er32(GPRC);
	adapter->stats.gorcl += er32(GORCL);
	adapter->stats.gorch += er32(GORCH);
	adapter->stats.bprc += er32(BPRC);
	adapter->stats.mprc += er32(MPRC);
	adapter->stats.roc += er32(ROC);

	if (adapter->flags & FLAG_HAS_STATS_PTC_PRC) {
		adapter->stats.prc64 += er32(PRC64);
		adapter->stats.prc127 += er32(PRC127);
		adapter->stats.prc255 += er32(PRC255);
		adapter->stats.prc511 += er32(PRC511);
		adapter->stats.prc1023 += er32(PRC1023);
		adapter->stats.prc1522 += er32(PRC1522);
		adapter->stats.symerrs += er32(SYMERRS);
		adapter->stats.sec += er32(SEC);
	}

	adapter->stats.mpc += er32(MPC);
	adapter->stats.scc += er32(SCC);
	adapter->stats.ecol += er32(ECOL);
	adapter->stats.mcc += er32(MCC);
	adapter->stats.latecol += er32(LATECOL);
	adapter->stats.dc += er32(DC);
	adapter->stats.rlec += er32(RLEC);
	adapter->stats.xonrxc += er32(XONRXC);
	adapter->stats.xontxc += er32(XONTXC);
	adapter->stats.xoffrxc += er32(XOFFRXC);
	adapter->stats.xofftxc += er32(XOFFTXC);
	adapter->stats.fcruc += er32(FCRUC);
	adapter->stats.gptc += er32(GPTC);
	adapter->stats.gotcl += er32(GOTCL);
	adapter->stats.gotch += er32(GOTCH);
	adapter->stats.rnbc += er32(RNBC);
	adapter->stats.ruc += er32(RUC);
	adapter->stats.rfc += er32(RFC);
	adapter->stats.rjc += er32(RJC);
	adapter->stats.torl += er32(TORL);
	adapter->stats.torh += er32(TORH);
	adapter->stats.totl += er32(TOTL);
	adapter->stats.toth += er32(TOTH);
	adapter->stats.tpr += er32(TPR);

	if (adapter->flags & FLAG_HAS_STATS_PTC_PRC) {
		adapter->stats.ptc64 += er32(PTC64);
		adapter->stats.ptc127 += er32(PTC127);
		adapter->stats.ptc255 += er32(PTC255);
		adapter->stats.ptc511 += er32(PTC511);
		adapter->stats.ptc1023 += er32(PTC1023);
		adapter->stats.ptc1522 += er32(PTC1522);
	}

	adapter->stats.mptc += er32(MPTC);
	adapter->stats.bptc += er32(BPTC);

	/* used for adaptive IFS */

	hw->mac.tx_packet_delta = er32(TPT);
	adapter->stats.tpt += hw->mac.tx_packet_delta;
	hw->mac.collision_delta = er32(COLC);
	adapter->stats.colc += hw->mac.collision_delta;

	adapter->stats.algnerrc += er32(ALGNERRC);
	adapter->stats.rxerrc += er32(RXERRC);
	adapter->stats.tncrs += er32(TNCRS);
	adapter->stats.cexterr += er32(CEXTERR);
	adapter->stats.tsctc += er32(TSCTC);
	adapter->stats.tsctfc += er32(TSCTFC);

	adapter->stats.iac += er32(IAC);

	if (adapter->flags & FLAG_HAS_STATS_ICR_ICT) {
		adapter->stats.icrxoc += er32(ICRXOC);
		adapter->stats.icrxptc += er32(ICRXPTC);
		adapter->stats.icrxatc += er32(ICRXATC);
		adapter->stats.ictxptc += er32(ICTXPTC);
		adapter->stats.ictxatc += er32(ICTXATC);
		adapter->stats.ictxqec += er32(ICTXQEC);
		adapter->stats.ictxqmtc += er32(ICTXQMTC);
		adapter->stats.icrxdmtc += er32(ICRXDMTC);
	}

	/* Fill out the OS statistics structure */
	adapter->net_stats.rx_packets = adapter->stats.gprc;
	adapter->net_stats.tx_packets = adapter->stats.gptc;
	adapter->net_stats.rx_bytes = adapter->stats.gorcl;
	adapter->net_stats.tx_bytes = adapter->stats.gotcl;
	adapter->net_stats.multicast = adapter->stats.mprc;
	adapter->net_stats.collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	adapter->net_stats.rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	adapter->net_stats.rx_length_errors = adapter->stats.ruc +
					      adapter->stats.roc;
	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
	adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	adapter->net_stats.tx_errors = adapter->stats.ecol +
				       adapter->stats.latecol;
	adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
	adapter->net_stats.tx_window_errors = adapter->stats.latecol;
	adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		    (!e1e_rphy(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}
	}

	/* Management Stats */
	adapter->stats.mgptc += er32(MGTPTC);
	adapter->stats.mgprc += er32(MGTPRC);
	adapter->stats.mgpdc += er32(MGTPDC);

	spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
}
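
/*
 * The *CL/*CH pairs read above (GORCL/GORCH, TORL/TORH, ...) are the low
 * and high halves of 64-bit octet counters; the hardware statistics
 * registers clear on read, which is why every read is accumulated into
 * adapter->stats rather than stored directly.
 */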

static void e1000_print_link_info(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl = er32(CTRL);

	ndev_info(netdev,
		  "Link is Up %d Mbps %s, Flow Control: %s\n",
		  adapter->link_speed,
		  (adapter->link_duplex == FULL_DUPLEX) ?
		  "Full Duplex" : "Half Duplex",
		  ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
		  "RX/TX" :
		  ((ctrl & E1000_CTRL_RFCE) ? "RX" :
		  ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
}

/**
 * e1000_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void e1000_watchdog(unsigned long data)
{
	struct e1000_adapter *adapter = (struct e1000_adapter *) data;

	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);

	/* TODO: make this use queue_delayed_work() */
}

static void e1000_watchdog_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
					struct e1000_adapter, watchdog_task);

	struct net_device *netdev = adapter->netdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_hw *hw = &adapter->hw;
	u32 link, tctl;
	s32 ret_val;
	int tx_pending = 0;

	if ((netif_carrier_ok(netdev)) &&
	    (er32(STATUS) & E1000_STATUS_LU))
		goto link_up;

	ret_val = mac->ops.check_for_link(hw);
	if ((ret_val == E1000_ERR_PHY) &&
	    (adapter->hw.phy.type == e1000_phy_igp_3) &&
	    (er32(CTRL) &
	     E1000_PHY_CTRL_GBE_DISABLE)) {
		/* See e1000_kmrn_lock_loss_workaround_ich8lan() */
		ndev_info(netdev,
			  "Gigabit has been disabled, downgrading speed\n");
	}

	if ((e1000e_enable_tx_pkt_filtering(hw)) &&
	    (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
		e1000_update_mng_vlan(adapter);

	if ((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
	    !(er32(TXCW) & E1000_TXCW_ANE))
		link = adapter->hw.mac.serdes_has_link;
	else
		link = er32(STATUS) & E1000_STATUS_LU;

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			bool txb2b = 1;
			mac->ops.get_link_up_info(&adapter->hw,
						  &adapter->link_speed,
						  &adapter->link_duplex);
			e1000_print_link_info(adapter);
			/* tweak tx_queue_len according to speed/duplex
			 * and adjust the timeout factor */
			netdev->tx_queue_len = adapter->tx_queue_len;
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				txb2b = 0;
				netdev->tx_queue_len = 10;
				adapter->tx_timeout_factor = 14;
				break;
			case SPEED_100:
				txb2b = 0;
				netdev->tx_queue_len = 100;
				/* maybe add some timeout factor ? */
				break;
			}

			/* workaround: re-program speed mode bit after
			 * link-up event */
			if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
			    !txb2b) {
				u32 tarc0;
				tarc0 = er32(TARC0);
				tarc0 &= ~SPEED_MODE_BIT;
				ew32(TARC0, tarc0);
			}

			/* disable TSO for pcie and 10/100 speeds, to avoid
			 * some hardware issues */
			if (!(adapter->flags & FLAG_TSO_FORCE)) {
				switch (adapter->link_speed) {
				case SPEED_10:
				case SPEED_100:
					ndev_info(netdev,
						  "10/100 speed: disabling TSO\n");
					netdev->features &= ~NETIF_F_TSO;
					netdev->features &= ~NETIF_F_TSO6;
					break;
				case SPEED_1000:
					netdev->features |= NETIF_F_TSO;
					netdev->features |= NETIF_F_TSO6;
					break;
				default:
					/* oops */
					break;
				}
			}

			/* enable transmits in the hardware, need to do this
			 * after setting TARC0 */
			tctl = er32(TCTL);
			tctl |= E1000_TCTL_EN;
			ew32(TCTL, tctl);

			netif_carrier_on(netdev);
			netif_wake_queue(netdev);

			if (!test_bit(__E1000_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		} else {
			/* make sure the receive unit is started */
			if (adapter->flags & FLAG_RX_NEEDS_RESTART) {
				u32 rctl = er32(RCTL);
				ew32(RCTL, rctl |
				     E1000_RCTL_EN);
			}
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			ndev_info(netdev, "Link is Down\n");
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
			if (!test_bit(__E1000_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));

			if (adapter->flags & FLAG_RX_NEEDS_RESTART)
				schedule_work(&adapter->reset_task);
		}
	}

link_up:
	e1000e_update_stats(adapter);

	mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
	adapter->tpt_old = adapter->stats.tpt;
	mac->collision_delta = adapter->stats.colc - adapter->colc_old;
	adapter->colc_old = adapter->stats.colc;

	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
	adapter->gorcl_old = adapter->stats.gorcl;
	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
	adapter->gotcl_old = adapter->stats.gotcl;

	e1000e_update_adaptive(&adapter->hw);

	if (!netif_carrier_ok(netdev)) {
		tx_pending = (e1000_desc_unused(tx_ring) + 1 <
			      tx_ring->count);
		if (tx_pending) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
		}
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	ew32(ICS, E1000_ICS_RXDMT0);

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = 1;

	/* With 82571 controllers, LAA may be overwritten due to controller
	 * reset from the other port.  Set the appropriate LAA in RAR[0] */
	if (e1000e_get_laa_state_82571(hw))
		e1000e_rar_set(hw, adapter->hw.mac.addr, 0);

	/* Reset the timer */
	if (!test_bit(__E1000_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
}

#define E1000_TX_FLAGS_CSUM		0x00000001
#define E1000_TX_FLAGS_VLAN		0x00000002
#define E1000_TX_FLAGS_TSO		0x00000004
#define E1000_TX_FLAGS_IPV4		0x00000008
#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT	16

static int e1000_tso(struct e1000_adapter *adapter,
		     struct sk_buff *skb)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_context_desc *context_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	u32 cmd_length = 0;
	u16 ipcse = 0, tucse, mss;
	u8 ipcss, ipcso, tucss, tucso, hdr_len;
	int err;

	if (skb_is_gso(skb)) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		mss = skb_shinfo(skb)->gso_size;
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			cmd_length = E1000_TXD_CMD_IP;
			ipcse = skb_transport_offset(skb) - 1;
		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
			ipcse = 0;
		}
		ipcss = skb_network_offset(skb);
		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
		tucss = skb_transport_offset(skb);
		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
		tucse = 0;

		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));

		i = tx_ring->next_to_use;
		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];

		context_desc->lower_setup.ip_fields.ipcss = ipcss;
		context_desc->lower_setup.ip_fields.ipcso = ipcso;
		context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
		context_desc->upper_setup.tcp_fields.tucss = tucss;
		context_desc->upper_setup.tcp_fields.tucso = tucso;
		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
		context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
		context_desc->cmd_and_length = cpu_to_le32(cmd_length);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return 1;
	}

	return 0;
}
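
/*
 * Context-descriptor offsets, for illustration: an untagged IPv4/TCP
 * frame has ipcss = 14 (start of the IP header), tucss = 34 (start of
 * TCP) and, with no TCP options, hdr_len = 34 + 20 = 54; the hardware
 * replays that 54-byte header in front of every mss-sized segment.
 */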

static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_context_desc *context_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	u8 css;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		css = skb_transport_offset(skb);

		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);

		context_desc->lower_setup.ip_config = 0;
		context_desc->upper_setup.tcp_fields.tucss = css;
		context_desc->upper_setup.tcp_fields.tucso =
			css + skb->csum_offset;
		context_desc->upper_setup.tcp_fields.tucse = 0;
		context_desc->tcp_seg_setup.data = 0;
		context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return 1;
	}

	return 0;
}
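
/*
 * For example, for TCP over untagged IPv4: css = 34 and skb->csum_offset
 * = 16, so tucso = 50 points the hardware at the TCP checksum field,
 * 16 bytes into the transport header.
 */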

#define E1000_MAX_PER_TXD	8192
#define E1000_MAX_TXD_PWR	12

static int e1000_tx_map(struct e1000_adapter *adapter,
			struct sk_buff *skb, unsigned int first,
			unsigned int max_per_txd, unsigned int nr_frags,
			unsigned int mss)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_buffer *buffer_info;
	unsigned int len = skb->len - skb->data_len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int f;

	i = tx_ring->next_to_use;

	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, max_per_txd);

		/* Workaround for premature desc write-backs
		 * in TSO mode.  Append 4-byte sentinel desc */
		if (mss && !nr_frags && size == len && size > 8)
			size -= 4;

		buffer_info->length = size;
		/* set time_stamp *before* dma to help avoid a possible race */
		buffer_info->time_stamp = jiffies;
		buffer_info->dma =
			pci_map_single(adapter->pdev,
				       skb->data + offset,
				       size,
				       PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(buffer_info->dma)) {
			dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
			adapter->tx_dma_failed++;
			return -1;
		}
		buffer_info->next_to_watch = i;

		len -= size;
		offset += size;
		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = frag->page_offset;

		while (len) {
			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, max_per_txd);
			/* Workaround for premature desc write-backs
			 * in TSO mode.  Append 4-byte sentinel desc */
			if (mss && f == (nr_frags-1) && size == len && size > 8)
				size -= 4;

			buffer_info->length = size;
			buffer_info->time_stamp = jiffies;
			buffer_info->dma =
				pci_map_page(adapter->pdev,
					     frag->page,
					     offset,
					     size,
					     PCI_DMA_TODEVICE);
			if (pci_dma_mapping_error(buffer_info->dma)) {
				dev_err(&adapter->pdev->dev,
					"TX DMA page map failed\n");
				adapter->tx_dma_failed++;
				return -1;
			}

			buffer_info->next_to_watch = i;

			len -= size;
			offset += size;
			count++;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}

	if (i == 0)
		i = tx_ring->count - 1;
	else
		i--;

	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;
}
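
/*
 * Descriptor-splitting example for e1000_tx_map() (editorial illustration):
 * a linear 9000-byte TSO payload with max_per_txd = 8192 maps as an
 * 8192-byte descriptor, then an 804-byte one (the 808-byte remainder is
 * trimmed because size == len && size > 8), and finally the leftover
 * 4 bytes map as the tiny sentinel descriptor that works around the
 * premature write-back erratum mentioned in the comments above.
 */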

static void e1000_tx_queue(struct e1000_adapter *adapter,
			   int tx_flags, int count)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc = NULL;
	struct e1000_buffer *buffer_info;
	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
	unsigned int i;

	if (tx_flags & E1000_TX_FLAGS_TSO) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
			     E1000_TXD_CMD_TSE;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;

		if (tx_flags & E1000_TX_FLAGS_IPV4)
			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
	}

	if (tx_flags & E1000_TX_FLAGS_CSUM) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	}

	if (tx_flags & E1000_TX_FLAGS_VLAN) {
		txd_lower |= E1000_TXD_CMD_VLE;
		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
	}

	i = tx_ring->next_to_use;

	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->lower.data =
			cpu_to_le32(txd_lower | buffer_info->length);
		tx_desc->upper.data = cpu_to_le32(txd_upper);

		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
	mmiowb();
}
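
/*
 * Flag-composition example for e1000_tx_queue() (editorial illustration,
 * assuming the VLAN tag lives in the upper 16 bits of tx_flags): a
 * VLAN-tagged packet with checksum offload ends up with txd_lower =
 * E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
 * E1000_TXD_CMD_VLE OR'ed with the buffer length, while txd_upper carries
 * E1000_TXD_POPTS_TXSM << 8 plus the VLAN tag in its top half.
 */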

#define MINIMUM_DHCP_PACKET_SIZE 282
static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
				    struct sk_buff *skb)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 length, offset;

	if (vlan_tx_tag_present(skb)) {
		if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id)
		    && (adapter->hw.mng_cookie.status &
			E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
			return 0;
	}

	if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
		return 0;

	if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP))
		return 0;

	{
		const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14);
		struct udphdr *udp;

		if (ip->protocol != IPPROTO_UDP)
			return 0;

		udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
		if (ntohs(udp->dest) != 67)
			return 0;

		offset = (u8 *)udp + 8 - skb->data;
		length = skb->len - offset;
		return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
	}
}
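
/*
 * Where 282 comes from (editorial note): 14 bytes of Ethernet header +
 * 20 bytes of IPv4 header (no options) + 8 bytes of UDP header + 236
 * fixed BOOTP bytes + the 4-byte DHCP magic cookie = 282, the smallest
 * frame that can carry a complete DHCP message.  Port 67, checked above,
 * is the BOOTP/DHCP server port, so only client-to-server traffic is
 * handed to the manageability firmware.
 */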

static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_stop_queue(netdev);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available. */
	if (e1000_desc_unused(adapter->tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	++adapter->restart_queue;
	return 0;
}

static int e1000_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (e1000_desc_unused(adapter->tx_ring) >= size)
		return 0;
	return __e1000_maybe_stop_tx(netdev, size);
}
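
/*
 * Why stop first and re-check after (editorial note): between the
 * lock-free check in e1000_maybe_stop_tx() and netif_stop_queue(), the
 * Tx cleanup path on another CPU may free descriptors.  Stopping the
 * queue and re-reading e1000_desc_unused() after smp_mb() makes the race
 * harmless: if, say, cleanup freed 64 descriptors in that window, the
 * second check sees them and the queue is restarted immediately.
 */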

#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int first;
	unsigned int max_per_txd = E1000_MAX_PER_TXD;
	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
	unsigned int tx_flags = 0;
	unsigned int len = skb->len;
	unsigned long irq_flags;
	unsigned int nr_frags = 0;
	unsigned int mss = 0;
	int count = 0;
	int tso;
	unsigned int f;
	len -= skb->data_len;

	if (test_bit(__E1000_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	mss = skb_shinfo(skb)->gso_size;
	/* The controller does a simple calculation to make sure there is
	 * enough room in the FIFO before initiating the DMA for each
	 * buffer.  That check assumes ceil(buffer len / mss) <= 4, so to
	 * make sure we don't overrun the FIFO, cap the max buffer length
	 * at 4 * mss whenever mss drops. */
	if (mss) {
		u8 hdr_len;
		max_per_txd = min(mss << 2, max_per_txd);
		max_txd_pwr = fls(max_per_txd) - 1;

		/* TSO Workaround for 82571/2/3 Controllers -- if skb->data
		 * points to just header, pull a few bytes of payload from
		 * frags into skb->data */
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) {
			unsigned int pull_size;

			pull_size = min((unsigned int)4, skb->data_len);
			if (!__pskb_pull_tail(skb, pull_size)) {
				ndev_err(netdev,
					 "__pskb_pull_tail failed.\n");
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
			len = skb->len - skb->data_len;
		}
	}

	/* reserve a descriptor for the offload context */
	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
		count++;
	count++;

	count += TXD_USE_COUNT(len, max_txd_pwr);

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
				       max_txd_pwr);

	if (adapter->hw.mac.tx_pkt_filtering)
		e1000_transfer_dhcp_info(adapter, skb);

	if (!spin_trylock_irqsave(&adapter->tx_queue_lock, irq_flags))
		/* Collision - tell upper layer to requeue */
		return NETDEV_TX_LOCKED;

	/* need: count + 2 desc gap to keep tail from touching
	 * head, otherwise try next time */
	if (e1000_maybe_stop_tx(netdev, count + 2)) {
		spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
		return NETDEV_TX_BUSY;
	}

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= E1000_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
	}

	first = tx_ring->next_to_use;

	tso = e1000_tso(adapter, skb);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= E1000_TX_FLAGS_TSO;
	else if (e1000_tx_csum(adapter, skb))
		tx_flags |= E1000_TX_FLAGS_CSUM;

	/* The old method assumed an IPv4 packet by default whenever TSO
	 * was enabled, but 82571 hardware supports TSO for IPv6 as well,
	 * so we can no longer assume; check the protocol explicitly. */
	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= E1000_TX_FLAGS_IPV4;

	count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
	if (count < 0) {
		/* handle pci_map_single() error in e1000_tx_map */
		dev_kfree_skb_any(skb);
		spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
		return NETDEV_TX_BUSY;
	}

	e1000_tx_queue(adapter, tx_flags, count);

	netdev->trans_start = jiffies;

	/* Make sure there is space in the ring for the next send. */
	e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);

	spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
	return NETDEV_TX_OK;
}
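
/*
 * Descriptor budget example for e1000_xmit_frame() (editorial
 * illustration): a 1514-byte linear, non-TSO skb with CHECKSUM_PARTIAL
 * needs count = 1 (context descriptor) + 1 (reserved unconditionally)
 * + TXD_USE_COUNT(1514, 12) = (1514 >> 12) + 1 = 1 data descriptor, so
 * e1000_maybe_stop_tx() is asked for count + 2 = 5 free entries.
 * TXD_USE_COUNT() always rounds up, so the budget may over-reserve
 * slightly, which errs on the safe side for the ring-full check.
 */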

/**
 * e1000_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void e1000_tx_timeout(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}

static void e1000_reset_task(struct work_struct *work)
{
	struct e1000_adapter *adapter;
	adapter = container_of(work, struct e1000_adapter, reset_task);

	e1000e_reinit_locked(adapter);
}

/**
 * e1000_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}

/**
 * e1000_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		ndev_err(netdev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	/* Jumbo frame size limits */
	if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
		if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
			ndev_err(netdev, "Jumbo Frames not supported.\n");
			return -EINVAL;
		}
		if (adapter->hw.phy.type == e1000_phy_ife) {
			ndev_err(netdev, "Jumbo Frames not supported.\n");
			return -EINVAL;
		}
	}

#define MAX_STD_JUMBO_FRAME_SIZE 9234
	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		ndev_err(netdev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
		msleep(1);
	/* e1000e_down has a dependency on max_frame_size */
	adapter->hw.mac.max_frame_size = max_frame;
	if (netif_running(netdev))
		e1000e_down(adapter);

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 * However, with the new *_jumbo* routines, jumbo receives will use
	 * fragmented skbs */

	if (max_frame <= 256)
		adapter->rx_buffer_len = 256;
	else if (max_frame <= 512)
		adapter->rx_buffer_len = 512;
	else if (max_frame <= 1024)
		adapter->rx_buffer_len = 1024;
	else if (max_frame <= 2048)
		adapter->rx_buffer_len = 2048;
	else
		adapter->rx_buffer_len = 4096;

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
	    (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
		adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
					 + ETH_FCS_LEN;

	ndev_info(netdev, "changing MTU from %d to %d\n",
		  netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		e1000e_up(adapter);
	else
		e1000e_reset(adapter);

	clear_bit(__E1000_RESETTING, &adapter->state);

	return 0;
}
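
/*
 * Buffer-sizing example for e1000_change_mtu() (editorial illustration):
 * new_mtu = 1500 gives max_frame = 1518; the ladder above first picks a
 * 2048-byte buffer and the LPE adjustment then tightens it to
 * ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN = 1522 bytes.  A jumbo
 * new_mtu = 9000 gives max_frame = 9018 and a 4096-byte buffer, with the
 * remainder handled by the fragmented jumbo receive path noted above.
 */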

static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);
	unsigned long irq_flags;

	if (adapter->hw.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		spin_lock_irqsave(&adapter->stats_lock, irq_flags);
		if (e1e_rphy(&adapter->hw, data->reg_num & 0x1F,
			     &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock,
					       irq_flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
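
/*
 * Userspace sketch of the MII ioctls handled above (editorial
 * illustration, not part of the driver): SIOCGMIIPHY fills in the PHY
 * address, after which SIOCGMIIREG can read a register, e.g. BMSR:
 *
 *	struct ifreq ifr;
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	// fd is any AF_INET socket
 *	mii->reg_num = MII_BMSR;
 *	ioctl(fd, SIOCGMIIREG, &ifr);	// value returned in mii->val_out
 */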

static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return e1000_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	u32 wufc = adapter->wol;
	int retval = 0;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
		e1000e_down(adapter);
		e1000_free_irq(adapter);
	}

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_multi(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = er32(RCTL);
			rctl |= E1000_RCTL_MPE;
			ew32(RCTL, rctl);
		}

		ctrl = er32(CTRL);
		/* advertise wake from D3Cold */
#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC |
			E1000_CTRL_EN_PHY_PWR_MGMT;
		ew32(CTRL, ctrl);

		if (adapter->hw.media_type == e1000_media_type_fiber ||
		    adapter->hw.media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		/* Allow time for pending master requests to run */
		e1000e_disable_pcie_master(&adapter->hw);

		ew32(WUC, E1000_WUC_PME_EN);
		ew32(WUFC, wufc);
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);
	}

	e1000_release_manageability(adapter);

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->flags & FLAG_MNG_PT_ENABLED) {
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	}

	if (adapter->hw.phy.type == e1000_phy_igp_3)
		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	e1000_release_hw_control(adapter);

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
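
/*
 * Wake filter recap for e1000_suspend() (editorial note): adapter->wol is
 * a mask of WUFC bits, e.g. E1000_WUFC_MAG arms Magic Packet wake and
 * E1000_WUFC_LNKC arms wake on link-status change.  LNKC is masked off
 * above when link is already up at suspend time, presumably so that the
 * link transition caused by suspending does not immediately wake the
 * system back up.
 */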

#ifdef CONFIG_PM
static int e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"Cannot enable PCI device from suspend\n");
		return err;
	}

	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	e1000e_power_up_phy(adapter);
	e1000e_reset(adapter);
	ew32(WUS, ~0);

	e1000_init_manageability(adapter);

	if (netif_running(netdev))
		e1000e_up(adapter);

	netif_device_attach(netdev);

	/* If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up.  For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver. */
	if (!(adapter->flags & FLAG_HAS_AMT) ||
	    !e1000e_check_mng_mode(&adapter->hw))
		e1000_get_hw_control(adapter);

	return 0;
}
#endif

static void e1000_shutdown(struct pci_dev *pdev)
{
	e1000_suspend(pdev, PMSG_SUSPEND);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev);

	e1000_clean_tx_irq(adapter);

	enable_irq(adapter->pdev->irq);
}
#endif
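
/*
 * Editorial note on the netpoll path: disable_irq() both masks the line
 * and waits for any handler already running to finish, so the manual
 * e1000_intr() call above cannot race the real interrupt handler.
 */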

/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev))
		e1000e_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000e_reset(adapter);
	ew32(WUS, ~0);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability(adapter);

	if (netif_running(netdev)) {
		if (e1000e_up(adapter)) {
			dev_err(&pdev->dev,
				"can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up.  For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver. */
	if (!(adapter->flags & FLAG_HAS_AMT) ||
	    !e1000e_check_mng_mode(&adapter->hw))
		e1000_get_hw_control(adapter);
}

static void e1000_print_device_info(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 part_num;

	/* print bus type/speed/width info */
	ndev_info(netdev, "(PCI Express:2.5Gb/s:%s) "
		  "%02x:%02x:%02x:%02x:%02x:%02x\n",
		  /* bus width */
		  ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		   "Width x1"),
		  /* MAC address */
		  netdev->dev_addr[0], netdev->dev_addr[1],
		  netdev->dev_addr[2], netdev->dev_addr[3],
		  netdev->dev_addr[4], netdev->dev_addr[5]);
	ndev_info(netdev, "Intel(R) PRO/%s Network Connection\n",
		  (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
	e1000e_read_part_num(hw, &part_num);
	ndev_info(netdev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
		  hw->mac.type, hw->phy.type,
		  (part_num >> 8), (part_num & 0xff));
}

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit e1000_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;
	const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	unsigned long flash_start, flash_len;

	static int cards_found;
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (!err)
			pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_32BIT_MASK);
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_regions(pdev, e1000e_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_MODULE_OWNER(netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ei = ei;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->hw.adapter = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	if ((adapter->flags & FLAG_HAS_FLASH) &&
	    (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		flash_start = pci_resource_start(pdev, 1);
		flash_len = pci_resource_len(pdev, 1);
		adapter->hw.flash_address = ioremap(flash_start, flash_len);
		if (!adapter->hw.flash_address)
			goto err_flashmap;
	}

	/* construct the net_device struct */
	netdev->open = &e1000_open;
	netdev->stop = &e1000_close;
	netdev->hard_start_xmit = &e1000_xmit_frame;
	netdev->get_stats = &e1000_get_stats;
	netdev->set_multicast_list = &e1000_set_multi;
	netdev->set_mac_address = &e1000_set_mac;
	netdev->change_mtu = &e1000_change_mtu;
	netdev->do_ioctl = &e1000_ioctl;
	e1000e_set_ethtool_ops(netdev);
	netdev->tx_timeout = &e1000_tx_timeout;
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
	netdev->vlan_rx_register = e1000_vlan_rx_register;
	netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
	netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = e1000_netpoll;
#endif
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	adapter->bd_number = cards_found++;

	/* setup adapter struct */
	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;

	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));

	err = ei->get_invariants(adapter);
	if (err)
		goto err_hw_init;

	hw->mac.ops.get_bus_info(&adapter->hw);

	adapter->hw.phy.wait_for_link = 0;

	/* Copper options */
	if (adapter->hw.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = 0;
		adapter->hw.phy.ms_type = e1000_ms_hw_default;
	}

	if (e1000_check_reset_block(&adapter->hw))
		ndev_info(netdev,
			  "PHY reset is blocked due to SOL/IDER session.\n");

	netdev->features = NETIF_F_SG |
			   NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX;

	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
		netdev->features |= NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	/* We should not be using LLTX anymore, but transmit is still
	 * faster with it. */
	netdev->features |= NETIF_F_LLTX;

	if (e1000e_enable_mng_pass_thru(&adapter->hw))
		adapter->flags |= FLAG_MNG_PT_ENABLED;

	/* before reading the NVM, reset the controller to
	 * put the device in a known good starting state */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);

	/*
	 * systems with ASPM and others may see the checksum fail on the first
	 * attempt. Let's give it a few tries
	 */
	for (i = 0;; i++) {
		if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
			break;
		if (i == 2) {
			ndev_err(netdev, "The NVM Checksum Is Not Valid\n");
			err = -EIO;
			goto err_eeprom;
		}
	}

	/* copy the MAC address out of the NVM */
	if (e1000e_read_mac_addr(&adapter->hw))
		ndev_err(netdev, "NVM Read Error while reading MAC address\n");

	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		ndev_err(netdev, "Invalid MAC Address: "
			 "%02x:%02x:%02x:%02x:%02x:%02x\n",
			 netdev->perm_addr[0], netdev->perm_addr[1],
			 netdev->perm_addr[2], netdev->perm_addr[3],
			 netdev->perm_addr[4], netdev->perm_addr[5]);
		err = -EIO;
		goto err_eeprom;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &e1000_watchdog;
	adapter->watchdog_timer.data = (unsigned long) adapter;

	init_timer(&adapter->phy_info_timer);
	adapter->phy_info_timer.function = &e1000_update_phy_info;
	adapter->phy_info_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->reset_task, e1000_reset_task);
	INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);

	e1000e_check_options(adapter);

	/* Initialize link parameters. User can change them with ethtool */
	adapter->hw.mac.autoneg = 1;
	adapter->hw.mac.original_fc = e1000_fc_default;
	adapter->hw.mac.fc = e1000_fc_default;
	adapter->hw.phy.autoneg_advertised = 0x2f;

	/* ring size defaults */
	adapter->rx_ring->count = 256;
	adapter->tx_ring->count = 256;

	/*
	 * Initial Wake on LAN setting - If APM wake is enabled in
	 * the EEPROM, enable the ACPI Magic Packet filter
	 */
	if (adapter->flags & FLAG_APME_IN_WUC) {
		/* APME bit in EEPROM is mapped to WUC.APME */
		eeprom_data = er32(WUC);
		eeprom_apme_mask = E1000_WUC_APME;
	} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
		if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
		    (adapter->hw.bus.func == 1))
			e1000_read_nvm(&adapter->hw,
				NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
		else
			e1000_read_nvm(&adapter->hw,
				NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	}

	/* fetch WoL from EEPROM */
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/*
	 * now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port
	 */
	if (!(adapter->flags & FLAG_HAS_WOL))
		adapter->eeprom_wol = 0;

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;

	/* reset the hardware with the new settings */
	e1000e_reset(adapter);

	/* If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up.  For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver. */
	if (!(adapter->flags & FLAG_HAS_AMT) ||
	    !e1000e_check_mng_mode(&adapter->hw))
		e1000_get_hw_control(adapter);

	/* tell the stack to leave us alone until e1000_open() is called */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	e1000_print_device_info(adapter);

	return 0;

err_register:
err_hw_init:
	e1000_release_hw_control(adapter);
err_eeprom:
	if (!e1000_check_reset_block(&adapter->hw))
		e1000_phy_hw_reset(&adapter->hw);

	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);

err_flashmap:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
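
/*
 * Editorial note on the unwind above: the error labels release resources
 * in reverse order of acquisition, the usual kernel goto-cleanup pattern.
 * Jumping to err_flashmap before e1000_sw_init() has allocated the rings
 * is still safe because alloc_etherdev() zeroes the private area and
 * kfree(NULL) is a no-op.
 */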

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* flush_scheduled_work() may reschedule our watchdog task, so
	 * explicitly stop the watchdog tasks from being rescheduled */
	set_bit(__E1000_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	flush_scheduled_work();

	e1000_release_manageability(adapter);

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	e1000_release_hw_control(adapter);

	unregister_netdev(netdev);

	if (!e1000_check_reset_block(&adapter->hw))
		e1000_phy_hw_reset(&adapter->hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.hw_addr);
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	pci_release_regions(pdev);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/* PCI Error Recovery (ERS) */
static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};
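
/*
 * Recovery flow (editorial note): on a PCI bus error the core invokes
 * .error_detected (quiesce and detach), then .slot_reset once the link
 * has been reset (re-enable the device and reinitialize the hardware),
 * and finally .resume when traffic may flow again (bring the interface
 * back up and reclaim hw control).
 */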

static struct pci_device_id e1000_pci_tbl[] = {
	/*
	 * Support for 82571/2/3, es2lan and ich8 will be phased in
	 * stepwise.

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
	*/

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },

	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

/* PCI Device API Driver */
static struct pci_driver e1000_driver = {
	.name = e1000e_driver_name,
	.id_table = e1000_pci_tbl,
	.probe = e1000_probe,
	.remove = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend = e1000_suspend,
	.resume = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n",
	       e1000e_driver_name, e1000e_driver_version);
	printk(KERN_INFO "%s: Copyright (c) 1999-2007 Intel Corporation.\n",
	       e1000e_driver_name);
	ret = pci_register_driver(&e1000_driver);

	return ret;
}
module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}
module_exit(e1000_exit_module);


MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* e1000_main.c */