6064 ixgbe needs X550 support
--- old/usr/src/uts/common/io/ixgbe/ixgbe_main.c
+++ new/usr/src/uts/common/io/ixgbe/ixgbe_main.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
[ 21 lines elided ]
22 22 /*
23 23 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 28 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
29 29 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
30 30 * Copyright (c) 2013 Saso Kiselkov. All rights reserved.
31 31 * Copyright (c) 2013 OSN Online Service Nuernberg GmbH. All rights reserved.
32 + * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
32 33 */
33 34
34 35 #include "ixgbe_sw.h"
35 36
36 37 static char ixgbe_ident[] = "Intel 10Gb Ethernet";
37 -static char ixgbe_version[] = "ixgbe 1.1.7";
38 38
39 39 /*
40 40 * Local function prototypes
41 41 */
42 42 static int ixgbe_register_mac(ixgbe_t *);
43 43 static int ixgbe_identify_hardware(ixgbe_t *);
44 44 static int ixgbe_regs_map(ixgbe_t *);
45 45 static void ixgbe_init_properties(ixgbe_t *);
46 46 static int ixgbe_init_driver_settings(ixgbe_t *);
47 47 static void ixgbe_init_locks(ixgbe_t *);
48 48 static void ixgbe_destroy_locks(ixgbe_t *);
49 49 static int ixgbe_init(ixgbe_t *);
50 50 static int ixgbe_chip_start(ixgbe_t *);
51 51 static void ixgbe_chip_stop(ixgbe_t *);
52 52 static int ixgbe_reset(ixgbe_t *);
53 53 static void ixgbe_tx_clean(ixgbe_t *);
54 54 static boolean_t ixgbe_tx_drain(ixgbe_t *);
55 55 static boolean_t ixgbe_rx_drain(ixgbe_t *);
56 56 static int ixgbe_alloc_rings(ixgbe_t *);
57 57 static void ixgbe_free_rings(ixgbe_t *);
[ 10 lines elided ]
58 58 static int ixgbe_alloc_rx_data(ixgbe_t *);
59 59 static void ixgbe_free_rx_data(ixgbe_t *);
60 60 static void ixgbe_setup_rings(ixgbe_t *);
61 61 static void ixgbe_setup_rx(ixgbe_t *);
62 62 static void ixgbe_setup_tx(ixgbe_t *);
63 63 static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
64 64 static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
65 65 static void ixgbe_setup_rss(ixgbe_t *);
66 66 static void ixgbe_setup_vmdq(ixgbe_t *);
67 67 static void ixgbe_setup_vmdq_rss(ixgbe_t *);
68 +static void ixgbe_setup_rss_table(ixgbe_t *);
68 69 static void ixgbe_init_unicst(ixgbe_t *);
69 70 static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
70 71 static void ixgbe_setup_multicst(ixgbe_t *);
71 72 static void ixgbe_get_hw_state(ixgbe_t *);
72 73 static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe);
73 74 static void ixgbe_get_conf(ixgbe_t *);
74 75 static void ixgbe_init_params(ixgbe_t *);
75 76 static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
76 77 static void ixgbe_driver_link_check(ixgbe_t *);
77 78 static void ixgbe_sfp_check(void *);
78 79 static void ixgbe_overtemp_check(void *);
80 +static void ixgbe_phy_check(void *);
79 81 static void ixgbe_link_timer(void *);
80 82 static void ixgbe_local_timer(void *);
81 83 static void ixgbe_arm_watchdog_timer(ixgbe_t *);
82 84 static void ixgbe_restart_watchdog_timer(ixgbe_t *);
83 85 static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
84 86 static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
85 87 static boolean_t is_valid_mac_addr(uint8_t *);
86 88 static boolean_t ixgbe_stall_check(ixgbe_t *);
87 89 static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
88 90 static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
89 91 static boolean_t ixgbe_find_mac_address(ixgbe_t *);
90 92 static int ixgbe_alloc_intrs(ixgbe_t *);
91 93 static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
92 94 static int ixgbe_add_intr_handlers(ixgbe_t *);
93 95 static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
94 96 static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
95 97 static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
96 98 static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
97 99 static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
98 100 static uint32_t ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index);
99 101 static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
100 102 static void ixgbe_setup_adapter_vector(ixgbe_t *);
101 103 static void ixgbe_rem_intr_handlers(ixgbe_t *);
102 104 static void ixgbe_rem_intrs(ixgbe_t *);
103 105 static int ixgbe_enable_intrs(ixgbe_t *);
104 106 static int ixgbe_disable_intrs(ixgbe_t *);
105 107 static uint_t ixgbe_intr_legacy(void *, void *);
106 108 static uint_t ixgbe_intr_msi(void *, void *);
107 109 static uint_t ixgbe_intr_msix(void *, void *);
108 110 static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
109 111 static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
110 112 static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
111 113 static void ixgbe_get_driver_control(struct ixgbe_hw *);
112 114 static int ixgbe_addmac(void *, const uint8_t *);
113 115 static int ixgbe_remmac(void *, const uint8_t *);
114 116 static void ixgbe_release_driver_control(struct ixgbe_hw *);
115 117
116 118 static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
117 119 static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
118 120 static int ixgbe_resume(dev_info_t *);
119 121 static int ixgbe_suspend(dev_info_t *);
120 122 static int ixgbe_quiesce(dev_info_t *);
121 123 static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
122 124 static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
123 125 static int ixgbe_cbfunc(dev_info_t *, ddi_cb_action_t, void *, void *, void *);
124 126 static int ixgbe_intr_cb_register(ixgbe_t *);
125 127 static int ixgbe_intr_adjust(ixgbe_t *, ddi_cb_action_t, int);
126 128
127 129 static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
128 130 const void *impl_data);
129 131 static void ixgbe_fm_init(ixgbe_t *);
130 132 static void ixgbe_fm_fini(ixgbe_t *);
131 133
132 134 char *ixgbe_priv_props[] = {
133 135 "_tx_copy_thresh",
134 136 "_tx_recycle_thresh",
135 137 "_tx_overload_thresh",
136 138 "_tx_resched_thresh",
137 139 "_rx_copy_thresh",
138 140 "_rx_limit_per_intr",
139 141 "_intr_throttling",
140 142 "_adv_pause_cap",
141 143 "_adv_asym_pause_cap",
142 144 NULL
143 145 };
144 146
145 147 #define IXGBE_MAX_PRIV_PROPS \
146 148 (sizeof (ixgbe_priv_props) / sizeof (mac_priv_prop_t))
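Note that ixgbe_priv_props above is an array of char *, so this macro only yields the element count when sizeof (mac_priv_prop_t) happens to match sizeof (char *); the self-adjusting form of the idiom, for comparison (illustrative only, not part of this change):

	/* Illustrative, type-safe element-count form: */
	#define	IXGBE_MAX_PRIV_PROPS \
		(sizeof (ixgbe_priv_props) / sizeof (ixgbe_priv_props[0]))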
147 149
148 150 static struct cb_ops ixgbe_cb_ops = {
149 151 nulldev, /* cb_open */
150 152 nulldev, /* cb_close */
151 153 nodev, /* cb_strategy */
152 154 nodev, /* cb_print */
153 155 nodev, /* cb_dump */
154 156 nodev, /* cb_read */
155 157 nodev, /* cb_write */
156 158 nodev, /* cb_ioctl */
157 159 nodev, /* cb_devmap */
158 160 nodev, /* cb_mmap */
159 161 nodev, /* cb_segmap */
160 162 nochpoll, /* cb_chpoll */
161 163 ddi_prop_op, /* cb_prop_op */
162 164 NULL, /* cb_stream */
163 165 D_MP | D_HOTPLUG, /* cb_flag */
164 166 CB_REV, /* cb_rev */
165 167 nodev, /* cb_aread */
166 168 nodev /* cb_awrite */
167 169 };
168 170
169 171 static struct dev_ops ixgbe_dev_ops = {
170 172 DEVO_REV, /* devo_rev */
171 173 0, /* devo_refcnt */
172 174 NULL, /* devo_getinfo */
173 175 nulldev, /* devo_identify */
174 176 nulldev, /* devo_probe */
175 177 ixgbe_attach, /* devo_attach */
176 178 ixgbe_detach, /* devo_detach */
177 179 nodev, /* devo_reset */
178 180 &ixgbe_cb_ops, /* devo_cb_ops */
179 181 NULL, /* devo_bus_ops */
180 182 ddi_power, /* devo_power */
181 183 ixgbe_quiesce, /* devo_quiesce */
182 184 };
183 185
184 186 static struct modldrv ixgbe_modldrv = {
185 187 &mod_driverops, /* Type of module. This one is a driver */
186 188 ixgbe_ident, /* Description string */
187 189 &ixgbe_dev_ops /* driver ops */
188 190 };
189 191
190 192 static struct modlinkage ixgbe_modlinkage = {
191 193 MODREV_1, &ixgbe_modldrv, NULL
192 194 };
193 195
194 196 /*
195 197 * Access attributes for register mapping
196 198 */
197 199 ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
198 200 DDI_DEVICE_ATTR_V1,
199 201 DDI_STRUCTURE_LE_ACC,
200 202 DDI_STRICTORDER_ACC,
201 203 DDI_FLAGERR_ACC
202 204 };
203 205
204 206 /*
205 207 * Loopback property
206 208 */
207 209 static lb_property_t lb_normal = {
208 210 normal, "normal", IXGBE_LB_NONE
209 211 };
210 212
211 213 static lb_property_t lb_mac = {
212 214 internal, "MAC", IXGBE_LB_INTERNAL_MAC
213 215 };
214 216
215 217 static lb_property_t lb_external = {
216 218 external, "External", IXGBE_LB_EXTERNAL
217 219 };
218 220
219 221 #define IXGBE_M_CALLBACK_FLAGS \
220 222 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
221 223
222 224 static mac_callbacks_t ixgbe_m_callbacks = {
223 225 IXGBE_M_CALLBACK_FLAGS,
224 226 ixgbe_m_stat,
225 227 ixgbe_m_start,
226 228 ixgbe_m_stop,
227 229 ixgbe_m_promisc,
228 230 ixgbe_m_multicst,
229 231 NULL,
230 232 NULL,
231 233 NULL,
232 234 ixgbe_m_ioctl,
233 235 ixgbe_m_getcapab,
234 236 NULL,
235 237 NULL,
236 238 ixgbe_m_setprop,
237 239 ixgbe_m_getprop,
238 240 ixgbe_m_propinfo
239 241 };
240 242
241 243 /*
242 244 * Initialize capabilities of each supported adapter type
243 245 */
244 246 static adapter_info_t ixgbe_82598eb_cap = {
245 247 64, /* maximum number of rx queues */
246 248 1, /* minimum number of rx queues */
247 249 64, /* default number of rx queues */
248 250 16, /* maximum number of rx groups */
249 251 1, /* minimum number of rx groups */
250 252 1, /* default number of rx groups */
251 253 32, /* maximum number of tx queues */
252 254 1, /* minimum number of tx queues */
253 255 8, /* default number of tx queues */
254 256 16366, /* maximum MTU size */
255 257 0xFFFF, /* maximum interrupt throttle rate */
256 258 0, /* minimum interrupt throttle rate */
257 259 200, /* default interrupt throttle rate */
258 260 18, /* maximum total msix vectors */
259 261 16, /* maximum number of ring vectors */
260 262 2, /* maximum number of other vectors */
261 263 IXGBE_EICR_LSC, /* "other" interrupt types handled */
262 264 0, /* "other" interrupt types enable mask */
263 265 (IXGBE_FLAG_DCA_CAPABLE /* capability flags */
264 266 | IXGBE_FLAG_RSS_CAPABLE
265 267 | IXGBE_FLAG_VMDQ_CAPABLE)
266 268 };
267 269
268 270 static adapter_info_t ixgbe_82599eb_cap = {
269 271 128, /* maximum number of rx queues */
270 272 1, /* minimum number of rx queues */
271 273 128, /* default number of rx queues */
272 274 64, /* maximum number of rx groups */
273 275 1, /* minimum number of rx groups */
274 276 1, /* default number of rx groups */
275 277 128, /* maximum number of tx queues */
276 278 1, /* minimum number of tx queues */
277 279 8, /* default number of tx queues */
278 280 15500, /* maximum MTU size */
279 281 0xFF8, /* maximum interrupt throttle rate */
280 282 0, /* minimum interrupt throttle rate */
281 283 200, /* default interrupt throttle rate */
282 284 64, /* maximum total msix vectors */
283 285 16, /* maximum number of ring vectors */
284 286 2, /* maximum number of other vectors */
285 287 (IXGBE_EICR_LSC
286 288 | IXGBE_EICR_GPI_SDP1
287 289 | IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */
288 290
289 291 (IXGBE_SDP1_GPIEN
290 292 | IXGBE_SDP2_GPIEN), /* "other" interrupt types enable mask */
291 293
292 294 (IXGBE_FLAG_DCA_CAPABLE
293 295 | IXGBE_FLAG_RSS_CAPABLE
294 296 | IXGBE_FLAG_VMDQ_CAPABLE
295 297 | IXGBE_FLAG_RSC_CAPABLE
296 298 | IXGBE_FLAG_SFP_PLUG_CAPABLE) /* capability flags */
297 299 };
298 300
299 301 static adapter_info_t ixgbe_X540_cap = {
300 302 128, /* maximum number of rx queues */
301 303 1, /* minimum number of rx queues */
302 304 128, /* default number of rx queues */
303 305 64, /* maximum number of rx groups */
304 306 1, /* minimum number of rx groups */
305 307 1, /* default number of rx groups */
306 308 128, /* maximum number of tx queues */
[ 218 lines elided ]
307 309 1, /* minimum number of tx queues */
308 310 8, /* default number of tx queues */
309 311 15500, /* maximum MTU size */
310 312 0xFF8, /* maximum interrupt throttle rate */
311 313 0, /* minimum interrupt throttle rate */
312 314 200, /* default interrupt throttle rate */
313 315 64, /* maximum total msix vectors */
314 316 16, /* maximum number of ring vectors */
315 317 2, /* maximum number of other vectors */
316 318 (IXGBE_EICR_LSC
317 - | IXGBE_EICR_GPI_SDP1
318 - | IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */
319 + | IXGBE_EICR_GPI_SDP1_X540
320 + | IXGBE_EICR_GPI_SDP2_X540), /* "other" interrupt types handled */
319 321
320 - (IXGBE_SDP1_GPIEN
321 - | IXGBE_SDP2_GPIEN), /* "other" interrupt types enable mask */
322 + (IXGBE_SDP1_GPIEN_X540
323 + | IXGBE_SDP2_GPIEN_X540), /* "other" interrupt types enable mask */
322 324
323 325 (IXGBE_FLAG_DCA_CAPABLE
324 326 | IXGBE_FLAG_RSS_CAPABLE
325 327 | IXGBE_FLAG_VMDQ_CAPABLE
326 328 | IXGBE_FLAG_RSC_CAPABLE) /* capability flags */
327 329 };
328 330
331 +static adapter_info_t ixgbe_X550_cap = {
332 + 128, /* maximum number of rx queues */
333 + 1, /* minimum number of rx queues */
334 + 128, /* default number of rx queues */
335 + 64, /* maximum number of rx groups */
336 + 1, /* minimum number of rx groups */
337 + 1, /* default number of rx groups */
338 + 128, /* maximum number of tx queues */
339 + 1, /* minimum number of tx queues */
340 + 8, /* default number of tx queues */
341 + 15500, /* maximum MTU size */
342 + 0xFF8, /* maximum interrupt throttle rate */
343 + 0, /* minimum interrupt throttle rate */
344 + 0x200, /* default interrupt throttle rate */
345 + 64, /* maximum total msix vectors */
346 + 16, /* maximum number of ring vectors */
347 + 2, /* maximum number of other vectors */
348 + IXGBE_EICR_LSC, /* "other" interrupt types handled */
349 + 0, /* "other" interrupt types enable mask */
350 + (IXGBE_FLAG_RSS_CAPABLE
351 + | IXGBE_FLAG_VMDQ_CAPABLE
352 + | IXGBE_FLAG_RSC_CAPABLE) /* capability flags */
353 +};
354 +
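For context, the other_intr and other_gpie masks in these tables are what the driver ORs into the interrupt-cause enable (EIMS) and GPIE registers when adapter interrupts are armed. A minimal sketch of that consumption, assumed from the field names and comments rather than shown in this hunk:

	/* Sketch (assumed usage when arming adapter interrupts): */
	eims |= ixgbe->capab->other_intr;	/* LSC and any GPI causes */
	gpie |= ixgbe->capab->other_gpie;	/* matching GPI enable bits */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);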
329 355 /*
330 356 * Module Initialization Functions.
331 357 */
332 358
333 359 int
334 360 _init(void)
335 361 {
336 362 int status;
337 363
338 364 mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
339 365
340 366 status = mod_install(&ixgbe_modlinkage);
341 367
342 368 if (status != DDI_SUCCESS) {
343 369 mac_fini_ops(&ixgbe_dev_ops);
344 370 }
345 371
346 372 return (status);
347 373 }
348 374
349 375 int
350 376 _fini(void)
351 377 {
352 378 int status;
353 379
354 380 status = mod_remove(&ixgbe_modlinkage);
355 381
356 382 if (status == DDI_SUCCESS) {
357 383 mac_fini_ops(&ixgbe_dev_ops);
358 384 }
359 385
360 386 return (status);
361 387 }
362 388
363 389 int
364 390 _info(struct modinfo *modinfop)
365 391 {
366 392 int status;
367 393
368 394 status = mod_info(&ixgbe_modlinkage, modinfop);
369 395
370 396 return (status);
371 397 }
372 398
373 399 /*
374 400 * ixgbe_attach - Driver attach.
375 401 *
376 402 * This function is the device specific initialization entry
377 403 * point. This entry point is required and must be written.
378 404 * The DDI_ATTACH command must be provided in the attach entry
379 405 * point. When attach() is called with cmd set to DDI_ATTACH,
380 406 * all normal kernel services (such as kmem_alloc(9F)) are
381 407 * available for use by the driver.
382 408 *
383 409 * The attach() function will be called once for each instance
384 410 * of the device on the system with cmd set to DDI_ATTACH.
385 411 * Until attach() succeeds, the only driver entry points which
386 412 * may be called are open(9E) and getinfo(9E).
387 413 */
388 414 static int
389 415 ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
390 416 {
391 417 ixgbe_t *ixgbe;
392 418 struct ixgbe_osdep *osdep;
393 419 struct ixgbe_hw *hw;
394 420 int instance;
395 421 char taskqname[32];
396 422
397 423 /*
398 424 * Check the command and perform corresponding operations
399 425 */
400 426 switch (cmd) {
401 427 default:
402 428 return (DDI_FAILURE);
403 429
404 430 case DDI_RESUME:
405 431 return (ixgbe_resume(devinfo));
406 432
407 433 case DDI_ATTACH:
408 434 break;
409 435 }
410 436
411 437 /* Get the device instance */
412 438 instance = ddi_get_instance(devinfo);
413 439
414 440 /* Allocate memory for the instance data structure */
415 441 ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);
416 442
417 443 ixgbe->dip = devinfo;
418 444 ixgbe->instance = instance;
[ 80 lines elided ]
419 445
420 446 hw = &ixgbe->hw;
421 447 osdep = &ixgbe->osdep;
422 448 hw->back = osdep;
423 449 osdep->ixgbe = ixgbe;
424 450
425 451 /* Attach the instance pointer to the dev_info data structure */
426 452 ddi_set_driver_private(devinfo, ixgbe);
427 453
428 454 /*
429 - * Initialize for fma support
455 + * Initialize for FMA support
430 456 */
431 457 ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
432 458 0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
433 459 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
434 460 ixgbe_fm_init(ixgbe);
435 461 ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;
436 462
437 463 /*
438 464 * Map PCI config space registers
439 465 */
440 466 if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
441 467 ixgbe_error(ixgbe, "Failed to map PCI configurations");
442 468 goto attach_fail;
443 469 }
444 470 ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
445 471
446 472 /*
447 473 * Identify the chipset family
448 474 */
449 475 if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
450 476 ixgbe_error(ixgbe, "Failed to identify hardware");
451 477 goto attach_fail;
452 478 }
453 479
454 480 /*
455 481 * Map device registers
456 482 */
457 483 if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
458 484 ixgbe_error(ixgbe, "Failed to map device registers");
459 485 goto attach_fail;
460 486 }
461 487 ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
462 488
463 489 /*
464 490 * Initialize driver parameters
465 491 */
466 492 ixgbe_init_properties(ixgbe);
467 493 ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;
468 494
469 495 /*
470 496 * Register interrupt callback
471 497 */
472 498 if (ixgbe_intr_cb_register(ixgbe) != IXGBE_SUCCESS) {
473 499 ixgbe_error(ixgbe, "Failed to register interrupt callback");
474 500 goto attach_fail;
475 501 }
476 502
477 503 /*
478 504 * Allocate interrupts
479 505 */
480 506 if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
481 507 ixgbe_error(ixgbe, "Failed to allocate interrupts");
482 508 goto attach_fail;
483 509 }
484 510 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
485 511
486 512 /*
487 513 * Allocate rx/tx rings based on the ring numbers.
488 514 * The actual numbers of rx/tx rings are decided by the number of
489 515 * allocated interrupt vectors, so we should allocate the rings after
490 516 * interrupts are allocated.
491 517 */
492 518 if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
493 519 ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
494 520 goto attach_fail;
495 521 }
496 522 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;
497 523
498 524 /*
499 525 * Map rings to interrupt vectors
500 526 */
501 527 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
502 528 ixgbe_error(ixgbe, "Failed to map interrupts to vectors");
503 529 goto attach_fail;
504 530 }
505 531
506 532 /*
507 533 * Add interrupt handlers
508 534 */
509 535 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
510 536 ixgbe_error(ixgbe, "Failed to add interrupt handlers");
511 537 goto attach_fail;
512 538 }
513 539 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
514 540
515 541 /*
516 542 * Create a taskq for sfp-change
517 543 */
518 544 (void) sprintf(taskqname, "ixgbe%d_sfp_taskq", instance);
519 545 if ((ixgbe->sfp_taskq = ddi_taskq_create(devinfo, taskqname,
520 546 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
521 547 ixgbe_error(ixgbe, "sfp_taskq create failed");
522 548 goto attach_fail;
523 549 }
524 550 ixgbe->attach_progress |= ATTACH_PROGRESS_SFP_TASKQ;
525 551
526 552 /*
527 553 * Create a taskq for over-temp
[ 88 lines elided ]
528 554 */
529 555 (void) sprintf(taskqname, "ixgbe%d_overtemp_taskq", instance);
530 556 if ((ixgbe->overtemp_taskq = ddi_taskq_create(devinfo, taskqname,
531 557 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
532 558 ixgbe_error(ixgbe, "overtemp_taskq create failed");
533 559 goto attach_fail;
534 560 }
535 561 ixgbe->attach_progress |= ATTACH_PROGRESS_OVERTEMP_TASKQ;
536 562
537 563 /*
564 + * Create a taskq for processing external PHY interrupts
565 + */
566 + (void) sprintf(taskqname, "ixgbe%d_phy_taskq", instance);
567 + if ((ixgbe->phy_taskq = ddi_taskq_create(devinfo, taskqname,
568 + 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
569 + ixgbe_error(ixgbe, "phy_taskq create failed");
570 + goto attach_fail;
571 + }
572 + ixgbe->attach_progress |= ATTACH_PROGRESS_PHY_TASKQ;
573 +
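A sketch of how the new taskq is presumably consumed: the "other" interrupt path defers external-PHY work to ixgbe_phy_check() (declared above) so the interrupt handler itself stays short. Illustrative only; the dispatch site is not part of this hunk:

	/* Sketch (assumed): defer PHY handling out of interrupt context. */
	if (eicr & IXGBE_EIMS_GPI_SDP0_BY_MAC(hw)) {
		(void) ddi_taskq_dispatch(ixgbe->phy_taskq,
		    ixgbe_phy_check, (void *)ixgbe, DDI_NOSLEEP);
	}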
574 + /*
538 575 * Initialize driver parameters
539 576 */
540 577 if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
541 578 ixgbe_error(ixgbe, "Failed to initialize driver settings");
542 579 goto attach_fail;
543 580 }
544 581
545 582 /*
546 583 * Initialize mutexes for this device.
547 584 * Do this before enabling the interrupt handler and
548 585 * registering the softint, to avoid the condition where the
549 586 * interrupt handler can try to use an uninitialized mutex.
550 587 */
551 588 ixgbe_init_locks(ixgbe);
552 589 ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;
553 590
554 591 /*
555 592 * Initialize chipset hardware
556 593 */
557 594 if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
558 595 ixgbe_error(ixgbe, "Failed to initialize adapter");
559 596 goto attach_fail;
560 597 }
561 598 ixgbe->link_check_complete = B_FALSE;
[ 14 lines elided ]
562 599 ixgbe->link_check_hrtime = gethrtime() +
563 600 (IXGBE_LINK_UP_TIME * 100000000ULL);
564 601 ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;
565 602
566 603 if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
567 604 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
568 605 goto attach_fail;
569 606 }
570 607
571 608 /*
609 + * Initialize adapter capabilities
610 + */
611 + ixgbe_init_params(ixgbe);
612 +
613 + /*
572 614 * Initialize statistics
573 615 */
574 616 if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
575 617 ixgbe_error(ixgbe, "Failed to initialize statistics");
576 618 goto attach_fail;
577 619 }
578 620 ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;
579 621
580 622 /*
581 623 * Register the driver to the MAC
582 624 */
583 625 if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
584 626 ixgbe_error(ixgbe, "Failed to register MAC");
585 627 goto attach_fail;
586 628 }
587 629 mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
588 630 ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;
589 631
590 632 ixgbe->periodic_id = ddi_periodic_add(ixgbe_link_timer, ixgbe,
591 633 IXGBE_CYCLIC_PERIOD, DDI_IPL_0);
592 634 if (ixgbe->periodic_id == 0) {
593 635 ixgbe_error(ixgbe, "Failed to add the link check timer");
594 636 goto attach_fail;
595 637 }
596 638 ixgbe->attach_progress |= ATTACH_PROGRESS_LINK_TIMER;
597 639
[ 16 lines elided ]
598 640 /*
599 641 * Now that mutex locks are initialized, and the chip is also
600 642 * initialized, enable interrupts.
601 643 */
602 644 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
603 645 ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
604 646 goto attach_fail;
605 647 }
606 648 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
607 649
608 - ixgbe_log(ixgbe, "%s, %s", ixgbe_ident, ixgbe_version);
650 + ixgbe_log(ixgbe, "%s", ixgbe_ident);
609 651 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_INITIALIZED);
610 652
611 653 return (DDI_SUCCESS);
612 654
613 655 attach_fail:
614 656 ixgbe_unconfigure(devinfo, ixgbe);
615 657 return (DDI_FAILURE);
616 658 }
617 659
618 660 /*
619 661 * ixgbe_detach - Driver detach.
620 662 *
621 663 * The detach() function is the complement of the attach routine.
622 664 * If cmd is set to DDI_DETACH, detach() is used to remove the
623 665 * state associated with a given instance of a device node
624 666 * prior to the removal of that instance from the system.
625 667 *
626 668 * The detach() function will be called once for each instance
627 669 * of the device for which there has been a successful attach()
628 670 * once there are no longer any opens on the device.
629 671 *
630 672 * Interrupt routines are disabled, and all memory allocated by this
631 673 * driver is freed.
632 674 */
633 675 static int
634 676 ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
635 677 {
636 678 ixgbe_t *ixgbe;
637 679
638 680 /*
639 681 * Check detach command
640 682 */
641 683 switch (cmd) {
642 684 default:
643 685 return (DDI_FAILURE);
644 686
645 687 case DDI_SUSPEND:
646 688 return (ixgbe_suspend(devinfo));
647 689
648 690 case DDI_DETACH:
649 691 break;
650 692 }
651 693
652 694 /*
653 695 * Get the pointer to the driver private data structure
654 696 */
655 697 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
656 698 if (ixgbe == NULL)
657 699 return (DDI_FAILURE);
658 700
659 701 /*
660 702 * If the device is still running, it needs to be stopped first.
661 703 * This check is necessary because under some specific circumstances,
662 704 * the detach routine can be called without stopping the interface
663 705 * first.
664 706 */
665 707 if (ixgbe->ixgbe_state & IXGBE_STARTED) {
666 708 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
667 709 mutex_enter(&ixgbe->gen_lock);
668 710 ixgbe_stop(ixgbe, B_TRUE);
669 711 mutex_exit(&ixgbe->gen_lock);
670 712 /* Disable and stop the watchdog timer */
671 713 ixgbe_disable_watchdog_timer(ixgbe);
672 714 }
673 715
674 716 /*
675 717 * Check if there are still rx buffers held by the upper layer.
676 718 * If so, fail the detach.
677 719 */
678 720 if (!ixgbe_rx_drain(ixgbe))
679 721 return (DDI_FAILURE);
680 722
681 723 /*
682 724 * Do the remaining unconfigure routines
683 725 */
684 726 ixgbe_unconfigure(devinfo, ixgbe);
685 727
686 728 return (DDI_SUCCESS);
687 729 }
688 730
689 731 /*
690 732 * quiesce(9E) entry point.
691 733 *
692 734 * This function is called when the system is single-threaded at high
693 735 * PIL with preemption disabled. Therefore, this function must
694 736 * not block.
695 737 *
696 738 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
697 739 * DDI_FAILURE indicates an error condition and should almost never happen.
698 740 */
699 741 static int
700 742 ixgbe_quiesce(dev_info_t *devinfo)
701 743 {
702 744 ixgbe_t *ixgbe;
703 745 struct ixgbe_hw *hw;
704 746
705 747 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
706 748
707 749 if (ixgbe == NULL)
708 750 return (DDI_FAILURE);
709 751
710 752 hw = &ixgbe->hw;
711 753
712 754 /*
713 755 * Disable the adapter interrupts
714 756 */
715 757 ixgbe_disable_adapter_interrupts(ixgbe);
716 758
717 759 /*
718 760 * Tell firmware driver is no longer in control
719 761 */
720 762 ixgbe_release_driver_control(hw);
721 763
722 764 /*
723 765 * Reset the chipset
724 766 */
725 767 (void) ixgbe_reset_hw(hw);
726 768
727 769 /*
728 770 * Reset PHY
729 771 */
730 772 (void) ixgbe_reset_phy(hw);
731 773
732 774 return (DDI_SUCCESS);
733 775 }
734 776
735 777 static void
736 778 ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
737 779 {
738 780 /*
739 781 * Disable interrupt
740 782 */
741 783 if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
742 784 (void) ixgbe_disable_intrs(ixgbe);
743 785 }
744 786
745 787 /*
746 788 * remove the link check timer
747 789 */
748 790 if (ixgbe->attach_progress & ATTACH_PROGRESS_LINK_TIMER) {
749 791 if (ixgbe->periodic_id != NULL) {
750 792 ddi_periodic_delete(ixgbe->periodic_id);
751 793 ixgbe->periodic_id = NULL;
752 794 }
753 795 }
754 796
755 797 /*
756 798 * Unregister MAC
757 799 */
758 800 if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
759 801 (void) mac_unregister(ixgbe->mac_hdl);
760 802 }
761 803
762 804 /*
763 805 * Free statistics
764 806 */
765 807 if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
766 808 kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
767 809 }
768 810
769 811 /*
770 812 * Remove interrupt handlers
771 813 */
772 814 if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
773 815 ixgbe_rem_intr_handlers(ixgbe);
774 816 }
775 817
776 818 /*
777 819 * Remove taskq for sfp-status-change
778 820 */
779 821 if (ixgbe->attach_progress & ATTACH_PROGRESS_SFP_TASKQ) {
780 822 ddi_taskq_destroy(ixgbe->sfp_taskq);
[ 162 lines elided ]
781 823 }
782 824
783 825 /*
784 826 * Remove taskq for over-temp
785 827 */
786 828 if (ixgbe->attach_progress & ATTACH_PROGRESS_OVERTEMP_TASKQ) {
787 829 ddi_taskq_destroy(ixgbe->overtemp_taskq);
788 830 }
789 831
790 832 /*
833 + * Remove taskq for external PHYs
834 + */
835 + if (ixgbe->attach_progress & ATTACH_PROGRESS_PHY_TASKQ) {
836 + ddi_taskq_destroy(ixgbe->phy_taskq);
837 + }
838 +
839 + /*
791 840 * Remove interrupts
792 841 */
793 842 if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
794 843 ixgbe_rem_intrs(ixgbe);
795 844 }
796 845
797 846 /*
798 847 * Unregister interrupt callback handler
799 848 */
800 849 (void) ddi_cb_unregister(ixgbe->cb_hdl);
801 850
802 851 /*
803 852 * Remove driver properties
804 853 */
805 854 if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
806 855 (void) ddi_prop_remove_all(devinfo);
807 856 }
808 857
809 858 /*
810 859 * Stop the chipset
811 860 */
812 861 if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
813 862 mutex_enter(&ixgbe->gen_lock);
814 863 ixgbe_chip_stop(ixgbe);
815 864 mutex_exit(&ixgbe->gen_lock);
816 865 }
817 866
818 867 /*
819 868 * Free register handle
820 869 */
821 870 if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
822 871 if (ixgbe->osdep.reg_handle != NULL)
823 872 ddi_regs_map_free(&ixgbe->osdep.reg_handle);
824 873 }
825 874
826 875 /*
827 876 * Free PCI config handle
828 877 */
829 878 if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
830 879 if (ixgbe->osdep.cfg_handle != NULL)
831 880 pci_config_teardown(&ixgbe->osdep.cfg_handle);
832 881 }
833 882
834 883 /*
835 884 * Free locks
836 885 */
837 886 if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
838 887 ixgbe_destroy_locks(ixgbe);
839 888 }
840 889
841 890 /*
842 891 * Free the rx/tx rings
843 892 */
844 893 if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
845 894 ixgbe_free_rings(ixgbe);
846 895 }
847 896
848 897 /*
849 898 * Unregister FMA capabilities
850 899 */
851 900 if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
852 901 ixgbe_fm_fini(ixgbe);
853 902 }
854 903
855 904 /*
856 905 * Free the driver data structure
857 906 */
858 907 kmem_free(ixgbe, sizeof (ixgbe_t));
859 908
860 909 ddi_set_driver_private(devinfo, NULL);
861 910 }
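The teardown above mirrors ixgbe_attach() through the attach_progress bitmask: each successful attach step sets a bit, and unconfigure undoes only the steps whose bits are set, which is what makes the shared attach_fail: path safe at any stage. A standalone sketch of the idiom, with hypothetical names:

	/* Hypothetical sketch of the attach-progress idiom. */
	uint32_t progress = 0;

	if (setup_locks() != 0)
		goto fail;
	progress |= PROGRESS_LOCKS;

	if (setup_intrs() != 0)
		goto fail;
	progress |= PROGRESS_INTR;

	return (0);

fail:
	if (progress & PROGRESS_INTR)
		teardown_intrs();
	if (progress & PROGRESS_LOCKS)
		teardown_locks();
	return (-1);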
862 911
863 912 /*
864 913 * ixgbe_register_mac - Register the driver and its function pointers with
865 914 * the GLD interface.
866 915 */
867 916 static int
868 917 ixgbe_register_mac(ixgbe_t *ixgbe)
869 918 {
870 919 struct ixgbe_hw *hw = &ixgbe->hw;
871 920 mac_register_t *mac;
872 921 int status;
873 922
874 923 if ((mac = mac_alloc(MAC_VERSION)) == NULL)
875 924 return (IXGBE_FAILURE);
876 925
877 926 mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
878 927 mac->m_driver = ixgbe;
879 928 mac->m_dip = ixgbe->dip;
880 929 mac->m_src_addr = hw->mac.addr;
881 930 mac->m_callbacks = &ixgbe_m_callbacks;
882 931 mac->m_min_sdu = 0;
883 932 mac->m_max_sdu = ixgbe->default_mtu;
884 933 mac->m_margin = VLAN_TAGSZ;
885 934 mac->m_priv_props = ixgbe_priv_props;
886 935 mac->m_v12n = MAC_VIRT_LEVEL1;
887 936
888 937 status = mac_register(mac, &ixgbe->mac_hdl);
889 938
890 939 mac_free(mac);
891 940
892 941 return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
893 942 }
894 943
895 944 /*
896 945 * ixgbe_identify_hardware - Identify the type of the chipset.
897 946 */
898 947 static int
899 948 ixgbe_identify_hardware(ixgbe_t *ixgbe)
900 949 {
901 950 struct ixgbe_hw *hw = &ixgbe->hw;
902 951 struct ixgbe_osdep *osdep = &ixgbe->osdep;
903 952
904 953 /*
905 954 * Get the device id
906 955 */
907 956 hw->vendor_id =
908 957 pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
909 958 hw->device_id =
910 959 pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
911 960 hw->revision_id =
912 961 pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
913 962 hw->subsystem_device_id =
914 963 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
915 964 hw->subsystem_vendor_id =
916 965 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
917 966
918 967 /*
919 968 * Set the mac type of the adapter based on the device id
920 969 */
921 970 if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) {
922 971 return (IXGBE_FAILURE);
923 972 }
924 973
925 974 /*
926 975 * Install adapter capabilities
927 976 */
928 977 switch (hw->mac.type) {
929 978 case ixgbe_mac_82598EB:
930 979 IXGBE_DEBUGLOG_0(ixgbe, "identify 82598 adapter\n");
931 980 ixgbe->capab = &ixgbe_82598eb_cap;
932 981
933 982 if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
934 983 ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
935 984 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
936 985 ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN;
937 986 }
938 987 break;
939 988
940 989 case ixgbe_mac_82599EB:
941 990 IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
942 991 ixgbe->capab = &ixgbe_82599eb_cap;
943 992
944 993 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
945 994 ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
946 995 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
947 996 ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
948 997 }
949 998 break;
[ 149 lines elided ]
950 999
951 1000 case ixgbe_mac_X540:
952 1001 IXGBE_DEBUGLOG_0(ixgbe, "identify X540 adapter\n");
953 1002 ixgbe->capab = &ixgbe_X540_cap;
954 1003 /*
955 1004 * For now, X540 is all set in its capab structure.
956 1005 * As other X540 variants show up, things can change here.
957 1006 */
958 1007 break;
959 1008
1009 + case ixgbe_mac_X550:
1010 + case ixgbe_mac_X550EM_x:
1011 + IXGBE_DEBUGLOG_0(ixgbe, "identify X550 adapter\n");
1012 + ixgbe->capab = &ixgbe_X550_cap;
1013 +
1014 + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
1015 + ixgbe->capab->flags |= IXGBE_FLAG_SFP_PLUG_CAPABLE;
1016 +
1017 + /*
1018 + * Link detection on X552 SFP+ and X552/X557-AT
1019 + */
1020 + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
1021 + hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
1022 + ixgbe->capab->other_intr |=
1023 + IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
1024 + ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN_X540;
1025 + }
1026 + break;
1027 +
960 1028 default:
961 1029 IXGBE_DEBUGLOG_1(ixgbe,
962 1030 "adapter not supported in ixgbe_identify_hardware(): %d\n",
963 1031 hw->mac.type);
964 1032 return (IXGBE_FAILURE);
965 1033 }
966 1034
967 1035 return (IXGBE_SUCCESS);
968 1036 }
969 1037
970 1038 /*
971 1039 * ixgbe_regs_map - Map the device registers.
972 1040 *
973 1041 */
974 1042 static int
975 1043 ixgbe_regs_map(ixgbe_t *ixgbe)
976 1044 {
977 1045 dev_info_t *devinfo = ixgbe->dip;
978 1046 struct ixgbe_hw *hw = &ixgbe->hw;
979 1047 struct ixgbe_osdep *osdep = &ixgbe->osdep;
980 1048 off_t mem_size;
981 1049
982 1050 /*
983 1051 * First get the size of device registers to be mapped.
984 1052 */
985 1053 if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size)
986 1054 != DDI_SUCCESS) {
987 1055 return (IXGBE_FAILURE);
988 1056 }
989 1057
990 1058 /*
991 1059 * Call ddi_regs_map_setup() to map registers
992 1060 */
993 1061 if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET,
994 1062 (caddr_t *)&hw->hw_addr, 0,
995 1063 mem_size, &ixgbe_regs_acc_attr,
996 1064 &osdep->reg_handle)) != DDI_SUCCESS) {
997 1065 return (IXGBE_FAILURE);
998 1066 }
999 1067
1000 1068 return (IXGBE_SUCCESS);
1001 1069 }
1002 1070
1003 1071 /*
[ 34 lines elided ]
1004 1072 * ixgbe_init_properties - Initialize driver properties.
1005 1073 */
1006 1074 static void
1007 1075 ixgbe_init_properties(ixgbe_t *ixgbe)
1008 1076 {
1009 1077 /*
1010 1078 * Get conf file properties, including link settings,
1011 1079 * jumbo frames, ring numbers, descriptor numbers, etc.
1012 1080 */
1013 1081 ixgbe_get_conf(ixgbe);
1014 -
1015 - ixgbe_init_params(ixgbe);
1016 1082 }
1017 1083
1018 1084 /*
1019 1085 * ixgbe_init_driver_settings - Initialize driver settings.
1020 1086 *
1021 1087 * The settings include hardware function pointers, bus information,
1022 1088 * rx/tx rings settings, link state, and any other parameters that
1023 1089 * need to be setup during driver initialization.
1024 1090 */
1025 1091 static int
1026 1092 ixgbe_init_driver_settings(ixgbe_t *ixgbe)
1027 1093 {
1028 1094 struct ixgbe_hw *hw = &ixgbe->hw;
1029 1095 dev_info_t *devinfo = ixgbe->dip;
1030 1096 ixgbe_rx_ring_t *rx_ring;
1031 1097 ixgbe_rx_group_t *rx_group;
1032 1098 ixgbe_tx_ring_t *tx_ring;
1033 1099 uint32_t rx_size;
1034 1100 uint32_t tx_size;
1035 1101 uint32_t ring_per_group;
1036 1102 int i;
1037 1103
1038 1104 /*
1039 1105 * Initialize chipset specific hardware function pointers
1040 1106 */
1041 1107 if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
1042 1108 return (IXGBE_FAILURE);
1043 1109 }
1044 1110
1045 1111 /*
1046 1112 * Get the system page size
1047 1113 */
1048 1114 ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1);
1049 1115
1050 1116 /*
1051 1117 * Set rx buffer size
1052 1118 *
1053 1119 * The IP header alignment room is counted in the calculation.
1054 1120 * The rx buffer size is in units of 1 KB, as required by the
1055 1121 * chipset hardware.
1056 1122 */
1057 1123 rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
1058 1124 ixgbe->rx_buf_size = ((rx_size >> 10) +
1059 1125 ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
1060 1126
1061 1127 /*
1062 1128 * Set tx buffer size
1063 1129 */
1064 1130 tx_size = ixgbe->max_frame_size;
1065 1131 ixgbe->tx_buf_size = ((tx_size >> 10) +
1066 1132 ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
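Both of the computations above are longhand round-up-to-1-KiB operations; for reference, an equivalent and more common form (illustrative only, not part of the change):

	/* Equivalent rounding: ceil to the next 1 KiB boundary. */
	buf_size = (size + 1023) & ~(uint32_t)1023;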
1067 1133
1068 1134 /*
1069 1135 * Initialize rx/tx rings/groups parameters
1070 1136 */
1071 1137 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
1072 1138 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1073 1139 rx_ring = &ixgbe->rx_rings[i];
1074 1140 rx_ring->index = i;
1075 1141 rx_ring->ixgbe = ixgbe;
1076 1142 rx_ring->group_index = i / ring_per_group;
1077 1143 rx_ring->hw_index = ixgbe_get_hw_rx_index(ixgbe, i);
1078 1144 }
1079 1145
1080 1146 for (i = 0; i < ixgbe->num_rx_groups; i++) {
1081 1147 rx_group = &ixgbe->rx_groups[i];
1082 1148 rx_group->index = i;
1083 1149 rx_group->ixgbe = ixgbe;
1084 1150 }
1085 1151
1086 1152 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1087 1153 tx_ring = &ixgbe->tx_rings[i];
1088 1154 tx_ring->index = i;
1089 1155 tx_ring->ixgbe = ixgbe;
1090 1156 if (ixgbe->tx_head_wb_enable)
1091 1157 tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
1092 1158 else
1093 1159 tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;
1094 1160
1095 1161 tx_ring->ring_size = ixgbe->tx_ring_size;
1096 1162 tx_ring->free_list_size = ixgbe->tx_ring_size +
1097 1163 (ixgbe->tx_ring_size >> 1);
1098 1164 }
1099 1165
1100 1166 /*
1101 1167 * Initialize values of interrupt throttling rate
1102 1168 */
1103 1169 for (i = 1; i < MAX_INTR_VECTOR; i++)
1104 1170 ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];
1105 1171
1106 1172 /*
1107 1173 * The initial link state should be "unknown"
1108 1174 */
1109 1175 ixgbe->link_state = LINK_STATE_UNKNOWN;
1110 1176
1111 1177 return (IXGBE_SUCCESS);
1112 1178 }
1113 1179
1114 1180 /*
1115 1181 * ixgbe_init_locks - Initialize locks.
1116 1182 */
1117 1183 static void
1118 1184 ixgbe_init_locks(ixgbe_t *ixgbe)
1119 1185 {
1120 1186 ixgbe_rx_ring_t *rx_ring;
1121 1187 ixgbe_tx_ring_t *tx_ring;
1122 1188 int i;
1123 1189
1124 1190 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1125 1191 rx_ring = &ixgbe->rx_rings[i];
1126 1192 mutex_init(&rx_ring->rx_lock, NULL,
1127 1193 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1128 1194 }
1129 1195
1130 1196 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1131 1197 tx_ring = &ixgbe->tx_rings[i];
1132 1198 mutex_init(&tx_ring->tx_lock, NULL,
1133 1199 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1134 1200 mutex_init(&tx_ring->recycle_lock, NULL,
1135 1201 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1136 1202 mutex_init(&tx_ring->tcb_head_lock, NULL,
1137 1203 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1138 1204 mutex_init(&tx_ring->tcb_tail_lock, NULL,
1139 1205 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1140 1206 }
1141 1207
1142 1208 mutex_init(&ixgbe->gen_lock, NULL,
1143 1209 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1144 1210
1145 1211 mutex_init(&ixgbe->watchdog_lock, NULL,
1146 1212 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1147 1213 }
1148 1214
1149 1215 /*
1150 1216 * ixgbe_destroy_locks - Destroy locks.
1151 1217 */
1152 1218 static void
1153 1219 ixgbe_destroy_locks(ixgbe_t *ixgbe)
1154 1220 {
1155 1221 ixgbe_rx_ring_t *rx_ring;
1156 1222 ixgbe_tx_ring_t *tx_ring;
1157 1223 int i;
1158 1224
1159 1225 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1160 1226 rx_ring = &ixgbe->rx_rings[i];
1161 1227 mutex_destroy(&rx_ring->rx_lock);
1162 1228 }
1163 1229
1164 1230 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1165 1231 tx_ring = &ixgbe->tx_rings[i];
1166 1232 mutex_destroy(&tx_ring->tx_lock);
1167 1233 mutex_destroy(&tx_ring->recycle_lock);
1168 1234 mutex_destroy(&tx_ring->tcb_head_lock);
1169 1235 mutex_destroy(&tx_ring->tcb_tail_lock);
1170 1236 }
1171 1237
1172 1238 mutex_destroy(&ixgbe->gen_lock);
1173 1239 mutex_destroy(&ixgbe->watchdog_lock);
1174 1240 }
1175 1241
1176 1242 static int
1177 1243 ixgbe_resume(dev_info_t *devinfo)
1178 1244 {
1179 1245 ixgbe_t *ixgbe;
1180 1246 int i;
1181 1247
1182 1248 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1183 1249 if (ixgbe == NULL)
1184 1250 return (DDI_FAILURE);
1185 1251
1186 1252 mutex_enter(&ixgbe->gen_lock);
1187 1253
1188 1254 if (ixgbe->ixgbe_state & IXGBE_STARTED) {
1189 1255 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1190 1256 mutex_exit(&ixgbe->gen_lock);
1191 1257 return (DDI_FAILURE);
1192 1258 }
1193 1259
1194 1260 /*
1195 1261 * Enable and start the watchdog timer
1196 1262 */
1197 1263 ixgbe_enable_watchdog_timer(ixgbe);
1198 1264 }
1199 1265
1200 1266 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_SUSPENDED);
1201 1267
1202 1268 if (ixgbe->ixgbe_state & IXGBE_STARTED) {
1203 1269 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1204 1270 mac_tx_ring_update(ixgbe->mac_hdl,
1205 1271 ixgbe->tx_rings[i].ring_handle);
1206 1272 }
1207 1273 }
1208 1274
1209 1275 mutex_exit(&ixgbe->gen_lock);
1210 1276
1211 1277 return (DDI_SUCCESS);
1212 1278 }
1213 1279
1214 1280 static int
1215 1281 ixgbe_suspend(dev_info_t *devinfo)
1216 1282 {
1217 1283 ixgbe_t *ixgbe;
1218 1284
1219 1285 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1220 1286 if (ixgbe == NULL)
1221 1287 return (DDI_FAILURE);
1222 1288
1223 1289 mutex_enter(&ixgbe->gen_lock);
1224 1290
1225 1291 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_SUSPENDED);
1226 1292 if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
1227 1293 mutex_exit(&ixgbe->gen_lock);
1228 1294 return (DDI_SUCCESS);
1229 1295 }
1230 1296 ixgbe_stop(ixgbe, B_FALSE);
1231 1297
1232 1298 mutex_exit(&ixgbe->gen_lock);
1233 1299
1234 1300 /*
1235 1301 * Disable and stop the watchdog timer
1236 1302 */
1237 1303 ixgbe_disable_watchdog_timer(ixgbe);
1238 1304
1239 1305 return (DDI_SUCCESS);
[ 214 lines elided ]
1240 1306 }
1241 1307
1242 1308 /*
1243 1309 * ixgbe_init - Initialize the device.
1244 1310 */
1245 1311 static int
1246 1312 ixgbe_init(ixgbe_t *ixgbe)
1247 1313 {
1248 1314 struct ixgbe_hw *hw = &ixgbe->hw;
1249 1315 u8 pbanum[IXGBE_PBANUM_LENGTH];
1316 + int rv;
1250 1317
1251 1318 mutex_enter(&ixgbe->gen_lock);
1252 1319
1253 1320 /*
1254 - * Reset chipset to put the hardware in a known state
1255 - * before we try to do anything with the eeprom.
1321 + * Configure/Initialize hardware
1256 1322 */
1257 - if (ixgbe_reset_hw(hw) != IXGBE_SUCCESS) {
1258 - ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1259 - goto init_fail;
1323 + rv = ixgbe_init_hw(hw);
1324 + if (rv != IXGBE_SUCCESS) {
1325 + switch (rv) {
1326 +
1327 + /*
1328 + * The first three errors do not prevent us from progressing
1329 + * further, and are mainly advisory in nature. If an SFP module
1330 + * is not present, or is not deemed supported by the common code,
1331 + * we advise the operator of this fact but carry on instead of
1332 + * failing hard, as SFPs can be inserted or replaced while the
1333 + * driver is running. In the case of an unknown error, we fail
1334 + * hard, logging the reason and emitting an FMA event.
1335 + */
1336 + case IXGBE_ERR_EEPROM_VERSION:
1337 + ixgbe_error(ixgbe,
1338 + "This Intel 10Gb Ethernet device is pre-release and"
1339 + " contains outdated firmware. Please contact your"
1340 + " hardware vendor for a replacement.");
1341 + break;
1342 + case IXGBE_ERR_SFP_NOT_PRESENT:
1343 + ixgbe_error(ixgbe,
1344 + "No SFP+ module detected on this interface. Please "
1345 + "install a supported SFP+ module for this "
1346 + "interface to become operational.");
1347 + break;
1348 + case IXGBE_ERR_SFP_NOT_SUPPORTED:
1349 + ixgbe_error(ixgbe,
1350 + "Unsupported SFP+ module detected. Please replace "
1351 + "it with a supported SFP+ module per Intel "
1352 + "documentation, or bypass this check with "
1353 + "allow_unsupported_sfp=1 in ixgbe.conf.");
1354 + break;
1355 + default:
1356 + ixgbe_error(ixgbe,
1357 + "Failed to initialize hardware. ixgbe_init_hw "
1358 + "returned %d", rv);
1359 + ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1360 + goto init_fail;
1361 + }
1260 1362 }
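For operators who hit the unsupported-SFP message above, the override it names would live in the driver's configuration file; a hypothetical ixgbe.conf fragment (property handling is assumed, not shown in this hunk):

	# /kernel/drv/ixgbe.conf (hypothetical fragment)
	allow_unsupported_sfp=1;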
1261 1363
1262 1364 /*
1263 1365 * Need to init eeprom before validating the checksum.
1264 1366 */
1265 1367 if (ixgbe_init_eeprom_params(hw) < 0) {
1266 1368 ixgbe_error(ixgbe,
1267 1369 "Unable to intitialize the eeprom interface.");
1268 1370 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1269 1371 goto init_fail;
1270 1372 }
1271 1373
1272 1374 /*
1273 1375 * NVM validation
1274 1376 */
1275 1377 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1276 1378 /*
1277 1379 * Some PCI-E parts fail the first check due to
1278 1380 * the link being in sleep state. Call it again,
1279 1381 * if it fails a second time it's a real issue.
1280 1382 */
1281 1383 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1282 1384 ixgbe_error(ixgbe,
1283 1385 "Invalid NVM checksum. Please contact "
1284 1386 "the vendor to update the NVM.");
1285 1387 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1286 1388 goto init_fail;
1287 1389 }
1288 1390 }
1289 1391
[ 20 lines elided ]
1290 1392 /*
1291 1393 * Setup default flow control thresholds - enable/disable
1292 1394 * & flow control type is controlled by ixgbe.conf
1293 1395 */
1294 1396 hw->fc.high_water[0] = DEFAULT_FCRTH;
1295 1397 hw->fc.low_water[0] = DEFAULT_FCRTL;
1296 1398 hw->fc.pause_time = DEFAULT_FCPAUSE;
1297 1399 hw->fc.send_xon = B_TRUE;
1298 1400
1299 1401 /*
1402 + * Initialize flow control
1403 + */
1404 + (void) ixgbe_start_hw(hw);
1405 +
1406 + /*
1300 1407 * Initialize link settings
1301 1408 */
1302 1409 (void) ixgbe_driver_setup_link(ixgbe, B_FALSE);
1303 1410
1304 1411 /*
1305 1412 * Initialize the chipset hardware
1306 1413 */
1307 1414 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1308 1415 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1309 1416 goto init_fail;
1310 1417 }
1311 1418
1312 1419 /*
1313 1420 * Read identifying information and place in devinfo.
1314 1421 */
1315 1422 pbanum[0] = '\0';
1316 1423 (void) ixgbe_read_pba_string(hw, pbanum, sizeof (pbanum));
1317 1424 if (*pbanum != '\0') {
1318 1425 (void) ddi_prop_update_string(DDI_DEV_T_NONE, ixgbe->dip,
1319 1426 "printed-board-assembly", (char *)pbanum);
1320 1427 }
1321 1428
1322 1429 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1323 1430 goto init_fail;
1324 1431 }
1325 1432
1326 1433 mutex_exit(&ixgbe->gen_lock);
1327 1434 return (IXGBE_SUCCESS);
1328 1435
1329 1436 init_fail:
1330 1437 /*
1331 1438 * Reset PHY
1332 1439 */
1333 1440 (void) ixgbe_reset_phy(hw);
1334 1441
1335 1442 mutex_exit(&ixgbe->gen_lock);
1336 1443 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
↓ open down ↓ |
27 lines elided |
↑ open up ↑ |
1337 1444 return (IXGBE_FAILURE);
1338 1445 }
1339 1446
1340 1447 /*
1341 1448 * ixgbe_chip_start - Initialize and start the chipset hardware.
1342 1449 */
1343 1450 static int
1344 1451 ixgbe_chip_start(ixgbe_t *ixgbe)
1345 1452 {
1346 1453 struct ixgbe_hw *hw = &ixgbe->hw;
1347 - int ret_val, i;
1454 + int i;
1348 1455
1349 1456 ASSERT(mutex_owned(&ixgbe->gen_lock));
1350 1457
1351 1458 /*
1352 1459 * Get the mac address
1353 1460 * This function should handle SPARC case correctly.
1354 1461 */
1355 1462 if (!ixgbe_find_mac_address(ixgbe)) {
1356 1463 ixgbe_error(ixgbe, "Failed to get the mac address");
1357 1464 return (IXGBE_FAILURE);
1358 1465 }
1359 1466
[ 2 lines elided ]
1360 1467 /*
1361 1468 * Validate the mac address
1362 1469 */
1363 1470 (void) ixgbe_init_rx_addrs(hw);
1364 1471 if (!is_valid_mac_addr(hw->mac.addr)) {
1365 1472 ixgbe_error(ixgbe, "Invalid mac address");
1366 1473 return (IXGBE_FAILURE);
1367 1474 }
1368 1475
1369 1476 /*
1370 - * Configure/Initialize hardware
1371 - */
1372 - ret_val = ixgbe_init_hw(hw);
1373 - if (ret_val != IXGBE_SUCCESS) {
1374 - if (ret_val == IXGBE_ERR_EEPROM_VERSION) {
1375 - ixgbe_error(ixgbe,
1376 - "This 82599 device is pre-release and contains"
1377 - " outdated firmware, please contact your hardware"
1378 - " vendor for a replacement.");
1379 - } else {
1380 - ixgbe_error(ixgbe, "Failed to initialize hardware");
1381 - return (IXGBE_FAILURE);
1382 - }
1383 - }
1384 -
1385 - /*
1386 1477 * Re-enable relaxed ordering for performance. It is disabled
1387 1478 * by default in the hardware init.
1388 1479 */
1389 1480 if (ixgbe->relax_order_enable == B_TRUE)
1390 1481 ixgbe_enable_relaxed_ordering(hw);
1391 1482
1392 1483 /*
1393 1484 * Setup adapter interrupt vectors
1394 1485 */
1395 1486 ixgbe_setup_adapter_vector(ixgbe);
1396 1487
1397 1488 /*
1398 1489 * Initialize unicast addresses.
1399 1490 */
1400 1491 ixgbe_init_unicst(ixgbe);
1401 1492
1402 1493 /*
1403 1494 * Setup and initialize the mctable structures.
1404 1495 */
[ 9 lines elided ]
1405 1496 ixgbe_setup_multicst(ixgbe);
1406 1497
1407 1498 /*
1408 1499 * Set interrupt throttling rate
1409 1500 */
1410 1501 for (i = 0; i < ixgbe->intr_cnt; i++) {
1411 1502 IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
1412 1503 }
1413 1504
1414 1505 /*
1415 - * Save the state of the phy
1506 + * Disable Wake-on-LAN
1416 1507 */
1508 + IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
1509 +
1510 + /*
1511 + * Some adapters offer Energy Efficient Ethernet (EEE) support.
1512 + * Due to issues with EEE in e1000g/igb, we disable this by default
1513 + * as a precautionary measure.
1514 + *
1515 + * Currently, the only known adapter which supports EEE in the ixgbe
1516 + * line is 8086,15AB (IXGBE_DEV_ID_X550EM_X_KR), and only after the
1517 + * first revision of it, as well as any X550 with MAC type 6 (non-EM)
1518 + */
1519 + (void) ixgbe_setup_eee(hw, B_FALSE);
1520 +
1521 + /*
1522 + * Turn on any present SFP Tx laser
1523 + */
1524 + ixgbe_enable_tx_laser(hw);
1525 +
1526 + /*
1527 + * Power on the PHY
1528 + */
1529 + (void) ixgbe_set_phy_power(hw, B_TRUE);
1530 +
1531 + /*
1532 + * Save the state of the PHY
1533 + */
1417 1534 ixgbe_get_hw_state(ixgbe);
1418 1535
1419 1536 /*
1420 1537 * Make sure driver has control
1421 1538 */
1422 1539 ixgbe_get_driver_control(hw);
1423 1540
1424 1541 return (IXGBE_SUCCESS);
1425 1542 }
1426 1543
1427 1544 /*
1428 1545 * ixgbe_chip_stop - Stop the chipset hardware
1429 1546 */
1430 1547 static void
1431 1548 ixgbe_chip_stop(ixgbe_t *ixgbe)
1432 1549 {
1433 1550 struct ixgbe_hw *hw = &ixgbe->hw;
1551 + int rv;
1434 1552
1435 1553 ASSERT(mutex_owned(&ixgbe->gen_lock));
1436 1554
1437 1555 /*
1438 - * Tell firmware driver is no longer in control
1556 + * Stop interrupt generation and disable the Tx unit
1439 1557 */
1440 - ixgbe_release_driver_control(hw);
1558 + hw->adapter_stopped = B_FALSE;
1559 + (void) ixgbe_stop_adapter(hw);
1441 1560
1442 1561 /*
1443 1562 * Reset the chipset
1444 1563 */
1445 1564 (void) ixgbe_reset_hw(hw);
1446 1565
1447 1566 /*
1448 1567 * Reset PHY
1449 1568 */
1450 1569 (void) ixgbe_reset_phy(hw);
1570 +
1571 + /*
1572 + * Enter LPLU (Low Power, Link Up) mode, if available. Avoid resetting
1573 + * the PHY while doing so. Else, just power down the PHY.
1574 + */
1575 + if (hw->phy.ops.enter_lplu != NULL) {
1576 + hw->phy.reset_disable = B_TRUE;
1577 + rv = hw->phy.ops.enter_lplu(hw);
1578 + if (rv != IXGBE_SUCCESS)
1579 + ixgbe_error(ixgbe, "Error while entering LPLU: %d", rv);
1580 + hw->phy.reset_disable = B_FALSE;
1581 + } else {
1582 + (void) ixgbe_set_phy_power(hw, B_FALSE);
1583 + }
1584 +
1585 + /*
1586 + * Turn off any present SFP Tx laser;
1587 + * this is expected for health and safety reasons.
1588 + */
1589 + ixgbe_disable_tx_laser(hw);
1590 +
1591 + /*
1592 + * Tell firmware driver is no longer in control
1593 + */
1594 + ixgbe_release_driver_control(hw);
1595 +
1451 1596 }
1452 1597
1453 1598 /*
1454 1599 * ixgbe_reset - Reset the chipset and re-start the driver.
1455 1600 *
1456 1601 * It involves stopping and re-starting the chipset,
1457 1602 * and re-configuring the rx/tx rings.
1458 1603 */
1459 1604 static int
1460 1605 ixgbe_reset(ixgbe_t *ixgbe)
1461 1606 {
1462 1607 int i;
1463 1608
1464 1609 /*
1465 1610 * Disable and stop the watchdog timer
1466 1611 */
1467 1612 ixgbe_disable_watchdog_timer(ixgbe);
1468 1613
1469 1614 mutex_enter(&ixgbe->gen_lock);
1470 1615
1471 1616 ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
1472 1617 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
1473 1618
1474 1619 ixgbe_stop(ixgbe, B_FALSE);
1475 1620
1476 1621 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1477 1622 mutex_exit(&ixgbe->gen_lock);
1478 1623 return (IXGBE_FAILURE);
1479 1624 }
1480 1625
1481 1626 /*
1482 1627 * After resetting, need to recheck the link status.
1483 1628 */
1484 1629 ixgbe->link_check_complete = B_FALSE;
1485 1630 ixgbe->link_check_hrtime = gethrtime() +
1486 1631 (IXGBE_LINK_UP_TIME * 100000000ULL);
1487 1632
1488 1633 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STARTED);
1489 1634
1490 1635 if (!(ixgbe->ixgbe_state & IXGBE_SUSPENDED)) {
1491 1636 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1492 1637 mac_tx_ring_update(ixgbe->mac_hdl,
1493 1638 ixgbe->tx_rings[i].ring_handle);
1494 1639 }
1495 1640 }
1496 1641
1497 1642 mutex_exit(&ixgbe->gen_lock);
1498 1643
1499 1644 /*
1500 1645 * Enable and start the watchdog timer
1501 1646 */
1502 1647 ixgbe_enable_watchdog_timer(ixgbe);
1503 1648
1504 1649 return (IXGBE_SUCCESS);
1505 1650 }
1506 1651
1507 1652 /*
1508 1653 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
1509 1654 */
1510 1655 static void
1511 1656 ixgbe_tx_clean(ixgbe_t *ixgbe)
1512 1657 {
1513 1658 ixgbe_tx_ring_t *tx_ring;
1514 1659 tx_control_block_t *tcb;
1515 1660 link_list_t pending_list;
1516 1661 uint32_t desc_num;
1517 1662 int i, j;
1518 1663
1519 1664 LINK_LIST_INIT(&pending_list);
1520 1665
1521 1666 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1522 1667 tx_ring = &ixgbe->tx_rings[i];
1523 1668
1524 1669 mutex_enter(&tx_ring->recycle_lock);
1525 1670
1526 1671 /*
1527 1672 * Clean the pending tx data - the pending packets in the
1528 1673 * work_list that have no chances to be transmitted again.
1529 1674 *
1530 1675 * We must ensure the chipset is stopped or the link is down
1531 1676 * before cleaning the transmit packets.
1532 1677 */
1533 1678 desc_num = 0;
1534 1679 for (j = 0; j < tx_ring->ring_size; j++) {
1535 1680 tcb = tx_ring->work_list[j];
1536 1681 if (tcb != NULL) {
1537 1682 desc_num += tcb->desc_num;
1538 1683
1539 1684 tx_ring->work_list[j] = NULL;
1540 1685
1541 1686 ixgbe_free_tcb(tcb);
1542 1687
1543 1688 LIST_PUSH_TAIL(&pending_list, &tcb->link);
1544 1689 }
1545 1690 }
1546 1691
1547 1692 if (desc_num > 0) {
1548 1693 atomic_add_32(&tx_ring->tbd_free, desc_num);
1549 1694 ASSERT(tx_ring->tbd_free == tx_ring->ring_size);
1550 1695
1551 1696 /*
1552 1697 * Reset the head and tail pointers of the tbd ring;
1553 1698			 * Reset the writeback head if it's enabled.
1554 1699 */
1555 1700 tx_ring->tbd_head = 0;
1556 1701 tx_ring->tbd_tail = 0;
1557 1702 if (ixgbe->tx_head_wb_enable)
1558 1703 *tx_ring->tbd_head_wb = 0;
1559 1704
1560 1705 IXGBE_WRITE_REG(&ixgbe->hw,
1561 1706 IXGBE_TDH(tx_ring->index), 0);
1562 1707 IXGBE_WRITE_REG(&ixgbe->hw,
1563 1708 IXGBE_TDT(tx_ring->index), 0);
1564 1709 }
1565 1710
1566 1711 mutex_exit(&tx_ring->recycle_lock);
1567 1712
1568 1713 /*
1569 1714 * Add the tx control blocks in the pending list to
1570 1715 * the free list.
1571 1716 */
1572 1717 ixgbe_put_free_list(tx_ring, &pending_list);
1573 1718 }
1574 1719 }
1575 1720
1576 1721 /*
1577 1722 * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
1578 1723 * transmitted.
1579 1724 */
1580 1725 static boolean_t
1581 1726 ixgbe_tx_drain(ixgbe_t *ixgbe)
1582 1727 {
1583 1728 ixgbe_tx_ring_t *tx_ring;
1584 1729 boolean_t done;
1585 1730 int i, j;
1586 1731
1587 1732 /*
1588 1733 * Wait for a specific time to allow pending tx packets
1589 1734 * to be transmitted.
1590 1735 *
1591 1736 * Check the counter tbd_free to see if transmission is done.
1592 1737 * No lock protection is needed here.
1593 1738 *
1594 1739 * Return B_TRUE if all pending packets have been transmitted;
1595 1740	 * otherwise return B_FALSE.
1596 1741 */
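     +	/* The wait is bounded: at most TX_DRAIN_TIME polls of 1 ms each. */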
1597 1742 for (i = 0; i < TX_DRAIN_TIME; i++) {
1598 1743
1599 1744 done = B_TRUE;
1600 1745 for (j = 0; j < ixgbe->num_tx_rings; j++) {
1601 1746 tx_ring = &ixgbe->tx_rings[j];
1602 1747 done = done &&
1603 1748 (tx_ring->tbd_free == tx_ring->ring_size);
1604 1749 }
1605 1750
1606 1751 if (done)
1607 1752 break;
1608 1753
1609 1754 msec_delay(1);
1610 1755 }
1611 1756
1612 1757 return (done);
1613 1758 }
1614 1759
1615 1760 /*
1616 1761 * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
1617 1762 */
1618 1763 static boolean_t
1619 1764 ixgbe_rx_drain(ixgbe_t *ixgbe)
1620 1765 {
1621 1766 boolean_t done = B_TRUE;
1622 1767 int i;
1623 1768
1624 1769 /*
1625 1770	 * Poll the rx free list to check whether the rx buffers held by
1626 1771	 * the upper layer have been released.
1627 1772 *
1628 1773 * Check the counter rcb_free to see if all pending buffers are
1629 1774 * released. No lock protection is needed here.
1630 1775 *
1631 1776 * Return B_TRUE if all pending buffers have been released;
1632 1777	 * otherwise return B_FALSE.
1633 1778 */
1634 1779 for (i = 0; i < RX_DRAIN_TIME; i++) {
1635 1780 done = (ixgbe->rcb_pending == 0);
1636 1781
1637 1782 if (done)
1638 1783 break;
1639 1784
1640 1785 msec_delay(1);
1641 1786 }
1642 1787
1643 1788 return (done);
1644 1789 }
1645 1790
1646 1791 /*
1647 1792 * ixgbe_start - Start the driver/chipset.
1648 1793 */
1649 1794 int
1650 1795 ixgbe_start(ixgbe_t *ixgbe, boolean_t alloc_buffer)
1651 1796 {
1797 + struct ixgbe_hw *hw = &ixgbe->hw;
1652 1798 int i;
1653 1799
1654 1800 ASSERT(mutex_owned(&ixgbe->gen_lock));
1655 1801
1656 1802 if (alloc_buffer) {
1657 1803 if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) {
1658 1804 ixgbe_error(ixgbe,
1659 1805 "Failed to allocate software receive rings");
1660 1806 return (IXGBE_FAILURE);
1661 1807 }
1662 1808
1663 1809 /* Allocate buffers for all the rx/tx rings */
1664 1810 if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) {
1665 1811 ixgbe_error(ixgbe, "Failed to allocate DMA resource");
1666 1812 return (IXGBE_FAILURE);
1667 1813 }
1668 1814
1669 1815 ixgbe->tx_ring_init = B_TRUE;
1670 1816 } else {
1671 1817 ixgbe->tx_ring_init = B_FALSE;
1672 1818 }
1673 1819
1674 1820 for (i = 0; i < ixgbe->num_rx_rings; i++)
1675 1821 mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1676 1822 for (i = 0; i < ixgbe->num_tx_rings; i++)
1677 1823 mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1678 1824
1679 1825 /*
1680 1826 * Start the chipset hardware
1681 1827 */
1682 1828 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1683 1829 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1684 1830 goto start_failure;
1685 1831 }
1686 1832
1833 + /*
1834 + * Configure link now for X550
1835 + *
1836 +	 * X550 possesses an LPLU (Low-Power Link Up) mode which keeps the
1837 + * resting state of the adapter at a 1Gb FDX speed. Prior to the X550,
1838 + * the resting state of the link would be the maximum speed that
1839 + * autonegotiation will allow (usually 10Gb, infrastructure allowing)
1840 + * so we never bothered with explicitly setting the link to 10Gb as it
1841 + * would already be at that state on driver attach. With X550, we must
1842 + * trigger a re-negotiation of the link in order to switch from a LPLU
1843 +	 * 1Gb link to 10Gb (cable and link partner permitting).
1844 + */
1845 + if (hw->mac.type == ixgbe_mac_X550 ||
1846 + hw->mac.type == ixgbe_mac_X550EM_x) {
1847 + (void) ixgbe_driver_setup_link(ixgbe, B_TRUE);
1848 + ixgbe_get_hw_state(ixgbe);
1849 + }
1850 +
1687 1851 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1688 1852 goto start_failure;
1689 1853 }
1690 1854
1691 1855 /*
1692 1856 * Setup the rx/tx rings
1693 1857 */
1694 1858 ixgbe_setup_rings(ixgbe);
1695 1859
1696 1860 /*
1697 1861 * ixgbe_start() will be called when resetting, however if reset
1698 1862 * happens, we need to clear the ERROR, STALL and OVERTEMP flags
1699 1863 * before enabling the interrupts.
1700 1864 */
1701 1865 atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR
1702 1866	    | IXGBE_STALL | IXGBE_OVERTEMP));
1703 1867
1704 1868 /*
1705 1869 * Enable adapter interrupts
1706 1870 * The interrupts must be enabled after the driver state is START
1707 1871 */
1708 1872 ixgbe_enable_adapter_interrupts(ixgbe);
1709 1873
1710 1874 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1711 1875 mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1712 1876 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1713 1877 mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1714 1878
1715 1879 return (IXGBE_SUCCESS);
1716 1880
1717 1881 start_failure:
1718 1882 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1719 1883 mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1720 1884 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1721 1885 mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1722 1886
1723 1887 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1724 1888
1725 1889 return (IXGBE_FAILURE);
1726 1890 }
1727 1891
1728 1892 /*
1729 1893 * ixgbe_stop - Stop the driver/chipset.
1730 1894 */
1731 1895 void
1732 1896 ixgbe_stop(ixgbe_t *ixgbe, boolean_t free_buffer)
1733 1897 {
1734 1898 int i;
1735 1899
1736 1900 ASSERT(mutex_owned(&ixgbe->gen_lock));
1737 1901
1738 1902 /*
1739 1903 * Disable the adapter interrupts
1740 1904 */
1741 1905 ixgbe_disable_adapter_interrupts(ixgbe);
1742 1906
1743 1907 /*
1744 1908 * Drain the pending tx packets
1745 1909 */
1746 1910 (void) ixgbe_tx_drain(ixgbe);
1747 1911
1748 1912 for (i = 0; i < ixgbe->num_rx_rings; i++)
1749 1913 mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1750 1914 for (i = 0; i < ixgbe->num_tx_rings; i++)
1751 1915 mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1752 1916
1753 1917 /*
1754 1918 * Stop the chipset hardware
1755 1919 */
1756 1920 ixgbe_chip_stop(ixgbe);
1757 1921
1758 1922 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1759 1923 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1760 1924 }
1761 1925
1762 1926 /*
1763 1927 * Clean the pending tx data/resources
1764 1928 */
1765 1929 ixgbe_tx_clean(ixgbe);
1766 1930
1767 1931 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1768 1932 mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1769 1933 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1770 1934 mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1771 1935
1772 1936 if (ixgbe->link_state == LINK_STATE_UP) {
1773 1937 ixgbe->link_state = LINK_STATE_UNKNOWN;
1774 1938 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
1775 1939 }
1776 1940
1777 1941 if (free_buffer) {
1778 1942 /*
1779 1943 * Release the DMA/memory resources of rx/tx rings
1780 1944 */
1781 1945 ixgbe_free_dma(ixgbe);
1782 1946 ixgbe_free_rx_data(ixgbe);
1783 1947 }
1784 1948 }
1785 1949
1786 1950 /*
1787 1951 * ixgbe_cbfunc - Driver interface for generic DDI callbacks
1788 1952 */
1789 1953 /* ARGSUSED */
1790 1954 static int
1791 1955 ixgbe_cbfunc(dev_info_t *dip, ddi_cb_action_t cbaction, void *cbarg,
1792 1956 void *arg1, void *arg2)
1793 1957 {
1794 1958 ixgbe_t *ixgbe = (ixgbe_t *)arg1;
1795 1959
1796 1960 switch (cbaction) {
1797 1961 /* IRM callback */
1798 1962 int count;
1799 1963 case DDI_CB_INTR_ADD:
1800 1964 case DDI_CB_INTR_REMOVE:
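     +		/* For IRM callbacks, cbarg carries the vector count delta. */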
1801 1965 count = (int)(uintptr_t)cbarg;
1802 1966 ASSERT(ixgbe->intr_type == DDI_INTR_TYPE_MSIX);
1803 1967 DTRACE_PROBE2(ixgbe__irm__callback, int, count,
1804 1968 int, ixgbe->intr_cnt);
1805 1969 if (ixgbe_intr_adjust(ixgbe, cbaction, count) !=
1806 1970 DDI_SUCCESS) {
1807 1971 ixgbe_error(ixgbe,
1808 1972 "IRM CB: Failed to adjust interrupts");
1809 1973 goto cb_fail;
1810 1974 }
1811 1975 break;
1812 1976 default:
1813 1977 IXGBE_DEBUGLOG_1(ixgbe, "DDI CB: action 0x%x NOT supported",
1814 1978 cbaction);
1815 1979 return (DDI_ENOTSUP);
1816 1980 }
1817 1981 return (DDI_SUCCESS);
1818 1982 cb_fail:
1819 1983 return (DDI_FAILURE);
1820 1984 }
1821 1985
1822 1986 /*
1823 1987 * ixgbe_intr_adjust - Adjust interrupt to respond to IRM request.
1824 1988 */
1825 1989 static int
1826 1990 ixgbe_intr_adjust(ixgbe_t *ixgbe, ddi_cb_action_t cbaction, int count)
1827 1991 {
1828 1992 int i, rc, actual;
1829 1993
1830 1994 if (count == 0)
1831 1995 return (DDI_SUCCESS);
1832 1996
1833 1997 if ((cbaction == DDI_CB_INTR_ADD &&
1834 1998 ixgbe->intr_cnt + count > ixgbe->intr_cnt_max) ||
1835 1999 (cbaction == DDI_CB_INTR_REMOVE &&
1836 2000 ixgbe->intr_cnt - count < ixgbe->intr_cnt_min))
1837 2001 return (DDI_FAILURE);
1838 2002
1839 2003 if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
1840 2004 return (DDI_FAILURE);
1841 2005 }
1842 2006
1843 2007 for (i = 0; i < ixgbe->num_rx_rings; i++)
1844 2008 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, NULL);
1845 2009 for (i = 0; i < ixgbe->num_tx_rings; i++)
1846 2010 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, NULL);
1847 2011
1848 2012 mutex_enter(&ixgbe->gen_lock);
1849 2013 ixgbe->ixgbe_state &= ~IXGBE_STARTED;
1850 2014 ixgbe->ixgbe_state |= IXGBE_INTR_ADJUST;
1851 2015 ixgbe->ixgbe_state |= IXGBE_SUSPENDED;
1852 2016 mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
1853 2017
1854 2018 ixgbe_stop(ixgbe, B_FALSE);
1855 2019 /*
1856 2020 * Disable interrupts
1857 2021 */
1858 2022 if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
1859 2023 rc = ixgbe_disable_intrs(ixgbe);
1860 2024 ASSERT(rc == IXGBE_SUCCESS);
1861 2025 }
1862 2026 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ENABLE_INTR;
1863 2027
1864 2028 /*
1865 2029 * Remove interrupt handlers
1866 2030 */
1867 2031 if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
1868 2032 ixgbe_rem_intr_handlers(ixgbe);
1869 2033 }
1870 2034 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ADD_INTR;
1871 2035
1872 2036 /*
1873 2037 * Clear vect_map
1874 2038 */
1875 2039 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
1876 2040 switch (cbaction) {
1877 2041 case DDI_CB_INTR_ADD:
1878 2042 rc = ddi_intr_alloc(ixgbe->dip, ixgbe->htable,
1879 2043 DDI_INTR_TYPE_MSIX, ixgbe->intr_cnt, count, &actual,
1880 2044 DDI_INTR_ALLOC_NORMAL);
1881 2045 if (rc != DDI_SUCCESS || actual != count) {
1882 2046			ixgbe_log(ixgbe, "Adjust interrupts failed. "
1883 2047 "return: %d, irm cb size: %d, actual: %d",
1884 2048 rc, count, actual);
1885 2049 goto intr_adjust_fail;
1886 2050 }
1887 2051 ixgbe->intr_cnt += count;
1888 2052 break;
1889 2053
1890 2054 case DDI_CB_INTR_REMOVE:
1891 2055 for (i = ixgbe->intr_cnt - count;
1892 2056		    i < ixgbe->intr_cnt; i++) {
1893 2057 rc = ddi_intr_free(ixgbe->htable[i]);
1894 2058 ixgbe->htable[i] = NULL;
1895 2059 if (rc != DDI_SUCCESS) {
1896 2060				ixgbe_log(ixgbe, "Adjust interrupts failed. "
1897 2061 "return: %d, irm cb size: %d, actual: %d",
1898 2062 rc, count, actual);
1899 2063 goto intr_adjust_fail;
1900 2064 }
1901 2065 }
1902 2066 ixgbe->intr_cnt -= count;
1903 2067 break;
1904 2068 }
1905 2069
1906 2070 /*
1907 2071 * Get priority for first vector, assume remaining are all the same
1908 2072 */
1909 2073 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
1910 2074 if (rc != DDI_SUCCESS) {
1911 2075 ixgbe_log(ixgbe,
1912 2076 "Get interrupt priority failed: %d", rc);
1913 2077 goto intr_adjust_fail;
1914 2078 }
1915 2079 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
1916 2080 if (rc != DDI_SUCCESS) {
1917 2081 ixgbe_log(ixgbe, "Get interrupt cap failed: %d", rc);
1918 2082 goto intr_adjust_fail;
1919 2083 }
1920 2084 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
1921 2085
1922 2086 /*
1923 2087 * Map rings to interrupt vectors
1924 2088 */
1925 2089 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
1926 2090 ixgbe_error(ixgbe,
1927 2091 "IRM CB: Failed to map interrupts to vectors");
1928 2092 goto intr_adjust_fail;
1929 2093 }
1930 2094
1931 2095 /*
1932 2096 * Add interrupt handlers
1933 2097 */
1934 2098 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
1935 2099 ixgbe_error(ixgbe, "IRM CB: Failed to add interrupt handlers");
1936 2100 goto intr_adjust_fail;
1937 2101 }
1938 2102 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
1939 2103
1940 2104 /*
1941 2105 * Now that mutex locks are initialized, and the chip is also
1942 2106 * initialized, enable interrupts.
1943 2107 */
1944 2108 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
1945 2109 ixgbe_error(ixgbe, "IRM CB: Failed to enable DDI interrupts");
1946 2110 goto intr_adjust_fail;
1947 2111 }
1948 2112 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
1949 2113 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1950 2114 ixgbe_error(ixgbe, "IRM CB: Failed to start");
1951 2115 goto intr_adjust_fail;
1952 2116 }
1953 2117 ixgbe->ixgbe_state &= ~IXGBE_INTR_ADJUST;
1954 2118 ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED;
1955 2119 ixgbe->ixgbe_state |= IXGBE_STARTED;
1956 2120 mutex_exit(&ixgbe->gen_lock);
1957 2121
1958 2122 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1959 2123 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle,
1960 2124 ixgbe->htable[ixgbe->rx_rings[i].intr_vector]);
1961 2125 }
1962 2126 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1963 2127 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle,
1964 2128 ixgbe->htable[ixgbe->tx_rings[i].intr_vector]);
1965 2129 }
1966 2130
1967 2131 /* Wakeup all Tx rings */
1968 2132 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1969 2133 mac_tx_ring_update(ixgbe->mac_hdl,
1970 2134 ixgbe->tx_rings[i].ring_handle);
1971 2135 }
1972 2136
1973 2137 IXGBE_DEBUGLOG_3(ixgbe,
1974 2138 "IRM CB: interrupts new value: 0x%x(0x%x:0x%x).",
1975 2139 ixgbe->intr_cnt, ixgbe->intr_cnt_min, ixgbe->intr_cnt_max);
1976 2140 return (DDI_SUCCESS);
1977 2141
1978 2142 intr_adjust_fail:
1979 2143 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1980 2144 mutex_exit(&ixgbe->gen_lock);
1981 2145 return (DDI_FAILURE);
1982 2146 }
1983 2147
1984 2148 /*
1985 2149 * ixgbe_intr_cb_register - Register interrupt callback function.
1986 2150 */
1987 2151 static int
1988 2152 ixgbe_intr_cb_register(ixgbe_t *ixgbe)
1989 2153 {
1990 2154 if (ddi_cb_register(ixgbe->dip, DDI_CB_FLAG_INTR, ixgbe_cbfunc,
1991 2155 ixgbe, NULL, &ixgbe->cb_hdl) != DDI_SUCCESS) {
1992 2156 return (IXGBE_FAILURE);
1993 2157 }
1994 2158 IXGBE_DEBUGLOG_0(ixgbe, "Interrupt callback function registered.");
1995 2159 return (IXGBE_SUCCESS);
1996 2160 }
1997 2161
1998 2162 /*
1999 2163 * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
2000 2164 */
2001 2165 static int
2002 2166 ixgbe_alloc_rings(ixgbe_t *ixgbe)
2003 2167 {
2004 2168 /*
2005 2169 * Allocate memory space for rx rings
2006 2170 */
2007 2171 ixgbe->rx_rings = kmem_zalloc(
2008 2172 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings,
2009 2173 KM_NOSLEEP);
2010 2174
2011 2175 if (ixgbe->rx_rings == NULL) {
2012 2176 return (IXGBE_FAILURE);
2013 2177 }
2014 2178
2015 2179 /*
2016 2180 * Allocate memory space for tx rings
2017 2181 */
2018 2182 ixgbe->tx_rings = kmem_zalloc(
2019 2183 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings,
2020 2184 KM_NOSLEEP);
2021 2185
2022 2186 if (ixgbe->tx_rings == NULL) {
2023 2187 kmem_free(ixgbe->rx_rings,
2024 2188 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
2025 2189 ixgbe->rx_rings = NULL;
2026 2190 return (IXGBE_FAILURE);
2027 2191 }
2028 2192
2029 2193 /*
2030 2194 * Allocate memory space for rx ring groups
2031 2195 */
2032 2196 ixgbe->rx_groups = kmem_zalloc(
2033 2197 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups,
2034 2198 KM_NOSLEEP);
2035 2199
2036 2200 if (ixgbe->rx_groups == NULL) {
2037 2201 kmem_free(ixgbe->rx_rings,
2038 2202 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
2039 2203 kmem_free(ixgbe->tx_rings,
2040 2204 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
2041 2205 ixgbe->rx_rings = NULL;
2042 2206 ixgbe->tx_rings = NULL;
2043 2207 return (IXGBE_FAILURE);
2044 2208 }
2045 2209
2046 2210 return (IXGBE_SUCCESS);
2047 2211 }
2048 2212
2049 2213 /*
2050 2214 * ixgbe_free_rings - Free the memory space of rx/tx rings.
2051 2215 */
2052 2216 static void
2053 2217 ixgbe_free_rings(ixgbe_t *ixgbe)
2054 2218 {
2055 2219 if (ixgbe->rx_rings != NULL) {
2056 2220 kmem_free(ixgbe->rx_rings,
2057 2221 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
2058 2222 ixgbe->rx_rings = NULL;
2059 2223 }
2060 2224
2061 2225 if (ixgbe->tx_rings != NULL) {
2062 2226 kmem_free(ixgbe->tx_rings,
2063 2227 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
2064 2228 ixgbe->tx_rings = NULL;
2065 2229 }
2066 2230
2067 2231 if (ixgbe->rx_groups != NULL) {
2068 2232 kmem_free(ixgbe->rx_groups,
2069 2233 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups);
2070 2234 ixgbe->rx_groups = NULL;
2071 2235 }
2072 2236 }
2073 2237
2074 2238 static int
2075 2239 ixgbe_alloc_rx_data(ixgbe_t *ixgbe)
2076 2240 {
2077 2241 ixgbe_rx_ring_t *rx_ring;
2078 2242 int i;
2079 2243
2080 2244 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2081 2245 rx_ring = &ixgbe->rx_rings[i];
2082 2246 if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS)
2083 2247 goto alloc_rx_rings_failure;
2084 2248 }
2085 2249 return (IXGBE_SUCCESS);
2086 2250
2087 2251 alloc_rx_rings_failure:
2088 2252 ixgbe_free_rx_data(ixgbe);
2089 2253 return (IXGBE_FAILURE);
2090 2254 }
2091 2255
2092 2256 static void
2093 2257 ixgbe_free_rx_data(ixgbe_t *ixgbe)
2094 2258 {
2095 2259 ixgbe_rx_ring_t *rx_ring;
2096 2260 ixgbe_rx_data_t *rx_data;
2097 2261 int i;
2098 2262
2099 2263 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2100 2264 rx_ring = &ixgbe->rx_rings[i];
2101 2265
2102 2266 mutex_enter(&ixgbe->rx_pending_lock);
2103 2267 rx_data = rx_ring->rx_data;
2104 2268
2105 2269 if (rx_data != NULL) {
2106 2270 rx_data->flag |= IXGBE_RX_STOPPED;
2107 2271
2108 2272 if (rx_data->rcb_pending == 0) {
2109 2273 ixgbe_free_rx_ring_data(rx_data);
2110 2274 rx_ring->rx_data = NULL;
2111 2275 }
2112 2276 }
2113 2277
2114 2278 mutex_exit(&ixgbe->rx_pending_lock);
2115 2279 }
2116 2280 }
2117 2281
2118 2282 /*
2119 2283 * ixgbe_setup_rings - Setup rx/tx rings.
2120 2284 */
2121 2285 static void
2122 2286 ixgbe_setup_rings(ixgbe_t *ixgbe)
2123 2287 {
2124 2288 /*
2125 2289 * Setup the rx/tx rings, including the following:
2126 2290 *
2127 2291 * 1. Setup the descriptor ring and the control block buffers;
2128 2292 * 2. Initialize necessary registers for receive/transmit;
2129 2293 * 3. Initialize software pointers/parameters for receive/transmit;
2130 2294 */
2131 2295 ixgbe_setup_rx(ixgbe);
2132 2296
2133 2297 ixgbe_setup_tx(ixgbe);
2134 2298 }
2135 2299
2136 2300 static void
2137 2301 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
2138 2302 {
2139 2303 ixgbe_t *ixgbe = rx_ring->ixgbe;
2140 2304 ixgbe_rx_data_t *rx_data = rx_ring->rx_data;
2141 2305 struct ixgbe_hw *hw = &ixgbe->hw;
2142 2306 rx_control_block_t *rcb;
2143 2307 union ixgbe_adv_rx_desc *rbd;
2144 2308 uint32_t size;
2145 2309 uint32_t buf_low;
2146 2310 uint32_t buf_high;
2147 2311 uint32_t reg_val;
2148 2312 int i;
2149 2313
2150 2314 ASSERT(mutex_owned(&rx_ring->rx_lock));
2151 2315 ASSERT(mutex_owned(&ixgbe->gen_lock));
2152 2316
2153 2317 for (i = 0; i < ixgbe->rx_ring_size; i++) {
2154 2318 rcb = rx_data->work_list[i];
2155 2319 rbd = &rx_data->rbd_ring[i];
2156 2320
2157 2321 rbd->read.pkt_addr = rcb->rx_buf.dma_address;
2158 2322 rbd->read.hdr_addr = NULL;
2159 2323 }
2160 2324
2161 2325 /*
2162 2326 * Initialize the length register
2163 2327 */
2164 2328 size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc);
2165 2329 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->hw_index), size);
2166 2330
2167 2331 /*
2168 2332 * Initialize the base address registers
2169 2333 */
2170 2334 buf_low = (uint32_t)rx_data->rbd_area.dma_address;
2171 2335 buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32);
2172 2336 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->hw_index), buf_high);
2173 2337 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->hw_index), buf_low);
2174 2338
2175 2339 /*
2176 2340 * Setup head & tail pointers
2177 2341 */
2178 2342 IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->hw_index),
2179 2343 rx_data->ring_size - 1);
2180 2344 IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0);
2181 2345
2182 2346 rx_data->rbd_next = 0;
2183 2347 rx_data->lro_first = 0;
2184 2348
2185 2349 /*
2186 2350 * Setup the Receive Descriptor Control Register (RXDCTL)
2187 2351 * PTHRESH=32 descriptors (half the internal cache)
2188 2352 * HTHRESH=0 descriptors (to minimize latency on fetch)
2189 2353 * WTHRESH defaults to 1 (writeback each descriptor)
2190 2354 */
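     +	/* The 0x0020 written below encodes PTHRESH=32 in RXDCTL's low bits. */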
2191 2355 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index));
2192 2356 reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */
2193 2357
2194 - /* Not a valid value for 82599 or X540 */
2358 + /* Not a valid value for 82599, X540 or X550 */
2195 2359 if (hw->mac.type == ixgbe_mac_82598EB) {
2196 2360 reg_val |= 0x0020; /* pthresh */
2197 2361 }
2198 2362 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val);
2199 2363
2200 2364 if (hw->mac.type == ixgbe_mac_82599EB ||
2201 - hw->mac.type == ixgbe_mac_X540) {
2365 + hw->mac.type == ixgbe_mac_X540 ||
2366 + hw->mac.type == ixgbe_mac_X550 ||
2367 + hw->mac.type == ixgbe_mac_X550EM_x) {
2202 2368 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2203 2369 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
2204 2370 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2205 2371 }
2206 2372
2207 2373 /*
2208 2374 * Setup the Split and Replication Receive Control Register.
2209 2375 * Set the rx buffer size and the advanced descriptor type.
2210 2376 */
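     +	/*
     +	 * BSIZEPKT is in 1 KB units, hence the right shift of rx_buf_size.
     +	 * DROP_EN makes the hardware drop arriving packets once the ring
     +	 * has no free descriptors, instead of stalling other queues.
     +	 */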
2211 2377 reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
2212 2378 IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2213 2379 reg_val |= IXGBE_SRRCTL_DROP_EN;
2214 2380 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
2215 2381 }
2216 2382
2216 2382
2217 2383 static void
2218 2384 ixgbe_setup_rx(ixgbe_t *ixgbe)
2219 2385 {
2220 2386 ixgbe_rx_ring_t *rx_ring;
2221 2387 struct ixgbe_hw *hw = &ixgbe->hw;
2222 2388 uint32_t reg_val;
2223 2389 uint32_t ring_mapping;
2224 2390 uint32_t i, index;
2225 2391 uint32_t psrtype_rss_bit;
2226 2392
2393 + /*
2394 + * Ensure that Rx is disabled while setting up
2395 + * the Rx unit and Rx descriptor ring(s)
2396 + */
2397 + ixgbe_disable_rx(hw);
2398 +
2227 2399 /* PSRTYPE must be configured for 82599 */
2228 2400 if (ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ &&
2229 2401 ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ_RSS) {
2230 2402 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
2231 2403 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
2232 2404 reg_val |= IXGBE_PSRTYPE_L2HDR;
2233 2405 reg_val |= 0x80000000;
2234 2406 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val);
2235 2407 } else {
2236 2408 if (ixgbe->num_rx_groups > 32) {
2237 2409 psrtype_rss_bit = 0x20000000;
2238 2410 } else {
2239 2411 psrtype_rss_bit = 0x40000000;
2240 2412 }
2241 2413 for (i = 0; i < ixgbe->capab->max_rx_grp_num; i++) {
2242 2414 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
2243 2415 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
2244 2416 reg_val |= IXGBE_PSRTYPE_L2HDR;
2245 2417 reg_val |= psrtype_rss_bit;
2246 2418 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(i), reg_val);
2247 2419 }
2248 2420 }
2249 2421
2250 2422 /*
2251 - * Set filter control in FCTRL to accept broadcast packets and do
2252 - * not pass pause frames to host. Flow control settings are already
2253 - * in this register, so preserve them.
2423 +	 * Set filter control in FCTRL to determine which types of packets
2424 +	 * are passed up to the driver.
2425 +	 * - Pass broadcast packets.
2426 +	 * - Do not pass flow control pause frames (82598-specific).
2254 2427 */
2255 2428 reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2256 - reg_val |= IXGBE_FCTRL_BAM; /* broadcast accept mode */
2257 - reg_val |= IXGBE_FCTRL_DPF; /* discard pause frames */
2429 + reg_val |= IXGBE_FCTRL_BAM; /* Broadcast Accept Mode */
2430 + if (hw->mac.type == ixgbe_mac_82598EB) {
2431 + reg_val |= IXGBE_FCTRL_DPF; /* Discard Pause Frames */
2432 + }
2258 2433 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);
2259 2434
2260 2435 /*
2261 2436 * Hardware checksum settings
2262 2437 */
2263 2438 if (ixgbe->rx_hcksum_enable) {
2264 - reg_val = IXGBE_RXCSUM_IPPCSE; /* IP checksum */
2439 + reg_val = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2440 + reg_val |= IXGBE_RXCSUM_IPPCSE; /* IP checksum */
2265 2441 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
2266 2442 }
2267 2443
2268 2444 /*
2269 2445 * Setup VMDq and RSS for multiple receive queues
2270 2446 */
2271 2447 switch (ixgbe->classify_mode) {
2272 2448 case IXGBE_CLASSIFY_RSS:
2273 2449 /*
2274 2450 * One group, only RSS is needed when more than
2275 2451 * one ring enabled.
2276 2452 */
2277 2453 ixgbe_setup_rss(ixgbe);
2278 2454 break;
2279 2455
2280 2456 case IXGBE_CLASSIFY_VMDQ:
2281 2457 /*
2282 2458 * Multiple groups, each group has one ring,
2283 2459 * only VMDq is needed.
2284 2460 */
2285 2461 ixgbe_setup_vmdq(ixgbe);
2286 2462 break;
2287 2463
2288 2464 case IXGBE_CLASSIFY_VMDQ_RSS:
2289 2465 /*
2290 2466 * Multiple groups and multiple rings, both
2291 2467 * VMDq and RSS are needed.
2292 2468 */
2293 2469 ixgbe_setup_vmdq_rss(ixgbe);
2294 2470 break;
2295 2471
2296 2472 default:
2297 2473 break;
2298 2474 }
2299 2475
2300 2476 /*
2301 2477 * Enable the receive unit. This must be done after filter
2302 - * control is set in FCTRL.
2478 + * control is set in FCTRL. On 82598, we disable the descriptor monitor.
2479 + * 82598 is the only adapter which defines this RXCTRL option.
2303 2480 */
2304 - reg_val = (IXGBE_RXCTRL_RXEN /* Enable Receive Unit */
2305 - | IXGBE_RXCTRL_DMBYPS); /* descriptor monitor bypass */
2306 - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
2481 + reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2482 + if (hw->mac.type == ixgbe_mac_82598EB)
2483 + reg_val |= IXGBE_RXCTRL_DMBYPS; /* descriptor monitor bypass */
2484 + reg_val |= IXGBE_RXCTRL_RXEN;
2485 + (void) ixgbe_enable_rx_dma(hw, reg_val);
2307 2486
2308 2487 /*
2309 2488 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
2310 2489 */
2311 2490 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2312 2491 rx_ring = &ixgbe->rx_rings[i];
2313 2492 ixgbe_setup_rx_ring(rx_ring);
2314 2493 }
2315 2494
2316 2495 /*
2317 2496 * Setup the per-ring statistics mapping.
2318 2497 */
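     +	/*
     +	 * Each RQSMR register packs mappings for four queues, one byte per
     +	 * queue: (index >> 2) selects the register, (index & 0x3) the byte.
     +	 */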
2319 2498 ring_mapping = 0;
2320 2499 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2321 2500 index = ixgbe->rx_rings[i].hw_index;
2322 2501 ring_mapping = IXGBE_READ_REG(hw, IXGBE_RQSMR(index >> 2));
2323 2502 ring_mapping |= (i & 0xF) << (8 * (index & 0x3));
2324 2503 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(index >> 2), ring_mapping);
2325 2504 }
2326 2505
2327 2506 /*
2328 2507 * The Max Frame Size in MHADD/MAXFRS will be internally increased
2329 2508	 * by four bytes if the packet has a VLAN field, so the value set
2330 2509	 * here includes only the MTU, ethernet header and frame check sequence.
2331 2510 * Register is MAXFRS in 82599.
2332 2511 */
2333 - reg_val = (ixgbe->default_mtu + sizeof (struct ether_header)
2512 + reg_val = IXGBE_READ_REG(hw, IXGBE_MHADD);
2513 + reg_val &= ~IXGBE_MHADD_MFS_MASK;
2514 + reg_val |= (ixgbe->default_mtu + sizeof (struct ether_header)
2334 2515 + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
2335 2516 IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);
2336 2517
2337 2518 /*
2338 2519 * Setup Jumbo Frame enable bit
2339 2520 */
2340 - if (ixgbe->default_mtu > ETHERMTU) {
2341 - reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2521 + reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2522 + if (ixgbe->default_mtu > ETHERMTU)
2342 2523 reg_val |= IXGBE_HLREG0_JUMBOEN;
2343 - IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2344 - }
2524 + else
2525 + reg_val &= ~IXGBE_HLREG0_JUMBOEN;
2526 + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2345 2527
2346 2528 /*
2347 2529 * Setup RSC for multiple receive queues.
2348 2530 */
2349 2531 if (ixgbe->lro_enable) {
2350 2532 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2351 2533 /*
2352 2534 * Make sure rx_buf_size * MAXDESC not greater
2353 2535 * than 65535.
2354 2536 * Intel recommends 4 for MAXDESC field value.
2355 2537 */
2356 2538 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCCTL(i));
2357 2539 reg_val |= IXGBE_RSCCTL_RSCEN;
2358 2540 if (ixgbe->rx_buf_size == IXGBE_PKG_BUF_16k)
2359 2541 reg_val |= IXGBE_RSCCTL_MAXDESC_1;
2360 2542 else
2361 2543 reg_val |= IXGBE_RSCCTL_MAXDESC_4;
2362 2544 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(i), reg_val);
2363 2545 }
2364 2546
2365 2547 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCDBU);
2366 2548 reg_val |= IXGBE_RSCDBU_RSCACKDIS;
2367 2549 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, reg_val);
2368 2550
2369 2551 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2370 2552 reg_val |= IXGBE_RDRXCTL_RSCACKC;
2371 2553 reg_val |= IXGBE_RDRXCTL_FCOE_WRFIX;
2372 2554 reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2373 2555
2374 2556 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2375 2557 }
2376 2558 }
2377 2559
2378 2560 static void
2379 2561 ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
2380 2562 {
2381 2563 ixgbe_t *ixgbe = tx_ring->ixgbe;
2382 2564 struct ixgbe_hw *hw = &ixgbe->hw;
2383 2565 uint32_t size;
2384 2566 uint32_t buf_low;
2385 2567 uint32_t buf_high;
2386 2568 uint32_t reg_val;
2387 2569
2388 2570 ASSERT(mutex_owned(&tx_ring->tx_lock));
2389 2571 ASSERT(mutex_owned(&ixgbe->gen_lock));
2390 2572
2391 2573 /*
2392 2574 * Initialize the length register
2393 2575 */
2394 2576 size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
2395 2577 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);
2396 2578
2397 2579 /*
2398 2580 * Initialize the base address registers
2399 2581 */
2400 2582 buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
2401 2583 buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
2402 2584 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
2403 2585 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);
2404 2586
2405 2587 /*
2406 2588 * Setup head & tail pointers
2407 2589 */
2408 2590 IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
2409 2591 IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);
2410 2592
2411 2593 /*
2412 2594 * Setup head write-back
2413 2595 */
2414 2596 if (ixgbe->tx_head_wb_enable) {
2415 2597 /*
2416 2598 * The memory of the head write-back is allocated using
2417 2599 * the extra tbd beyond the tail of the tbd ring.
2418 2600 */
2419 2601 tx_ring->tbd_head_wb = (uint32_t *)
2420 2602 ((uintptr_t)tx_ring->tbd_area.address + size);
2421 2603 *tx_ring->tbd_head_wb = 0;
2422 2604
2423 2605 buf_low = (uint32_t)
2424 2606 (tx_ring->tbd_area.dma_address + size);
2425 2607 buf_high = (uint32_t)
2426 2608 ((tx_ring->tbd_area.dma_address + size) >> 32);
2427 2609
2428 2610 /* Set the head write-back enable bit */
2429 2611 buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
2430 2612
2431 2613 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
2432 2614 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);
2433 2615
2434 2616 /*
2435 2617 * Turn off relaxed ordering for head write back or it will
2436 2618 * cause problems with the tx recycling
2437 2619 */
2438 2620
2439 2621 reg_val = (hw->mac.type == ixgbe_mac_82598EB) ?
2440 2622 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) :
2441 2623 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index));
2442 2624 reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2443 2625 if (hw->mac.type == ixgbe_mac_82598EB) {
2444 2626 IXGBE_WRITE_REG(hw,
2445 2627 IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2446 2628 } else {
2447 2629 IXGBE_WRITE_REG(hw,
2448 2630 IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val);
2449 2631 }
2450 2632 } else {
2451 2633 tx_ring->tbd_head_wb = NULL;
2452 2634 }
2453 2635
2454 2636 tx_ring->tbd_head = 0;
2455 2637 tx_ring->tbd_tail = 0;
2456 2638 tx_ring->tbd_free = tx_ring->ring_size;
2457 2639
2458 2640 if (ixgbe->tx_ring_init == B_TRUE) {
2459 2641 tx_ring->tcb_head = 0;
2460 2642 tx_ring->tcb_tail = 0;
2461 2643 tx_ring->tcb_free = tx_ring->free_list_size;
2462 2644 }
2463 2645
2464 2646 /*
2465 2647 * Initialize the s/w context structure
2466 2648 */
2467 2649 bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
2468 2650 }
2469 2651
2470 2652 static void
2471 2653 ixgbe_setup_tx(ixgbe_t *ixgbe)
2472 2654 {
2473 2655 struct ixgbe_hw *hw = &ixgbe->hw;
2474 2656 ixgbe_tx_ring_t *tx_ring;
2475 2657 uint32_t reg_val;
2476 2658 uint32_t ring_mapping;
2477 2659 int i;
2478 2660
2479 2661 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2480 2662 tx_ring = &ixgbe->tx_rings[i];
2481 2663 ixgbe_setup_tx_ring(tx_ring);
2482 2664 }
2483 2665
2484 2666 /*
2485 2667 * Setup the per-ring statistics mapping.
2486 2668 */
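     +	/*
     +	 * TQSMR (82598) and TQSM (82599 and later) likewise pack four queue
     +	 * mappings per register; the value is flushed every fourth ring,
     +	 * with a final flush below for any remainder.
     +	 */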
2487 2669 ring_mapping = 0;
2488 2670 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2489 2671 ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
2490 2672 if ((i & 0x3) == 0x3) {
2491 2673 switch (hw->mac.type) {
2492 2674 case ixgbe_mac_82598EB:
2493 2675 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
2494 2676 ring_mapping);
2495 2677 break;
2496 2678
2497 2679 case ixgbe_mac_82599EB:
2498 2680 case ixgbe_mac_X540:
2681 + case ixgbe_mac_X550:
2682 + case ixgbe_mac_X550EM_x:
2499 2683 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
2500 2684 ring_mapping);
2501 2685 break;
2502 2686
2503 2687 default:
2504 2688 break;
2505 2689 }
2506 2690
2507 2691 ring_mapping = 0;
2508 2692 }
2509 2693 }
2510 2694 if (i & 0x3) {
2511 2695 switch (hw->mac.type) {
2512 2696 case ixgbe_mac_82598EB:
2513 2697 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
2514 2698 break;
2515 2699
2516 2700 case ixgbe_mac_82599EB:
2517 2701 case ixgbe_mac_X540:
2702 + case ixgbe_mac_X550:
2703 + case ixgbe_mac_X550EM_x:
2518 2704 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
2519 2705 break;
2520 2706
2521 2707 default:
2522 2708 break;
2523 2709 }
2524 2710 }
2525 2711
2526 2712 /*
2527 2713 * Enable CRC appending and TX padding (for short tx frames)
2528 2714 */
2529 2715 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2530 2716 reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
2531 2717 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2532 2718
2533 2719 /*
2534 - * enable DMA for 82599 and X540 parts
2720 + * enable DMA for 82599, X540 and X550 parts
2535 2721 */
2536 2722 if (hw->mac.type == ixgbe_mac_82599EB ||
2537 - hw->mac.type == ixgbe_mac_X540) {
2723 + hw->mac.type == ixgbe_mac_X540 ||
2724 + hw->mac.type == ixgbe_mac_X550 ||
2725 + hw->mac.type == ixgbe_mac_X550EM_x) {
2538 2726 /* DMATXCTL.TE must be set after all Tx config is complete */
2539 2727 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2540 2728 reg_val |= IXGBE_DMATXCTL_TE;
2541 2729 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
2542 2730
2543 2731 /* Disable arbiter to set MTQC */
2544 2732 reg_val = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2545 2733 reg_val |= IXGBE_RTTDCS_ARBDIS;
2546 2734 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
2547 2735 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2548 2736 reg_val &= ~IXGBE_RTTDCS_ARBDIS;
2549 2737 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
2550 2738 }
2551 2739
2552 2740 /*
2553 2741 * Enabling tx queues ..
2554 2742 * For 82599 must be done after DMATXCTL.TE is set
2555 2743 */
2556 2744 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2557 2745 tx_ring = &ixgbe->tx_rings[i];
2558 2746 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
2559 2747 reg_val |= IXGBE_TXDCTL_ENABLE;
2560 2748 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
2561 2749 }
2562 2750 }
2563 2751
2564 2752 /*
2565 2753 * ixgbe_setup_rss - Setup receive-side scaling feature.
2566 2754 */
2567 2755 static void
2568 2756 ixgbe_setup_rss(ixgbe_t *ixgbe)
2569 2757 {
2570 2758 struct ixgbe_hw *hw = &ixgbe->hw;
2571 - uint32_t i, mrqc, rxcsum;
2572 - uint32_t random;
2573 - uint32_t reta;
2574 - uint32_t ring_per_group;
2759 + uint32_t mrqc;
2575 2760
2576 2761 /*
2577 - * Fill out redirection table
2762 + * Initialize RETA/ERETA table
2578 2763 */
2579 - reta = 0;
2580 - ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2764 + ixgbe_setup_rss_table(ixgbe);
2581 2765
2582 - for (i = 0; i < 128; i++) {
2583 - reta = (reta << 8) | (i % ring_per_group) |
2584 - ((i % ring_per_group) << 4);
2585 - if ((i & 3) == 3)
2586 - IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2587 - }
2588 -
2589 2766 /*
2590 - * Fill out hash function seeds with a random constant
2591 - */
2592 - for (i = 0; i < 10; i++) {
2593 - (void) random_get_pseudo_bytes((uint8_t *)&random,
2594 - sizeof (uint32_t));
2595 - IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
2596 - }
2597 -
2598 - /*
2599 2767 * Enable RSS & perform hash on these packet types
2600 2768 */
2601 2769 mrqc = IXGBE_MRQC_RSSEN |
2602 2770 IXGBE_MRQC_RSS_FIELD_IPV4 |
2603 2771 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2604 2772 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2605 2773 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2606 2774 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2607 2775 IXGBE_MRQC_RSS_FIELD_IPV6 |
2608 2776 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2609 2777 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2610 2778 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2611 2779 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2612 -
2613 - /*
2614 - * Disable Packet Checksum to enable RSS for multiple receive queues.
2615 - * It is an adapter hardware limitation that Packet Checksum is
2616 - * mutually exclusive with RSS.
2617 - */
2618 - rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2619 - rxcsum |= IXGBE_RXCSUM_PCSD;
2620 - rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2621 - IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2622 2780 }
2623 2781
2624 2782 /*
2625 2783 * ixgbe_setup_vmdq - Setup MAC classification feature
2626 2784 */
2627 2785 static void
2628 2786 ixgbe_setup_vmdq(ixgbe_t *ixgbe)
2629 2787 {
2630 2788 struct ixgbe_hw *hw = &ixgbe->hw;
2631 2789 uint32_t vmdctl, i, vtctl;
2632 2790
2633 2791 /*
2634 2792 * Setup the VMDq Control register, enable VMDq based on
2635 2793 * packet destination MAC address:
2636 2794 */
2637 2795 switch (hw->mac.type) {
2638 2796 case ixgbe_mac_82598EB:
2639 2797 /*
2640 2798 * VMDq Enable = 1;
2641 2799 * VMDq Filter = 0; MAC filtering
2642 2800 * Default VMDq output index = 0;
2643 2801 */
2644 2802 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2645 2803 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2646 2804 break;
2647 2805
2648 2806 case ixgbe_mac_82599EB:
2649 2807 case ixgbe_mac_X540:
2808 + case ixgbe_mac_X550:
2809 + case ixgbe_mac_X550EM_x:
2650 2810 /*
2651 2811 * Enable VMDq-only.
2652 2812 */
2653 2813 vmdctl = IXGBE_MRQC_VMDQEN;
2654 2814 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);
2655 2815
2656 2816 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2657 2817 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2658 2818 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2659 2819 }
2660 2820
2661 2821 /*
2662 2822 * Enable Virtualization and Replication.
2663 2823 */
2664 2824 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2665 2825 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2666 2826
2667 2827 /*
2668 2828 * Enable receiving packets to all VFs
2669 2829 */
2670 2830 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2671 2831 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2672 2832 break;
2673 2833
2674 2834 default:
2675 2835 break;
2676 2836 }
2677 2837 }
2678 2838
2679 2839 /*
2680 2840 * ixgbe_setup_vmdq_rss - Setup both vmdq feature and rss feature.
2681 2841 */
2682 2842 static void
2683 2843 ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe)
2684 2844 {
2685 2845 struct ixgbe_hw *hw = &ixgbe->hw;
2686 - uint32_t i, mrqc, rxcsum;
2687 - uint32_t random;
2688 - uint32_t reta;
2689 - uint32_t ring_per_group;
2690 - uint32_t vmdctl, vtctl;
2846 + uint32_t i, mrqc;
2847 + uint32_t vtctl, vmdctl;
2691 2848
2692 2849 /*
2693 - * Fill out redirection table
2850 + * Initialize RETA/ERETA table
2694 2851 */
2695 - reta = 0;
2696 - ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2697 - for (i = 0; i < 128; i++) {
2698 - reta = (reta << 8) | (i % ring_per_group) |
2699 - ((i % ring_per_group) << 4);
2700 - if ((i & 3) == 3)
2701 - IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2702 - }
2852 + ixgbe_setup_rss_table(ixgbe);
2703 2853
2704 2854 /*
2705 - * Fill out hash function seeds with a random constant
2706 - */
2707 - for (i = 0; i < 10; i++) {
2708 - (void) random_get_pseudo_bytes((uint8_t *)&random,
2709 - sizeof (uint32_t));
2710 - IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
2711 - }
2712 -
2713 - /*
2714 2855 * Enable and setup RSS and VMDq
2715 2856 */
2716 2857 switch (hw->mac.type) {
2717 2858 case ixgbe_mac_82598EB:
2718 2859 /*
2719 2860 * Enable RSS & Setup RSS Hash functions
2720 2861 */
2721 2862 mrqc = IXGBE_MRQC_RSSEN |
2722 2863 IXGBE_MRQC_RSS_FIELD_IPV4 |
2723 2864 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2724 2865 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2725 2866 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2726 2867 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2727 2868 IXGBE_MRQC_RSS_FIELD_IPV6 |
2728 2869 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2729 2870 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2730 2871 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2731 2872 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2732 2873
2733 2874 /*
2734 2875 * Enable and Setup VMDq
2735 2876 * VMDq Filter = 0; MAC filtering
2736 2877 * Default VMDq output index = 0;
2737 2878 */
2738 2879 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2739 2880 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2740 2881 break;
2741 2882
2742 2883 case ixgbe_mac_82599EB:
2743 2884 case ixgbe_mac_X540:
2885 + case ixgbe_mac_X550:
2886 + case ixgbe_mac_X550EM_x:
2744 2887 /*
2745 2888 * Enable RSS & Setup RSS Hash functions
2746 2889 */
2747 2890 mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 |
2748 2891 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2749 2892 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2750 2893 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2751 2894 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2752 2895 IXGBE_MRQC_RSS_FIELD_IPV6 |
2753 2896 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2754 2897 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2755 2898 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2756 2899
2757 2900 /*
2758 2901 * Enable VMDq+RSS.
2759 2902 */
2760 2903 if (ixgbe->num_rx_groups > 32) {
2761 2904 mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN;
2762 2905 } else {
2763 2906 mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN;
2764 2907 }
2765 2908
2766 2909 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2767 2910
2768 2911 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2769 2912 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2770 2913 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2771 2914 }
2772 2915 break;
2773 2916
2774 2917 default:
2775 2918 break;
2776 2919
2777 2920 }
2778 2921
2779 - /*
2780 - * Disable Packet Checksum to enable RSS for multiple receive queues.
2781 - * It is an adapter hardware limitation that Packet Checksum is
2782 - * mutually exclusive with RSS.
2783 - */
2784 - rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2785 - rxcsum |= IXGBE_RXCSUM_PCSD;
2786 - rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2787 - IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2788 -
2789 2922 if (hw->mac.type == ixgbe_mac_82599EB ||
2790 - hw->mac.type == ixgbe_mac_X540) {
2923 + hw->mac.type == ixgbe_mac_X540 ||
2924 + hw->mac.type == ixgbe_mac_X550 ||
2925 + hw->mac.type == ixgbe_mac_X550EM_x) {
2791 2926 /*
2792 2927 * Enable Virtualization and Replication.
2793 2928 */
2794 2929 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2795 2930 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2796 2931
2797 2932 /*
2798 2933 * Enable receiving packets to all VFs
2799 2934 */
2800 2935 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2801 2936 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2802 2937 }
2803 2938 }
2804 2939
2805 2940 /*
2941 + * ixgbe_setup_rss_table - Setup RSS table
2942 + */
2943 +static void
2944 +ixgbe_setup_rss_table(ixgbe_t *ixgbe)
2945 +{
2946 + struct ixgbe_hw *hw = &ixgbe->hw;
2947 + uint32_t i, j;
2948 + uint32_t random;
2949 + uint32_t reta;
2950 + uint32_t ring_per_group;
2951 + uint32_t ring;
2952 + uint32_t table_size;
2953 + uint32_t index_mult;
2954 + uint32_t rxcsum;
2955 +
2956 + /*
2957 + * Set multiplier for RETA setup and table size based on MAC type.
2958 + * RETA table sizes vary by model:
2959 + *
2960 + * 82598, 82599, X540: 128 table entries.
2961 + * X550: 512 table entries.
2962 + */
2963 + index_mult = 0x1;
2964 + table_size = 128;
2965 + switch (ixgbe->hw.mac.type) {
2966 + case ixgbe_mac_82598EB:
2967 + index_mult = 0x11;
2968 + break;
2969 + case ixgbe_mac_X550:
2970 + case ixgbe_mac_X550EM_x:
2971 + table_size = 512;
2972 + break;
2973 + default:
2974 + break;
2975 + }
2976 +
2977 + /*
2978 +	 * Fill out RSS redirection table. The configuration of the indices is
2979 + * hardware-dependent.
2980 + *
2981 + * 82598: 8 bits wide containing two 4 bit RSS indices
2982 + * 82599, X540: 8 bits wide containing one 4 bit RSS index
2983 + * X550: 8 bits wide containing one 6 bit RSS index
2984 + */
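     +	/*
     +	 * E.g. on 82598 the 0x11 index_mult replicates a 4-bit ring index
     +	 * into both nibbles of a table byte: ring 2 is stored as 0x22.
     +	 */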
2985 + reta = 0;
2986 + ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2987 +
2988 + for (i = 0, j = 0; i < table_size; i++, j++) {
2989 +		if (j == ring_per_group)
     +			j = 0;
2990 +
2991 +		/*
2992 +		 * The low 8 bits are for hash value (n+0);
2993 +		 * The next 8 bits are for hash value (n+1), etc.
2994 +		 */
2995 +		ring = (j * index_mult);
2996 +		reta = reta >> 8;
2997 +		reta = reta | (((uint32_t)ring) << 24);
2998 +
2999 +		if ((i & 3) == 3) {
3000 +			/*
3001 +			 * The first 128 table entries are programmed into the
3002 +			 * RETA register, with any beyond that (e.g. on X550)
3003 +			 * into ERETA.
3004 +			 */
3005 +			if (i < 128)
3006 +				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3007 +			else
3008 +				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
3009 +				    reta);
3010 +			reta = 0;
     +		}
3011 +	}
3012 +
3013 + /*
3014 + * Fill out hash function seeds with a random constant
3015 + */
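     +	/* The ten 32-bit RSSRK registers hold the 40-byte RSS hash key. */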
3016 + for (i = 0; i < 10; i++) {
3017 + (void) random_get_pseudo_bytes((uint8_t *)&random,
3018 + sizeof (uint32_t));
3019 + IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
3020 + }
3021 +
3022 + /*
3023 + * Disable Packet Checksum to enable RSS for multiple receive queues.
3024 + * It is an adapter hardware limitation that Packet Checksum is
3025 + * mutually exclusive with RSS.
3026 + */
3027 + rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3028 + rxcsum |= IXGBE_RXCSUM_PCSD;
3029 + rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
3030 + IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3031 +}
3032 +
3033 +/*
2806 3034 * ixgbe_init_unicst - Initialize the unicast addresses.
2807 3035 */
2808 3036 static void
2809 3037 ixgbe_init_unicst(ixgbe_t *ixgbe)
2810 3038 {
2811 3039 struct ixgbe_hw *hw = &ixgbe->hw;
2812 3040 uint8_t *mac_addr;
2813 3041 int slot;
2814 3042 /*
2815 3043 * Here we should consider two situations:
2816 3044 *
2817 3045 * 1. Chipset is initialized at the first time,
2818 3046 * Clear all the multiple unicast addresses.
2819 3047 *
2820 3048 * 2. Chipset is reset
2821 3049 * Recover the multiple unicast addresses from the
2822 3050 * software data structure to the RAR registers.
2823 3051 */
2824 3052 if (!ixgbe->unicst_init) {
2825 3053 /*
2826 3054 * Initialize the multiple unicast addresses
2827 3055 */
2828 3056 ixgbe->unicst_total = hw->mac.num_rar_entries;
2829 3057 ixgbe->unicst_avail = ixgbe->unicst_total;
2830 3058 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2831 3059 mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2832 3060 bzero(mac_addr, ETHERADDRL);
2833 3061 (void) ixgbe_set_rar(hw, slot, mac_addr, NULL, NULL);
2834 3062 ixgbe->unicst_addr[slot].mac.set = 0;
2835 3063 }
2836 3064 ixgbe->unicst_init = B_TRUE;
2837 3065 } else {
2838 3066 /* Re-configure the RAR registers */
2839 3067 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2840 3068 mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2841 3069 if (ixgbe->unicst_addr[slot].mac.set == 1) {
2842 3070 (void) ixgbe_set_rar(hw, slot, mac_addr,
2843 3071 ixgbe->unicst_addr[slot].mac.group_index,
2844 3072 IXGBE_RAH_AV);
2845 3073 } else {
2846 3074 bzero(mac_addr, ETHERADDRL);
2847 3075 (void) ixgbe_set_rar(hw, slot, mac_addr,
2848 3076 NULL, NULL);
2849 3077 }
2850 3078 }
2851 3079 }
2852 3080 }
2853 3081
2854 3082 /*
2855 3083 * ixgbe_unicst_find - Find the slot for the specified unicast address
2856 3084 */
2857 3085 int
2858 3086 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr)
2859 3087 {
2860 3088 int slot;
2861 3089
2862 3090 ASSERT(mutex_owned(&ixgbe->gen_lock));
2863 3091
2864 3092 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2865 3093 if (bcmp(ixgbe->unicst_addr[slot].mac.addr,
2866 3094 mac_addr, ETHERADDRL) == 0)
2867 3095 return (slot);
2868 3096 }
2869 3097
2870 3098 return (-1);
2871 3099 }
2872 3100
2873 3101 /*
2874 3102 * ixgbe_multicst_add - Add a multicst address.
2875 3103 */
2876 3104 int
2877 3105 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2878 3106 {
2879 3107 ASSERT(mutex_owned(&ixgbe->gen_lock));
2880 3108
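     +	/* A multicast address must have the group bit (bit 0 of octet 0) set. */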
2881 3109 if ((multiaddr[0] & 01) == 0) {
2882 3110 return (EINVAL);
2883 3111 }
2884 3112
2885 3113 if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
2886 3114 return (ENOENT);
2887 3115 }
2888 3116
2889 3117 bcopy(multiaddr,
2890 3118 &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
2891 3119 ixgbe->mcast_count++;
2892 3120
2893 3121 /*
2894 3122 * Update the multicast table in the hardware
2895 3123 */
2896 3124 ixgbe_setup_multicst(ixgbe);
2897 3125
2898 3126 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2899 3127 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2900 3128 return (EIO);
2901 3129 }
2902 3130
2903 3131 return (0);
2904 3132 }
2905 3133
2906 3134 /*
2907 3135 * ixgbe_multicst_remove - Remove a multicst address.
2908 3136 */
2909 3137 int
2910 3138 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2911 3139 {
2912 3140 int i;
2913 3141
2914 3142 ASSERT(mutex_owned(&ixgbe->gen_lock));
2915 3143
2916 3144 for (i = 0; i < ixgbe->mcast_count; i++) {
2917 3145 if (bcmp(multiaddr, &ixgbe->mcast_table[i],
2918 3146 ETHERADDRL) == 0) {
2919 3147 for (i++; i < ixgbe->mcast_count; i++) {
2920 3148 ixgbe->mcast_table[i - 1] =
2921 3149 ixgbe->mcast_table[i];
2922 3150 }
2923 3151 ixgbe->mcast_count--;
2924 3152 break;
2925 3153 }
2926 3154 }
2927 3155
2928 3156 /*
2929 3157 * Update the multicast table in the hardware
2930 3158 */
2931 3159 ixgbe_setup_multicst(ixgbe);
2932 3160
2933 3161 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2934 3162 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2935 3163 return (EIO);
2936 3164 }
2937 3165
2938 3166 return (0);
2939 3167 }
2940 3168
2941 3169 /*
2942 3170  * ixgbe_setup_multicst - Setup multicast data structures.
2943 3171 *
2944 3172 * This routine initializes all of the multicast related structures
2945 3173  * and saves them in the hardware registers.
2946 3174 */
2947 3175 static void
2948 3176 ixgbe_setup_multicst(ixgbe_t *ixgbe)
2949 3177 {
2950 3178 uint8_t *mc_addr_list;
2951 3179 uint32_t mc_addr_count;
2952 3180 struct ixgbe_hw *hw = &ixgbe->hw;
2953 3181
2954 3182 ASSERT(mutex_owned(&ixgbe->gen_lock));
2955 3183
2956 3184 ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
2957 3185
2958 3186 mc_addr_list = (uint8_t *)ixgbe->mcast_table;
2959 3187 mc_addr_count = ixgbe->mcast_count;
2960 3188
2961 3189 /*
2962 3190 * Update the multicast addresses to the MTA registers
2963 3191 */
2964 3192 (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2965 3193 ixgbe_mc_table_itr, TRUE);
2966 3194 }
2967 3195
2968 3196 /*
2969 3197 * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode).
2970 3198 *
2971 3199 * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers.
2972 3200 * Different chipsets may have different allowed configuration of vmdq and rss.
2973 3201 */
2974 3202 static void
2975 3203 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe)
2976 3204 {
2977 3205 struct ixgbe_hw *hw = &ixgbe->hw;
2978 3206 uint32_t ring_per_group;
2979 3207
2980 3208 switch (hw->mac.type) {
2981 3209 case ixgbe_mac_82598EB:
2982 3210 /*
2983 3211 * 82598 supports the following combination:
2984 3212 * vmdq no. x rss no.
2985 3213 * [5..16] x 1
2986 3214 * [1..4] x [1..16]
2987 3215 * However 8 rss queue per pool (vmdq) is sufficient for
2988 3216 * most cases.
2989 3217 */
2990 3218 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2991 3219 if (ixgbe->num_rx_groups > 4) {
2992 3220 ixgbe->num_rx_rings = ixgbe->num_rx_groups;
2993 3221 } else {
2994 3222 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2995 3223 min(8, ring_per_group);
2996 3224 }
2997 3225
2998 3226 break;
2999 3227
3000 3228 case ixgbe_mac_82599EB:
3001 3229 case ixgbe_mac_X540:
3230 + case ixgbe_mac_X550:
3231 + case ixgbe_mac_X550EM_x:
3002 3232 /*
3003 3233 * 82599 supports the following combination:
3004 3234 * vmdq no. x rss no.
3005 3235 * [33..64] x [1..2]
3006 3236 * [2..32] x [1..4]
3007 3237 * 1 x [1..16]
3008 3238 * However 8 rss queue per pool (vmdq) is sufficient for
3009 3239 * most cases.
3010 3240 *
3011 - * For now, treat X540 like the 82599.
3241 + * For now, treat X540 and X550 like the 82599.
3012 3242 */
3013 3243 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3014 3244 if (ixgbe->num_rx_groups == 1) {
3015 3245 ixgbe->num_rx_rings = min(8, ring_per_group);
3016 3246 } else if (ixgbe->num_rx_groups <= 32) {
3017 3247 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3018 3248 min(4, ring_per_group);
3019 3249 } else if (ixgbe->num_rx_groups <= 64) {
3020 3250 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3021 3251 min(2, ring_per_group);
3022 3252 }
3023 3253 break;
3024 3254
3025 3255 default:
3026 3256 break;
3027 3257 }
3028 3258
3029 3259 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3030 3260
3031 3261 if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) {
3032 3262 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
3033 3263 } else if (ixgbe->num_rx_groups != 1 && ring_per_group == 1) {
3034 3264 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ;
3035 3265 } else if (ixgbe->num_rx_groups != 1 && ring_per_group != 1) {
3036 3266 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ_RSS;
3037 3267 } else {
3038 3268 ixgbe->classify_mode = IXGBE_CLASSIFY_RSS;
3039 3269 }
3040 3270
3041 3271 IXGBE_DEBUGLOG_2(ixgbe, "rx group number:%d, rx ring number:%d",
3042 3272 ixgbe->num_rx_groups, ixgbe->num_rx_rings);
3043 3273 }
3044 3274
3045 3275 /*
3046 3276 * ixgbe_get_conf - Get driver configurations set in driver.conf.
3047 3277 *
3048 3278 * This routine gets user-configured values out of the configuration
3049 3279 * file ixgbe.conf.
3050 3280 *
3051 3281 * For each configurable value, there is a minimum, a maximum, and a
3052 3282 * default.
3053 3283 * If user does not configure a value, use the default.
3054 3284  * If user configures below the minimum, use the minimum.
3055 3285  * If user configures above the maximum, use the maximum.
3056 3286 */
3057 3287 static void
3058 3288 ixgbe_get_conf(ixgbe_t *ixgbe)
3059 3289 {
3060 3290 struct ixgbe_hw *hw = &ixgbe->hw;
3061 3291 uint32_t flow_control;
3062 3292
3063 3293 /*
3064 3294 * ixgbe driver supports the following user configurations:
3065 3295 *
3066 3296 * Jumbo frame configuration:
3067 3297 * default_mtu
3068 3298 *
3069 3299 * Ethernet flow control configuration:
3070 3300 * flow_control
3071 3301 *
3072 3302 * Multiple rings configurations:
3073 3303 * tx_queue_number
3074 3304 * tx_ring_size
3075 3305 * rx_queue_number
3076 3306 * rx_ring_size
3077 3307 *
3078 3308 * Call ixgbe_get_prop() to get the value for a specific
3079 3309 * configuration parameter.
3080 3310 */
3081 3311
3082 3312 /*
3083 3313 * Jumbo frame configuration - max_frame_size controls host buffer
3084 3314 * allocation, so it includes MTU, ethernet header, vlan tag and
3085 3315 * frame check sequence.
3086 3316 */
3087 3317 ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU,
3088 3318 MIN_MTU, ixgbe->capab->max_mtu, DEFAULT_MTU);
3089 3319
3090 3320 ixgbe->max_frame_size = ixgbe->default_mtu +
3091 3321 sizeof (struct ether_vlan_header) + ETHERFCSL;
3092 3322
3093 3323 /*
3094 3324 * Ethernet flow control configuration
3095 3325 */
3096 3326 flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL,
3097 3327 ixgbe_fc_none, 3, ixgbe_fc_none);
3098 3328 if (flow_control == 3)
3099 3329 flow_control = ixgbe_fc_default;
3100 3330
3101 3331 /*
3102 3332 * fc.requested_mode is what the user requests. After autoneg,
3103 3333 * fc.current_mode will be the flow_control mode that was negotiated.
3104 3334 */
3105 3335 hw->fc.requested_mode = flow_control;
3106 3336
3107 3337 /*
3108 3338 * Multiple rings configurations
3109 3339 */
3110 3340 ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM,
3111 3341 ixgbe->capab->min_tx_que_num,
3112 3342 ixgbe->capab->max_tx_que_num,
3113 3343 ixgbe->capab->def_tx_que_num);
3114 3344 ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE,
3115 3345 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);
3116 3346
3117 3347 ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM,
3118 3348 ixgbe->capab->min_rx_que_num,
3119 3349 ixgbe->capab->max_rx_que_num,
3120 3350 ixgbe->capab->def_rx_que_num);
3121 3351 ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE,
3122 3352 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);
3123 3353
3124 3354 /*
3125 3355 * Multiple groups configuration
3126 3356 */
3127 3357 ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM,
3128 3358 ixgbe->capab->min_rx_grp_num, ixgbe->capab->max_rx_grp_num,
3129 3359 ixgbe->capab->def_rx_grp_num);
3130 3360
3131 3361 ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE,
3132 3362 0, 1, DEFAULT_MR_ENABLE);
3133 3363
3134 3364 if (ixgbe->mr_enable == B_FALSE) {
3135 3365 ixgbe->num_tx_rings = 1;
3136 3366 ixgbe->num_rx_rings = 1;
3137 3367 ixgbe->num_rx_groups = 1;
3138 3368 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
3139 3369 } else {
3140 3370 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3141 3371 max(ixgbe->num_rx_rings / ixgbe->num_rx_groups, 1);
3142 3372 /*
3143 3373 * The combination of num_rx_rings and num_rx_groups
3144 3374 * may not be supported by h/w. We need to adjust
3145 3375 * them to appropriate values.
3146 3376 */
3147 3377 ixgbe_setup_vmdq_rss_conf(ixgbe);
3148 3378 }
3149 3379
3150 3380 /*
3151 3381 * Tunable used to force an interrupt type. The only use is
3152 3382 * for testing the lesser interrupt types.
3153 3383 * 0 = don't force interrupt type
3154 3384 * 1 = force interrupt type MSI-X
3155 3385 * 2 = force interrupt type MSI
3156 3386 * 3 = force interrupt type Legacy
3157 3387 */
3158 3388 ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
3159 3389 IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
3160 3390
3161 3391 ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
3162 3392 0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3163 3393 ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
3164 3394 0, 1, DEFAULT_RX_HCKSUM_ENABLE);
3165 3395 ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
3166 3396 0, 1, DEFAULT_LSO_ENABLE);
3167 3397 ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE,
3168 3398 0, 1, DEFAULT_LRO_ENABLE);
3169 3399 ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
3170 3400 0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
3171 3401 ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe,
3172 3402 PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE);
3173 3403
3174 - /* Head Write Back not recommended for 82599 and X540 */
3404 + /* Head Write Back not recommended for 82599, X540 and X550 */
3175 3405 if (hw->mac.type == ixgbe_mac_82599EB ||
3176 - hw->mac.type == ixgbe_mac_X540) {
3406 + hw->mac.type == ixgbe_mac_X540 ||
3407 + hw->mac.type == ixgbe_mac_X550 ||
3408 + hw->mac.type == ixgbe_mac_X550EM_x) {
3177 3409 ixgbe->tx_head_wb_enable = B_FALSE;
3178 3410 }
3179 3411
3180 3412 /*
3181 3413 * ixgbe LSO needs the tx h/w checksum support.
3182 3414 * LSO will be disabled if tx h/w checksum is not
3183 3415 * enabled.
3184 3416 */
3185 3417 if (ixgbe->tx_hcksum_enable == B_FALSE) {
3186 3418 ixgbe->lso_enable = B_FALSE;
3187 3419 }
3188 3420
3189 3421 /*
3190 3422 * ixgbe LRO needs the rx h/w checksum support.
3191 3423 * LRO will be disabled if rx h/w checksum is not
3192 3424 * enabled.
3193 3425 */
3194 3426 if (ixgbe->rx_hcksum_enable == B_FALSE) {
3195 3427 ixgbe->lro_enable = B_FALSE;
3196 3428 }
3197 3429
3198 3430 /*
3199 - * ixgbe LRO only been supported by 82599 and X540 now
3431 + * ixgbe LRO is only supported by the 82599, X540 and X550
3200 3432 */
3201 3433 if (hw->mac.type == ixgbe_mac_82598EB) {
3202 3434 ixgbe->lro_enable = B_FALSE;
3203 3435 }
3204 3436 ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
3205 3437 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
3206 3438 DEFAULT_TX_COPY_THRESHOLD);
3207 3439 ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
3208 3440 PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
3209 3441 MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
3210 3442 ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
3211 3443 PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
3212 3444 MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
3213 3445 ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
3214 3446 PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
3215 3447 MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
3216 3448
3217 3449 ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
3218 3450 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
3219 3451 DEFAULT_RX_COPY_THRESHOLD);
3220 3452 ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
3221 3453 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
3222 3454 DEFAULT_RX_LIMIT_PER_INTR);
3223 3455
3224 3456 ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
3225 3457 ixgbe->capab->min_intr_throttle,
3226 3458 ixgbe->capab->max_intr_throttle,
3227 3459 ixgbe->capab->def_intr_throttle);
3228 3460 /*
3229 - * 82599 and X540 require the interrupt throttling rate is
3230 - * a multiple of 8. This is enforced by the register
3231 - * definiton.
3461 + * 82599, X540 and X550 require that the interrupt throttling rate
3462 + * be a multiple of 8. This is enforced by the register definition.
3232 3463 */
3233 - if (hw->mac.type == ixgbe_mac_82599EB || hw->mac.type == ixgbe_mac_X540)
3464 + if (hw->mac.type == ixgbe_mac_82599EB ||
3465 + hw->mac.type == ixgbe_mac_X540 ||
3466 + hw->mac.type == ixgbe_mac_X550 ||
3467 + hw->mac.type == ixgbe_mac_X550EM_x)
3234 3468 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
3235 3469
3236 3470 hw->allow_unsupported_sfp = ixgbe_get_prop(ixgbe,
3237 3471 PROP_ALLOW_UNSUPPORTED_SFP, 0, 1, DEFAULT_ALLOW_UNSUPPORTED_SFP);
3238 3472 }
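
For reference, these tunables live in ixgbe.conf as ordinary driver.conf properties. A hypothetical excerpt (values chosen purely for illustration; the property names follow the list in the comment at the top of this function):

    # Hypothetical ixgbe.conf excerpt
    default_mtu = 9000;
    tx_queue_number = 8;
    tx_ring_size = 1024;
    rx_queue_number = 8;
    rx_ring_size = 1024;

Each value is then clamped by ixgbe_get_prop() to the [min, max] range passed in the calls above.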
3239 3473
3240 3474 static void
3241 3475 ixgbe_init_params(ixgbe_t *ixgbe)
3242 3476 {
3243 - ixgbe->param_en_10000fdx_cap = 1;
3244 - ixgbe->param_en_1000fdx_cap = 1;
3245 - ixgbe->param_en_100fdx_cap = 1;
3246 - ixgbe->param_adv_10000fdx_cap = 1;
3247 - ixgbe->param_adv_1000fdx_cap = 1;
3248 - ixgbe->param_adv_100fdx_cap = 1;
3477 + struct ixgbe_hw *hw = &ixgbe->hw;
3478 + ixgbe_link_speed speeds_supported = 0;
3479 + boolean_t negotiate;
3249 3480
3481 + /*
3482 + * Get a list of speeds the adapter supports. If the hw struct hasn't
3483 + * been populated with this information yet, retrieve it from the
3484 + * adapter and save it to our own variable.
3485 + *
3486 + * On certain adapters, such as ones which use SFPs, the contents of
3487 + * hw->phy.speeds_supported (and hw->phy.autoneg_advertised) are not
3488 + * updated, so we must rely on calling ixgbe_get_link_capabilities()
3489 + * in order to ascertain the speeds which we are capable of supporting,
3490 + * and in the case of SFP-equipped adapters, which speed we are
3491 + * advertising. If ixgbe_get_link_capabilities() fails for some reason,
3492 + * we'll go with a default list of speeds as a last resort.
3493 + */
3494 + speeds_supported = hw->phy.speeds_supported;
3495 +
3496 + if (speeds_supported == 0) {
3497 + if (ixgbe_get_link_capabilities(hw, &speeds_supported,
3498 + &negotiate) != IXGBE_SUCCESS) {
3499 + if (hw->mac.type == ixgbe_mac_82598EB) {
3500 + speeds_supported =
3501 + IXGBE_LINK_SPEED_82598_AUTONEG;
3502 + } else {
3503 + speeds_supported =
3504 + IXGBE_LINK_SPEED_82599_AUTONEG;
3505 + }
3506 + }
3507 + }
3508 + ixgbe->speeds_supported = speeds_supported;
3509 +
3510 + /*
3511 + * By default, all supported speeds are enabled and advertised.
3512 + */
3513 + if (speeds_supported & IXGBE_LINK_SPEED_10GB_FULL) {
3514 + ixgbe->param_en_10000fdx_cap = 1;
3515 + ixgbe->param_adv_10000fdx_cap = 1;
3516 + } else {
3517 + ixgbe->param_en_10000fdx_cap = 0;
3518 + ixgbe->param_adv_10000fdx_cap = 0;
3519 + }
3520 +
3521 + if (speeds_supported & IXGBE_LINK_SPEED_5GB_FULL) {
3522 + ixgbe->param_en_5000fdx_cap = 1;
3523 + ixgbe->param_adv_5000fdx_cap = 1;
3524 + } else {
3525 + ixgbe->param_en_5000fdx_cap = 0;
3526 + ixgbe->param_adv_5000fdx_cap = 0;
3527 + }
3528 +
3529 + if (speeds_supported & IXGBE_LINK_SPEED_2_5GB_FULL) {
3530 + ixgbe->param_en_2500fdx_cap = 1;
3531 + ixgbe->param_adv_2500fdx_cap = 1;
3532 + } else {
3533 + ixgbe->param_en_2500fdx_cap = 0;
3534 + ixgbe->param_adv_2500fdx_cap = 0;
3535 + }
3536 +
3537 + if (speeds_supported & IXGBE_LINK_SPEED_1GB_FULL) {
3538 + ixgbe->param_en_1000fdx_cap = 1;
3539 + ixgbe->param_adv_1000fdx_cap = 1;
3540 + } else {
3541 + ixgbe->param_en_1000fdx_cap = 0;
3542 + ixgbe->param_adv_1000fdx_cap = 0;
3543 + }
3544 +
3545 + if (speeds_supported & IXGBE_LINK_SPEED_100_FULL) {
3546 + ixgbe->param_en_100fdx_cap = 1;
3547 + ixgbe->param_adv_100fdx_cap = 1;
3548 + } else {
3549 + ixgbe->param_en_100fdx_cap = 0;
3550 + ixgbe->param_adv_100fdx_cap = 0;
3551 + }
3552 +
3250 3553 ixgbe->param_pause_cap = 1;
3251 3554 ixgbe->param_asym_pause_cap = 1;
3252 3555 ixgbe->param_rem_fault = 0;
3253 3556
3254 3557 ixgbe->param_adv_autoneg_cap = 1;
3255 3558 ixgbe->param_adv_pause_cap = 1;
3256 3559 ixgbe->param_adv_asym_pause_cap = 1;
3257 3560 ixgbe->param_adv_rem_fault = 0;
3258 3561
3259 3562 ixgbe->param_lp_10000fdx_cap = 0;
3563 + ixgbe->param_lp_5000fdx_cap = 0;
3564 + ixgbe->param_lp_2500fdx_cap = 0;
3260 3565 ixgbe->param_lp_1000fdx_cap = 0;
3261 3566 ixgbe->param_lp_100fdx_cap = 0;
3262 3567 ixgbe->param_lp_autoneg_cap = 0;
3263 3568 ixgbe->param_lp_pause_cap = 0;
3264 3569 ixgbe->param_lp_asym_pause_cap = 0;
3265 3570 ixgbe->param_lp_rem_fault = 0;
3266 3571 }
3267 3572
3268 3573 /*
3269 3574 * ixgbe_get_prop - Get a property value out of the configuration file
3270 3575 * ixgbe.conf.
3271 3576 *
3272 3577 * Caller provides the name of the property, a default value, a minimum
3273 3578 * value, and a maximum value.
3274 3579 *
3275 3580 * Return configured value of the property, with default, minimum and
3276 3581 * maximum properly applied.
3277 3582 */
3278 3583 static int
3279 3584 ixgbe_get_prop(ixgbe_t *ixgbe,
3280 3585 char *propname, /* name of the property */
3281 3586 int minval, /* minimum acceptable value */
3282 3587 int maxval, /* maximum acceptable value */
3283 3588 int defval) /* default value */
3284 3589 {
3285 3590 int value;
3286 3591
3287 3592 /*
3288 3593 * Call ddi_prop_get_int() to read the conf settings
3289 3594 */
3290 3595 value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
3291 3596 DDI_PROP_DONTPASS, propname, defval);
3292 3597 if (value > maxval)
3293 3598 value = maxval;
3294 3599
3295 3600 if (value < minval)
3296 3601 value = minval;
3297 3602
3298 3603 return (value);
3299 3604 }
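
A quick illustration of the clamping behavior, using hypothetical bounds:

    /*
     * With minval = 1, maxval = 4096 and defval = 512:
     *
     *   property absent in ixgbe.conf -> 512  (default returned)
     *   property set to 9999          -> 4096 (clamped to maximum)
     *   property set to 0             -> 1    (clamped to minimum)
     */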
3300 3605
3301 3606 /*
3302 3607 * ixgbe_driver_setup_link - Using the link properties to setup the link.
3303 3608 */
3304 3609 int
3305 3610 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw)
3306 3611 {
3307 - u32 autoneg_advertised = 0;
3612 + struct ixgbe_hw *hw = &ixgbe->hw;
3613 + ixgbe_link_speed advertised = 0;
3308 3614
3309 3615 /*
3310 - * No half duplex support with 10Gb parts
3616 + * Assemble a list of enabled speeds to auto-negotiate with.
3311 3617 */
3312 - if (ixgbe->param_adv_10000fdx_cap == 1)
3313 - autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
3618 + if (ixgbe->param_en_10000fdx_cap == 1)
3619 + advertised |= IXGBE_LINK_SPEED_10GB_FULL;
3314 3620
3315 - if (ixgbe->param_adv_1000fdx_cap == 1)
3316 - autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
3621 + if (ixgbe->param_en_5000fdx_cap == 1)
3622 + advertised |= IXGBE_LINK_SPEED_5GB_FULL;
3317 3623
3318 - if (ixgbe->param_adv_100fdx_cap == 1)
3319 - autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
3624 + if (ixgbe->param_en_2500fdx_cap == 1)
3625 + advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
3320 3626
3321 - if (ixgbe->param_adv_autoneg_cap == 1 && autoneg_advertised == 0) {
3322 - ixgbe_notice(ixgbe, "Invalid link settings. Setup link "
3323 - "to autonegotiation with full link capabilities.");
3627 + if (ixgbe->param_en_1000fdx_cap == 1)
3628 + advertised |= IXGBE_LINK_SPEED_1GB_FULL;
3324 3629
3325 - autoneg_advertised = IXGBE_LINK_SPEED_10GB_FULL |
3326 - IXGBE_LINK_SPEED_1GB_FULL |
3327 - IXGBE_LINK_SPEED_100_FULL;
3630 + if (ixgbe->param_en_100fdx_cap == 1)
3631 + advertised |= IXGBE_LINK_SPEED_100_FULL;
3632 +
3633 + /*
3634 + * As a last resort, autoneg with a default list of speeds.
3635 + */
3636 + if (ixgbe->param_adv_autoneg_cap == 1 && advertised == 0) {
3637 + ixgbe_notice(ixgbe, "Invalid link settings. Setting link "
3638 + "to autonegotiate with full capabilities.");
3639 +
3640 + if (hw->mac.type == ixgbe_mac_82598EB)
3641 + advertised = IXGBE_LINK_SPEED_82598_AUTONEG;
3642 + else
3643 + advertised = IXGBE_LINK_SPEED_82599_AUTONEG;
3328 3644 }
3329 3645
3330 3646 if (setup_hw) {
3331 - if (ixgbe_setup_link(&ixgbe->hw, autoneg_advertised,
3332 - ixgbe->param_adv_autoneg_cap, B_TRUE) != IXGBE_SUCCESS) {
3647 + if (ixgbe_setup_link(&ixgbe->hw, advertised,
3648 + ixgbe->param_adv_autoneg_cap) != IXGBE_SUCCESS) {
3333 3649 ixgbe_notice(ixgbe, "Setup link failed on this "
3334 3650 "device.");
3335 3651 return (IXGBE_FAILURE);
3336 3652 }
3337 3653 }
3338 3654
3339 3655 return (IXGBE_SUCCESS);
3340 3656 }
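
As a worked example (hypothetical configuration): with only the 10 Gb and 1 Gb speeds enabled, the mask assembled above is simply the OR of the two speed bits:

    /*
     * param_en_10000fdx_cap == 1, param_en_1000fdx_cap == 1, all others 0:
     *
     *   advertised = IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL;
     *
     * which is then handed to ixgbe_setup_link() for auto-negotiation.
     */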
3341 3657
3342 3658 /*
3343 3659 * ixgbe_driver_link_check - Link status processing.
3344 3660 *
3345 3661 * This function can be called in both kernel context and interrupt context
3346 3662 */
3347 3663 static void
3348 3664 ixgbe_driver_link_check(ixgbe_t *ixgbe)
3349 3665 {
3350 3666 struct ixgbe_hw *hw = &ixgbe->hw;
3351 3667 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
3352 3668 boolean_t link_up = B_FALSE;
3353 3669 boolean_t link_changed = B_FALSE;
3354 3670
3355 3671 ASSERT(mutex_owned(&ixgbe->gen_lock));
3356 3672
3357 - (void) ixgbe_check_link(hw, &speed, &link_up, false);
3673 + (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE);
3358 3674 if (link_up) {
3359 3675 ixgbe->link_check_complete = B_TRUE;
3360 3676
3361 3677 /* Link is up, enable flow control settings */
3362 3678 (void) ixgbe_fc_enable(hw);
3363 3679
3364 3680 /*
3365 3681 * The Link is up, check whether it was marked as down earlier
3366 3682 */
3367 3683 if (ixgbe->link_state != LINK_STATE_UP) {
3368 3684 switch (speed) {
3369 3685 case IXGBE_LINK_SPEED_10GB_FULL:
3370 3686 ixgbe->link_speed = SPEED_10GB;
3371 3687 break;
3688 + case IXGBE_LINK_SPEED_5GB_FULL:
3689 + ixgbe->link_speed = SPEED_5GB;
3690 + break;
3691 + case IXGBE_LINK_SPEED_2_5GB_FULL:
3692 + ixgbe->link_speed = SPEED_2_5GB;
3693 + break;
3372 3694 case IXGBE_LINK_SPEED_1GB_FULL:
3373 3695 ixgbe->link_speed = SPEED_1GB;
3374 3696 break;
3375 3697 case IXGBE_LINK_SPEED_100_FULL:
3376 3698 ixgbe->link_speed = SPEED_100;
3377 3699 }
3378 3700 ixgbe->link_duplex = LINK_DUPLEX_FULL;
3379 3701 ixgbe->link_state = LINK_STATE_UP;
3380 3702 link_changed = B_TRUE;
3381 3703 }
3382 3704 } else {
3383 3705 if (ixgbe->link_check_complete == B_TRUE ||
3384 3706 (ixgbe->link_check_complete == B_FALSE &&
3385 3707 gethrtime() >= ixgbe->link_check_hrtime)) {
3386 3708 /*
3387 3709 * The link is really down
3388 3710 */
3389 3711 ixgbe->link_check_complete = B_TRUE;
3390 3712
3391 3713 if (ixgbe->link_state != LINK_STATE_DOWN) {
3392 3714 ixgbe->link_speed = 0;
3393 3715 ixgbe->link_duplex = LINK_DUPLEX_UNKNOWN;
3394 3716 ixgbe->link_state = LINK_STATE_DOWN;
3395 3717 link_changed = B_TRUE;
3396 3718 }
3397 3719 }
3398 3720 }
3399 3721
3400 3722 /*
3401 3723 * If we are in an interrupt context, need to re-enable the
3402 3724 * interrupt, which was automasked
3403 3725 */
3404 3726 if (servicing_interrupt() != 0) {
3405 3727 ixgbe->eims |= IXGBE_EICR_LSC;
3406 3728 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3407 3729 }
3408 3730
3409 3731 if (link_changed) {
3410 3732 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
3411 3733 }
3412 3734 }
3413 3735
3414 3736 /*
3415 3737 * ixgbe_sfp_check - sfp module processing done in taskq only for 82599.
3416 3738 */
3417 3739 static void
3418 3740 ixgbe_sfp_check(void *arg)
3419 3741 {
3420 3742 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3421 3743 uint32_t eicr = ixgbe->eicr;
3422 3744 struct ixgbe_hw *hw = &ixgbe->hw;
3423 3745
3424 3746 mutex_enter(&ixgbe->gen_lock);
3425 - if (eicr & IXGBE_EICR_GPI_SDP1) {
3747 + if (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
3426 3748 /* clear the interrupt */
3427 - IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
3749 + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3428 3750
3429 3751 /* if link up, do multispeed fiber setup */
3430 3752 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
3431 - B_TRUE, B_TRUE);
3753 + B_TRUE);
3432 3754 ixgbe_driver_link_check(ixgbe);
3433 3755 ixgbe_get_hw_state(ixgbe);
3434 - } else if (eicr & IXGBE_EICR_GPI_SDP2) {
3756 + } else if (eicr & IXGBE_EICR_GPI_SDP2_BY_MAC(hw)) {
3435 3757 /* clear the interrupt */
3436 - IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
3758 + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2_BY_MAC(hw));
3437 3759
3438 3760 /* if link up, do sfp module setup */
3439 3761 (void) hw->mac.ops.setup_sfp(hw);
3440 3762
3441 3763 /* do multispeed fiber setup */
3442 3764 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
3443 - B_TRUE, B_TRUE);
3765 + B_TRUE);
3444 3766 ixgbe_driver_link_check(ixgbe);
3445 3767 ixgbe_get_hw_state(ixgbe);
3446 3768 }
3447 3769 mutex_exit(&ixgbe->gen_lock);
3448 3770
3449 3771 /*
3450 3772 * We need to fully re-check the link later.
3451 3773 */
3452 3774 ixgbe->link_check_complete = B_FALSE;
3453 3775 ixgbe->link_check_hrtime = gethrtime() +
3454 3776 (IXGBE_LINK_UP_TIME * 100000000ULL);
3455 3777 }
3456 3778
3457 3779 /*
3458 3780 * ixgbe_overtemp_check - overtemp module processing done in taskq
3459 3781 *
3460 3782 * This routine will only be called on adapters with a temperature sensor.
3461 3783 * The indication of over-temperature can be either SDP0 interrupt or the link
3462 3784 * status change interrupt.
3463 3785 */
3464 3786 static void
3465 3787 ixgbe_overtemp_check(void *arg)
3466 3788 {
3467 3789 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3468 3790 struct ixgbe_hw *hw = &ixgbe->hw;
3469 3791 uint32_t eicr = ixgbe->eicr;
3470 3792 ixgbe_link_speed speed;
3471 3793 boolean_t link_up;
3472 3794
3473 3795 mutex_enter(&ixgbe->gen_lock);
3474 3796
3475 3797 /* make sure we know current state of link */
3476 - (void) ixgbe_check_link(hw, &speed, &link_up, false);
3798 + (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE);
3477 3799
3478 3800 /* check over-temp condition */
3479 - if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
3801 + if (((eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw)) && (!link_up)) ||
3480 3802 (eicr & IXGBE_EICR_LSC)) {
3481 3803 if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) {
3482 3804 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
3483 3805
3484 3806 /*
3485 3807 * Disable the adapter interrupts
3486 3808 */
3487 3809 ixgbe_disable_adapter_interrupts(ixgbe);
3488 3810
3489 3811 /*
3490 3812 * Disable Rx/Tx units
3491 3813 */
3492 3814 (void) ixgbe_stop_adapter(hw);
3493 3815
3494 3816 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
3495 3817 ixgbe_error(ixgbe,
3496 3818 "Problem: Network adapter has been stopped "
3497 3819 "because it has overheated");
3498 3820 ixgbe_error(ixgbe,
3499 3821 "Action: Restart the computer. "
3500 3822 "If the problem persists, power off the system "
3501 3823 "and replace the adapter");
3502 3824 }
3503 3825 }
3504 3826
3505 3827 /* write to clear the interrupt */
3506 3828 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3507 3829
3508 3830 mutex_exit(&ixgbe->gen_lock);
3509 3831 }
3510 3832
3511 3833 /*
3834 + * ixgbe_phy_check - taskq to process interrupts from an external PHY
3835 + *
3836 + * This routine will only be called on adapters with external PHYs
3837 + * (such as X550) that may be trying to draw our attention to some event.
3838 + * Currently, this is limited to handling PHY overtemperature and link
3839 + * status change (LSC) events; however, this may expand to include other
3840 + * things in future adapters.
3841 + */
3842 +static void
3843 +ixgbe_phy_check(void *arg)
3844 +{
3845 + ixgbe_t *ixgbe = (ixgbe_t *)arg;
3846 + struct ixgbe_hw *hw = &ixgbe->hw;
3847 + int rv;
3848 +
3849 + mutex_enter(&ixgbe->gen_lock);
3850 +
3851 + /*
3852 + * X550 baseT PHY overtemp and LSC events are handled here.
3853 + *
3854 + * If an overtemp event occurs, it will be reflected in the
3855 + * return value of phy.ops.handle_lasi() and the common code will
3856 + * automatically power off the baseT PHY. This is our cue to trigger
3857 + * an FMA event.
3858 + *
3859 + * If a link status change event occurs, phy.ops.handle_lasi() will
3860 + * automatically initiate a link setup between the integrated KR PHY
3861 + * and the external X557 PHY to ensure that the link speed between
3862 + * them matches the link speed of the baseT link.
3863 + */
3864 + rv = ixgbe_handle_lasi(hw);
3865 +
3866 + if (rv == IXGBE_ERR_OVERTEMP) {
3867 + atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
3868 +
3869 + /*
3870 + * Disable the adapter interrupts
3871 + */
3872 + ixgbe_disable_adapter_interrupts(ixgbe);
3873 +
3874 + /*
3875 + * Disable Rx/Tx units
3876 + */
3877 + (void) ixgbe_stop_adapter(hw);
3878 +
3879 + ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
3880 + ixgbe_error(ixgbe,
3881 + "Problem: Network adapter has been stopped because an "
3882 + "overtemperature event was detected.");
3883 + ixgbe_error(ixgbe,
3884 + "Action: Shut down or restart the computer. If the issue "
3885 + "persists, please follow the recommendations from your "
3886 + "system vendor.");
3887 + }
3888 +
3889 + mutex_exit(&ixgbe->gen_lock);
3890 +}
3891 +
3892 +/*
3512 3893 * ixgbe_link_timer - timer for link status detection
3513 3894 */
3514 3895 static void
3515 3896 ixgbe_link_timer(void *arg)
3516 3897 {
3517 3898 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3518 3899
3519 3900 mutex_enter(&ixgbe->gen_lock);
3520 3901 ixgbe_driver_link_check(ixgbe);
3521 3902 mutex_exit(&ixgbe->gen_lock);
3522 3903 }
3523 3904
3524 3905 /*
3525 3906 * ixgbe_local_timer - Driver watchdog function.
3526 3907 *
3527 3908 * This function will handle the transmit stall check and other routines.
3528 3909 */
3529 3910 static void
3530 3911 ixgbe_local_timer(void *arg)
3531 3912 {
3532 3913 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3533 3914
3534 3915 if (ixgbe->ixgbe_state & IXGBE_OVERTEMP)
3535 3916 goto out;
3536 3917
3537 3918 if (ixgbe->ixgbe_state & IXGBE_ERROR) {
3538 3919 ixgbe->reset_count++;
3539 3920 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3540 3921 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3541 3922 goto out;
3542 3923 }
3543 3924
3544 3925 if (ixgbe_stall_check(ixgbe)) {
3545 3926 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STALL);
3546 3927 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3547 3928
3548 3929 ixgbe->reset_count++;
3549 3930 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3550 3931 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3551 3932 }
3552 3933
3553 3934 out:
3554 3935 ixgbe_restart_watchdog_timer(ixgbe);
3555 3936 }
3556 3937
3557 3938 /*
3558 3939 * ixgbe_stall_check - Check for transmit stall.
3559 3940 *
3560 3941 * This function checks if the adapter is stalled (in transmit).
3561 3942 *
3562 3943 * It is called each time the watchdog timeout is invoked.
3563 3944 * If the transmit descriptor reclaim continuously fails,
3564 3945 * the watchdog value will increment by 1. If the watchdog
3565 3946 * value exceeds the threshold, the ixgbe is assumed to
3566 3947 * have stalled and needs to be reset.
3567 3948 */
3568 3949 static boolean_t
3569 3950 ixgbe_stall_check(ixgbe_t *ixgbe)
3570 3951 {
3571 3952 ixgbe_tx_ring_t *tx_ring;
3572 3953 boolean_t result;
3573 3954 int i;
3574 3955
3575 3956 if (ixgbe->link_state != LINK_STATE_UP)
3576 3957 return (B_FALSE);
3577 3958
3578 3959 /*
3579 3960 * If any tx ring is stalled, we'll reset the chipset
3580 3961 */
3581 3962 result = B_FALSE;
3582 3963 for (i = 0; i < ixgbe->num_tx_rings; i++) {
3583 3964 tx_ring = &ixgbe->tx_rings[i];
3584 3965 if (tx_ring->tbd_free <= ixgbe->tx_recycle_thresh) {
3585 3966 tx_ring->tx_recycle(tx_ring);
3586 3967 }
3587 3968
3588 3969 if (tx_ring->recycle_fail > 0)
3589 3970 tx_ring->stall_watchdog++;
3590 3971 else
3591 3972 tx_ring->stall_watchdog = 0;
3592 3973
3593 3974 if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) {
3594 3975 result = B_TRUE;
3595 3976 break;
3596 3977 }
3597 3978 }
3598 3979
3599 3980 if (result) {
3600 3981 tx_ring->stall_watchdog = 0;
3601 3982 tx_ring->recycle_fail = 0;
3602 3983 }
3603 3984
3604 3985 return (result);
3605 3986 }
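
A worked timeline may help here. Assuming the watchdog fires once per second (see ixgbe_arm_watchdog_timer() below) and a hypothetical STALL_WATCHDOG_TIMEOUT of 8:

    /*
     * tick 1..8: recycle_fail > 0 on every tick -> stall_watchdog counts 1..8
     * tick 8:    stall_watchdog >= 8            -> ring declared stalled,
     *                                              chipset reset follows
     *
     * Any tick on which recycle_fail == 0 resets stall_watchdog to 0, so
     * only a sustained reclaim failure triggers the reset.
     */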
3606 3987
3607 3988
3608 3989 /*
3609 3990 * is_valid_mac_addr - Check if the mac address is valid.
3610 3991 */
3611 3992 static boolean_t
3612 3993 is_valid_mac_addr(uint8_t *mac_addr)
3613 3994 {
3614 3995 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
3615 3996 const uint8_t addr_test2[6] =
3616 3997 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3617 3998
3618 3999 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
3619 4000 !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
3620 4001 return (B_FALSE);
3621 4002
3622 4003 return (B_TRUE);
3623 4004 }
3624 4005
3625 4006 static boolean_t
3626 4007 ixgbe_find_mac_address(ixgbe_t *ixgbe)
3627 4008 {
3628 4009 #ifdef __sparc
3629 4010 struct ixgbe_hw *hw = &ixgbe->hw;
3630 4011 uchar_t *bytes;
3631 4012 struct ether_addr sysaddr;
3632 4013 uint_t nelts;
3633 4014 int err;
3634 4015 boolean_t found = B_FALSE;
3635 4016
3636 4017 /*
3637 4018 * The "vendor's factory-set address" may already have
3638 4019 * been extracted from the chip, but if the property
3639 4020 * "local-mac-address" is set we use that instead.
3640 4021 *
3641 4022 * We check whether it looks like an array of 6
3642 4023 * bytes (which it should, if OBP set it). If we can't
3643 4024 * make sense of it this way, we'll ignore it.
3644 4025 */
3645 4026 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
3646 4027 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
3647 4028 if (err == DDI_PROP_SUCCESS) {
3648 4029 if (nelts == ETHERADDRL) {
3649 4030 while (nelts--)
3650 4031 hw->mac.addr[nelts] = bytes[nelts];
3651 4032 found = B_TRUE;
3652 4033 }
3653 4034 ddi_prop_free(bytes);
3654 4035 }
3655 4036
3656 4037 /*
3657 4038 * Look up the OBP property "local-mac-address?". If the user has set
3658 4039 * 'local-mac-address? = false', use "the system address" instead.
3659 4040 */
3660 4041 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0,
3661 4042 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
3662 4043 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
3663 4044 if (localetheraddr(NULL, &sysaddr) != 0) {
3664 4045 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
3665 4046 found = B_TRUE;
3666 4047 }
3667 4048 }
3668 4049 ddi_prop_free(bytes);
3669 4050 }
3670 4051
3671 4052 /*
3672 4053 * Finally(!), if there's a valid "mac-address" property (created
3673 4054 * if we netbooted from this interface), we must use this instead
3674 4055 * of any of the above to ensure that the NFS/install server doesn't
3675 - * get confused by the address changing as Solaris takes over!
4056 + * get confused by the address changing as illumos takes over!
3676 4057 */
3677 4058 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
3678 4059 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
3679 4060 if (err == DDI_PROP_SUCCESS) {
3680 4061 if (nelts == ETHERADDRL) {
3681 4062 while (nelts--)
3682 4063 hw->mac.addr[nelts] = bytes[nelts];
3683 4064 found = B_TRUE;
3684 4065 }
3685 4066 ddi_prop_free(bytes);
3686 4067 }
3687 4068
3688 4069 if (found) {
3689 4070 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
3690 4071 return (B_TRUE);
3691 4072 }
3692 4073 #else
3693 4074 _NOTE(ARGUNUSED(ixgbe));
3694 4075 #endif
3695 4076
3696 4077 return (B_TRUE);
3697 4078 }
3698 4079
3699 4080 #pragma inline(ixgbe_arm_watchdog_timer)
3700 4081 static void
3701 4082 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe)
3702 4083 {
3703 4084 /*
3704 4085 * Fire a watchdog timer
3705 4086 */
3706 4087 ixgbe->watchdog_tid =
3707 4088 timeout(ixgbe_local_timer,
3708 4089 (void *)ixgbe, 1 * drv_usectohz(1000000));
3709 4090
3710 4091 }
3711 4092
3712 4093 /*
3713 4094 * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer.
3714 4095 */
3715 4096 void
3716 4097 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe)
3717 4098 {
3718 4099 mutex_enter(&ixgbe->watchdog_lock);
3719 4100
3720 4101 if (!ixgbe->watchdog_enable) {
3721 4102 ixgbe->watchdog_enable = B_TRUE;
3722 4103 ixgbe->watchdog_start = B_TRUE;
3723 4104 ixgbe_arm_watchdog_timer(ixgbe);
3724 4105 }
3725 4106
3726 4107 mutex_exit(&ixgbe->watchdog_lock);
3727 4108 }
3728 4109
3729 4110 /*
3730 4111 * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer.
3731 4112 */
3732 4113 void
3733 4114 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe)
3734 4115 {
3735 4116 timeout_id_t tid;
3736 4117
3737 4118 mutex_enter(&ixgbe->watchdog_lock);
3738 4119
3739 4120 ixgbe->watchdog_enable = B_FALSE;
3740 4121 ixgbe->watchdog_start = B_FALSE;
3741 4122 tid = ixgbe->watchdog_tid;
3742 4123 ixgbe->watchdog_tid = 0;
3743 4124
3744 4125 mutex_exit(&ixgbe->watchdog_lock);
3745 4126
3746 4127 if (tid != 0)
3747 4128 (void) untimeout(tid);
3748 4129 }
3749 4130
3750 4131 /*
3751 4132 * ixgbe_start_watchdog_timer - Start the driver watchdog timer.
3752 4133 */
3753 4134 void
3754 4135 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe)
3755 4136 {
3756 4137 mutex_enter(&ixgbe->watchdog_lock);
3757 4138
3758 4139 if (ixgbe->watchdog_enable) {
3759 4140 if (!ixgbe->watchdog_start) {
3760 4141 ixgbe->watchdog_start = B_TRUE;
3761 4142 ixgbe_arm_watchdog_timer(ixgbe);
3762 4143 }
3763 4144 }
3764 4145
3765 4146 mutex_exit(&ixgbe->watchdog_lock);
3766 4147 }
3767 4148
3768 4149 /*
3769 4150 * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer.
3770 4151 */
3771 4152 static void
3772 4153 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe)
3773 4154 {
3774 4155 mutex_enter(&ixgbe->watchdog_lock);
3775 4156
3776 4157 if (ixgbe->watchdog_start)
3777 4158 ixgbe_arm_watchdog_timer(ixgbe);
3778 4159
3779 4160 mutex_exit(&ixgbe->watchdog_lock);
3780 4161 }
3781 4162
3782 4163 /*
3783 4164 * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer.
3784 4165 */
3785 4166 void
3786 4167 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe)
3787 4168 {
3788 4169 timeout_id_t tid;
3789 4170
3790 4171 mutex_enter(&ixgbe->watchdog_lock);
3791 4172
3792 4173 ixgbe->watchdog_start = B_FALSE;
3793 4174 tid = ixgbe->watchdog_tid;
3794 4175 ixgbe->watchdog_tid = 0;
3795 4176
3796 4177 mutex_exit(&ixgbe->watchdog_lock);
3797 4178
3798 4179 if (tid != 0)
3799 4180 (void) untimeout(tid);
3800 4181 }
3801 4182
3802 4183 /*
3803 4184 * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts.
3804 4185 */
3805 4186 static void
3806 4187 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe)
3807 4188 {
3808 4189 struct ixgbe_hw *hw = &ixgbe->hw;
3809 4190
3810 4191 /*
3811 4192 * mask all interrupts off
3812 4193 */
3813 4194 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff);
3814 4195
3815 4196 /*
3816 4197 * for MSI-X, also disable autoclear
3817 4198 */
3818 4199 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
3819 4200 IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0);
3820 4201 }
3821 4202
3822 4203 IXGBE_WRITE_FLUSH(hw);
3823 4204 }
3824 4205
3825 4206 /*
3826 4207 * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts.
3827 4208 */
3828 4209 static void
3829 4210 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe)
3830 4211 {
3831 4212 struct ixgbe_hw *hw = &ixgbe->hw;
3832 4213 uint32_t eiac, eiam;
3833 4214 uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3834 4215
3835 4216 /* interrupt types to enable */
3836 4217 ixgbe->eims = IXGBE_EIMS_ENABLE_MASK; /* shared code default */
3837 4218 ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER; /* minus tcp timer */
3838 4219 ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */
3839 4220
3840 4221 /* enable automask on "other" causes that this adapter can generate */
3841 4222 eiam = ixgbe->capab->other_intr;
3842 4223
3843 4224 /*
3844 4225 * msi-x mode
3845 4226 */
3846 4227 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
3847 4228 /* enable autoclear but not on bits 29:20 */
3848 4229 eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR);
3849 4230
3850 4231 /* general purpose interrupt enable */
3851 4232 gpie |= (IXGBE_GPIE_MSIX_MODE
3852 4233 | IXGBE_GPIE_PBA_SUPPORT
3853 4234 | IXGBE_GPIE_OCD
3854 4235 | IXGBE_GPIE_EIAME);
3855 4236 /*
3856 4237 * non-msi-x mode
3857 4238 */
3858 4239 } else {
3859 4240
3860 4241 /* disable autoclear, leave gpie at default */
3861 4242 eiac = 0;
3862 4243
3863 4244 /*
3864 4245 * General purpose interrupt enable.
3865 - * For 82599 or X540, extended interrupt automask enable
3866 - * only in MSI or MSI-X mode
4246 + * For 82599, X540 and X550, extended interrupt
4247 + * automask is enabled only in MSI or MSI-X mode
3867 4248 */
3868 4249 if ((hw->mac.type == ixgbe_mac_82598EB) ||
3869 4250 (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
3870 4251 gpie |= IXGBE_GPIE_EIAME;
3871 4252 }
3872 4253 }
3873 4254
3874 4255 /* Enable specific "other" interrupt types */
3875 4256 switch (hw->mac.type) {
3876 4257 case ixgbe_mac_82598EB:
3877 4258 gpie |= ixgbe->capab->other_gpie;
3878 4259 break;
3879 4260
3880 4261 case ixgbe_mac_82599EB:
3881 4262 case ixgbe_mac_X540:
4263 + case ixgbe_mac_X550:
4264 + case ixgbe_mac_X550EM_x:
3882 4265 gpie |= ixgbe->capab->other_gpie;
3883 4266
3884 4267 /* Enable RSC Delay 8us when LRO enabled */
3885 4268 if (ixgbe->lro_enable) {
3886 4269 gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT);
3887 4270 }
3888 4271 break;
3889 4272
3890 4273 default:
3891 4274 break;
3892 4275 }
3893 4276
3894 4277 /* write to interrupt control registers */
3895 4278 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3896 4279 IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
3897 4280 IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
3898 4281 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3899 4282 IXGBE_WRITE_FLUSH(hw);
3900 4283 }
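
To summarize the register roles used above, as described by the comments in this function:

    /*
     * EIMS - interrupt causes that are enabled
     * EIAC - causes that auto-clear (MSI-X mode only)
     * EIAM - "other" causes that are auto-masked
     * GPIE - general purpose enables (MSI-X mode, EIAME, RSC delay, ...)
     */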
3901 4284
3902 4285 /*
3903 4286 * ixgbe_loopback_ioctl - Loopback support.
3904 4287 */
3905 4288 enum ioc_reply
3906 4289 ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp)
3907 4290 {
3908 4291 lb_info_sz_t *lbsp;
3909 4292 lb_property_t *lbpp;
3910 4293 uint32_t *lbmp;
3911 4294 uint32_t size;
3912 4295 uint32_t value;
3913 4296
3914 4297 if (mp->b_cont == NULL)
3915 4298 return (IOC_INVAL);
3916 4299
3917 4300 switch (iocp->ioc_cmd) {
3918 4301 default:
3919 4302 return (IOC_INVAL);
3920 4303
3921 4304 case LB_GET_INFO_SIZE:
3922 4305 size = sizeof (lb_info_sz_t);
3923 4306 if (iocp->ioc_count != size)
3924 4307 return (IOC_INVAL);
3925 4308
3926 4309 value = sizeof (lb_normal);
3927 4310 value += sizeof (lb_mac);
3928 4311 value += sizeof (lb_external);
3929 4312
3930 4313 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
3931 4314 *lbsp = value;
3932 4315 break;
3933 4316
3934 4317 case LB_GET_INFO:
3935 4318 value = sizeof (lb_normal);
3936 4319 value += sizeof (lb_mac);
3937 4320 value += sizeof (lb_external);
3938 4321
3939 4322 size = value;
3940 4323 if (iocp->ioc_count != size)
3941 4324 return (IOC_INVAL);
3942 4325
3943 4326 value = 0;
3944 4327 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
3945 4328
3946 4329 lbpp[value++] = lb_normal;
3947 4330 lbpp[value++] = lb_mac;
3948 4331 lbpp[value++] = lb_external;
3949 4332 break;
3950 4333
3951 4334 case LB_GET_MODE:
3952 4335 size = sizeof (uint32_t);
3953 4336 if (iocp->ioc_count != size)
3954 4337 return (IOC_INVAL);
3955 4338
3956 4339 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
3957 4340 *lbmp = ixgbe->loopback_mode;
3958 4341 break;
3959 4342
3960 4343 case LB_SET_MODE:
3961 4344 size = 0;
3962 4345 if (iocp->ioc_count != sizeof (uint32_t))
3963 4346 return (IOC_INVAL);
3964 4347
3965 4348 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
3966 4349 if (!ixgbe_set_loopback_mode(ixgbe, *lbmp))
3967 4350 return (IOC_INVAL);
3968 4351 break;
3969 4352 }
3970 4353
3971 4354 iocp->ioc_count = size;
3972 4355 iocp->ioc_error = 0;
3973 4356
3974 4357 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
3975 4358 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3976 4359 return (IOC_INVAL);
3977 4360 }
3978 4361
3979 4362 return (IOC_REPLY);
3980 4363 }
3981 4364
3982 4365 /*
3983 4366 * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode.
3984 4367 */
3985 4368 static boolean_t
3986 4369 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode)
3987 4370 {
3988 4371 if (mode == ixgbe->loopback_mode)
3989 4372 return (B_TRUE);
3990 4373
3991 4374 ixgbe->loopback_mode = mode;
3992 4375
3993 4376 if (mode == IXGBE_LB_NONE) {
3994 4377 /*
3995 4378 * Reset the chip
3996 4379 */
3997 4380 (void) ixgbe_reset(ixgbe);
3998 4381 return (B_TRUE);
3999 4382 }
4000 4383
4001 4384 mutex_enter(&ixgbe->gen_lock);
4002 4385
4003 4386 switch (mode) {
4004 4387 default:
4005 4388 mutex_exit(&ixgbe->gen_lock);
4006 4389 return (B_FALSE);
4007 4390
4008 4391 case IXGBE_LB_EXTERNAL:
4009 4392 break;
4010 4393
4011 4394 case IXGBE_LB_INTERNAL_MAC:
4012 4395 ixgbe_set_internal_mac_loopback(ixgbe);
4013 4396 break;
4014 4397 }
4015 4398
4016 4399 mutex_exit(&ixgbe->gen_lock);
4017 4400
4018 4401 return (B_TRUE);
4019 4402 }
4020 4403
4021 4404 /*
4022 4405 * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode.
4023 4406 */
4024 4407 static void
4025 4408 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe)
4026 4409 {
4027 4410 struct ixgbe_hw *hw;
4028 4411 uint32_t reg;
4029 4412 uint8_t atlas;
4030 4413
4031 4414 hw = &ixgbe->hw;
4032 4415
4033 4416 /*
4034 4417 * Setup MAC loopback
4035 4418 */
4036 4419 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0);
4037 4420 reg |= IXGBE_HLREG0_LPBK;
4038 4421 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg);
4039 4422
4040 4423 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
4041 4424 reg &= ~IXGBE_AUTOC_LMS_MASK;
4042 4425 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
4043 4426
4044 4427 /*
4045 4428 * Disable Atlas Tx lanes to keep packets in loopback and not on wire
4046 4429 */
4047 4430 switch (hw->mac.type) {
4048 4431 case ixgbe_mac_82598EB:
4049 4432 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
4050 4433 &atlas);
4051 4434 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
4052 4435 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
4053 4436 atlas);
4054 4437
4055 4438 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
4056 4439 &atlas);
4057 4440 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
4058 4441 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
4059 4442 atlas);
4060 4443
4061 4444 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
4062 4445 &atlas);
4063 4446 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
4064 4447 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
4065 4448 atlas);
4066 4449
4067 4450 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
4068 4451 &atlas);
4069 4452 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
4070 4453 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
4071 4454 atlas);
4072 4455 break;
4073 4456
4074 4457 case ixgbe_mac_82599EB:
4075 4458 case ixgbe_mac_X540:
4459 + case ixgbe_mac_X550:
4460 + case ixgbe_mac_X550EM_x:
4076 4461 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
4077 4462 reg |= (IXGBE_AUTOC_FLU |
4078 4463 IXGBE_AUTOC_10G_KX4);
4079 4464 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
4080 4465
4081 4466 (void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL,
4082 - B_FALSE, B_TRUE);
4467 + B_FALSE);
4083 4468 break;
4084 4469
4085 4470 default:
4086 4471 break;
4087 4472 }
4088 4473 }
4089 4474
4090 4475 #pragma inline(ixgbe_intr_rx_work)
4091 4476 /*
4092 4477 * ixgbe_intr_rx_work - RX processing of ISR.
4093 4478 */
4094 4479 static void
4095 4480 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
4096 4481 {
4097 4482 mblk_t *mp;
4098 4483
4099 4484 mutex_enter(&rx_ring->rx_lock);
4100 4485
4101 4486 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
4102 4487 mutex_exit(&rx_ring->rx_lock);
4103 4488
4104 4489 if (mp != NULL)
4105 4490 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
4106 4491 rx_ring->ring_gen_num);
4107 4492 }
4108 4493
4109 4494 #pragma inline(ixgbe_intr_tx_work)
4110 4495 /*
4111 4496 * ixgbe_intr_tx_work - TX processing of ISR.
4112 4497 */
4113 4498 static void
4114 4499 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring)
4115 4500 {
4116 4501 ixgbe_t *ixgbe = tx_ring->ixgbe;
4117 4502
4118 4503 /*
4119 4504 * Recycle the tx descriptors
4120 4505 */
4121 4506 tx_ring->tx_recycle(tx_ring);
4122 4507
4123 4508 /*
4124 4509 * Schedule the re-transmit
4125 4510 */
4126 4511 if (tx_ring->reschedule &&
4127 4512 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) {
4128 4513 tx_ring->reschedule = B_FALSE;
4129 4514 mac_tx_ring_update(tx_ring->ixgbe->mac_hdl,
4130 4515 tx_ring->ring_handle);
4131 4516 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
4132 4517 }
4133 4518 }
4134 4519
4135 4520 #pragma inline(ixgbe_intr_other_work)
4136 4521 /*
4137 4522 * ixgbe_intr_other_work - Process interrupt types other than tx/rx
4138 4523 */
4139 4524 static void
4140 4525 ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr)
4141 4526 {
4527 + struct ixgbe_hw *hw = &ixgbe->hw;
4528 +
4142 4529 ASSERT(mutex_owned(&ixgbe->gen_lock));
4143 4530
4144 4531 /*
4145 4532 * handle link status change
4146 4533 */
4147 4534 if (eicr & IXGBE_EICR_LSC) {
4148 4535 ixgbe_driver_link_check(ixgbe);
4149 4536 ixgbe_get_hw_state(ixgbe);
4150 4537 }
4151 4538
4152 4539 /*
4153 4540 * check for fan failure on adapters with fans
4154 4541 */
4155 4542 if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
4156 4543 (eicr & IXGBE_EICR_GPI_SDP1)) {
4157 4544 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
4158 4545
4159 4546 /*
4160 4547 * Disable the adapter interrupts
4161 4548 */
4162 4549 ixgbe_disable_adapter_interrupts(ixgbe);
4163 4550
4164 4551 /*
4165 4552 * Disable Rx/Tx units
4166 4553 */
4167 4554 (void) ixgbe_stop_adapter(&ixgbe->hw);
4168 4555
4169 4556 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
4170 4557 ixgbe_error(ixgbe,
4171 4558 "Problem: Network adapter has been stopped "
4172 4559 "because the fan has stopped.\n");
4173 4560 ixgbe_error(ixgbe,
4174 4561 "Action: Replace the adapter.\n");
4175 4562
4176 4563 /* re-enable the interrupt, which was automasked */
4177 4564 ixgbe->eims |= IXGBE_EICR_GPI_SDP1;
4178 4565 }
4179 4566
4180 4567 /*
4181 4568 * Do SFP check for adapters with hot-plug capability
4182 4569 */
4183 4570 if ((ixgbe->capab->flags & IXGBE_FLAG_SFP_PLUG_CAPABLE) &&
4184 - ((eicr & IXGBE_EICR_GPI_SDP1) || (eicr & IXGBE_EICR_GPI_SDP2))) {
4571 + ((eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) ||
4572 + (eicr & IXGBE_EICR_GPI_SDP2_BY_MAC(hw)))) {
4185 4573 ixgbe->eicr = eicr;
4186 4574 if ((ddi_taskq_dispatch(ixgbe->sfp_taskq,
4187 4575 ixgbe_sfp_check, (void *)ixgbe,
4188 4576 DDI_NOSLEEP)) != DDI_SUCCESS) {
4189 4577 ixgbe_log(ixgbe, "No memory available to dispatch "
4190 4578 "taskq for SFP check");
4191 4579 }
4192 4580 }
4193 4581
4194 4582 /*
4195 4583 * Do over-temperature check for adapters with temp sensor
4196 4584 */
4197 4585 if ((ixgbe->capab->flags & IXGBE_FLAG_TEMP_SENSOR_CAPABLE) &&
4198 - ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
4586 + ((eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw)) ||
4587 + (eicr & IXGBE_EICR_LSC))) {
4199 4588 ixgbe->eicr = eicr;
4200 4589 if ((ddi_taskq_dispatch(ixgbe->overtemp_taskq,
4201 4590 ixgbe_overtemp_check, (void *)ixgbe,
4202 4591 DDI_NOSLEEP)) != DDI_SUCCESS) {
4203 4592 ixgbe_log(ixgbe, "No memory available to dispatch "
4204 4593 "taskq for overtemp check");
4205 4594 }
4206 4595 }
4596 +
4597 + /*
4598 + * Process an external PHY interrupt
4599 + */
4600 + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
4601 + (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
4602 + ixgbe->eicr = eicr;
4603 + if ((ddi_taskq_dispatch(ixgbe->phy_taskq,
4604 + ixgbe_phy_check, (void *)ixgbe,
4605 + DDI_NOSLEEP)) != DDI_SUCCESS) {
4606 + ixgbe_log(ixgbe, "No memory available to dispatch "
4607 + "taskq for PHY check");
4608 + }
4609 + }
4207 4610 }
4208 4611
4209 4612 /*
4210 4613 * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
4211 4614 */
4212 4615 static uint_t
4213 4616 ixgbe_intr_legacy(void *arg1, void *arg2)
4214 4617 {
4215 4618 ixgbe_t *ixgbe = (ixgbe_t *)arg1;
4216 4619 struct ixgbe_hw *hw = &ixgbe->hw;
4217 4620 ixgbe_tx_ring_t *tx_ring;
4218 4621 ixgbe_rx_ring_t *rx_ring;
4219 4622 uint32_t eicr;
4220 4623 mblk_t *mp;
4221 4624 boolean_t tx_reschedule;
4222 4625 uint_t result;
4223 4626
4224 4627 _NOTE(ARGUNUSED(arg2));
4225 4628
4226 4629 mutex_enter(&ixgbe->gen_lock);
4227 4630 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
4228 4631 mutex_exit(&ixgbe->gen_lock);
4229 4632 return (DDI_INTR_UNCLAIMED);
4230 4633 }
4231 4634
4232 4635 mp = NULL;
4233 4636 tx_reschedule = B_FALSE;
4234 4637
4235 4638 /*
4236 4639 * Any bit set in eicr: claim this interrupt
4237 4640 */
4238 4641 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4239 4642
4240 4643 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
4241 4644 mutex_exit(&ixgbe->gen_lock);
4242 4645 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
4243 4646 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4244 4647 return (DDI_INTR_CLAIMED);
4245 4648 }
4246 4649
4247 4650 if (eicr) {
4248 4651 /*
4249 4652 * For legacy interrupt, we have only one interrupt,
4250 4653 * so we have only one rx ring and one tx ring enabled.
4251 4654 */
4252 4655 ASSERT(ixgbe->num_rx_rings == 1);
4253 4656 ASSERT(ixgbe->num_tx_rings == 1);
4254 4657
4255 4658 /*
4256 4659 * For legacy interrupt, rx rings[0] will use RTxQ[0].
4257 4660 */
4258 4661 if (eicr & 0x1) {
4259 4662 ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE;
4260 4663 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4261 4664 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
4262 4665 /*
4263 4666 * Clean the rx descriptors
4264 4667 */
4265 4668 rx_ring = &ixgbe->rx_rings[0];
4266 4669 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
4267 4670 }
4268 4671
4269 4672 /*
4270 4673 * For legacy interrupt, tx rings[0] will use RTxQ[1].
4271 4674 */
4272 4675 if (eicr & 0x2) {
4273 4676 /*
4274 4677 * Recycle the tx descriptors
4275 4678 */
4276 4679 tx_ring = &ixgbe->tx_rings[0];
4277 4680 tx_ring->tx_recycle(tx_ring);
4278 4681
4279 4682 /*
4280 4683 * Schedule the re-transmit
4281 4684 */
4282 4685 tx_reschedule = (tx_ring->reschedule &&
4283 4686 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
4284 4687 }
4285 4688
4286 4689 /* any interrupt type other than tx/rx */
4287 4690 if (eicr & ixgbe->capab->other_intr) {
4288 4691 switch (hw->mac.type) {
4289 4692 case ixgbe_mac_82598EB:
4290 4693 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4291 4694 break;
4292 4695
4293 4696 case ixgbe_mac_82599EB:
4294 4697 case ixgbe_mac_X540:
4698 + case ixgbe_mac_X550:
4699 + case ixgbe_mac_X550EM_x:
4295 4700 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4296 4701 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4297 4702 break;
4298 4703
4299 4704 default:
4300 4705 break;
4301 4706 }
4302 4707 ixgbe_intr_other_work(ixgbe, eicr);
4303 4708 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4304 4709 }
4305 4710
4306 4711 mutex_exit(&ixgbe->gen_lock);
4307 4712
4308 4713 result = DDI_INTR_CLAIMED;
4309 4714 } else {
4310 4715 mutex_exit(&ixgbe->gen_lock);
4311 4716
4312 4717 /*
4313 4718 * No interrupt cause bits set: don't claim this interrupt.
4314 4719 */
4315 4720 result = DDI_INTR_UNCLAIMED;
4316 4721 }
4317 4722
4318 4723 /* re-enable the interrupts which were automasked */
4319 4724 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4320 4725
4321 4726 /*
4322 4727 * Do the following work outside of the gen_lock
4323 4728 */
4324 4729 if (mp != NULL) {
4325 4730 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
4326 4731 rx_ring->ring_gen_num);
4327 4732 }
4328 4733
4329 4734 if (tx_reschedule) {
4330 4735 tx_ring->reschedule = B_FALSE;
4331 4736 mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle);
4332 4737 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
4333 4738 }
4334 4739
4335 4740 return (result);
4336 4741 }
4337 4742
4338 4743 /*
4339 4744 * ixgbe_intr_msi - Interrupt handler for MSI.
4340 4745 */
4341 4746 static uint_t
4342 4747 ixgbe_intr_msi(void *arg1, void *arg2)
4343 4748 {
4344 4749 ixgbe_t *ixgbe = (ixgbe_t *)arg1;
4345 4750 struct ixgbe_hw *hw = &ixgbe->hw;
4346 4751 uint32_t eicr;
4347 4752
4348 4753 _NOTE(ARGUNUSED(arg2));
4349 4754
4350 4755 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4351 4756
4352 4757 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
4353 4758 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
4354 4759 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4355 4760 return (DDI_INTR_CLAIMED);
4356 4761 }
4357 4762
4358 4763 /*
4359 4764 * For MSI interrupt, we have only one vector,
4360 4765 * so we have only one rx ring and one tx ring enabled.
4361 4766 */
4362 4767 ASSERT(ixgbe->num_rx_rings == 1);
4363 4768 ASSERT(ixgbe->num_tx_rings == 1);
4364 4769
4365 4770 /*
4366 4771 * For MSI interrupt, rx rings[0] will use RTxQ[0].
4367 4772 */
4368 4773 if (eicr & 0x1) {
4369 4774 ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
4370 4775 }
4371 4776
4372 4777 /*
4373 4778 * For MSI interrupt, tx rings[0] will use RTxQ[1].
4374 4779 */
4375 4780 if (eicr & 0x2) {
4376 4781 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
4377 4782 }
4378 4783
4379 4784 /* any interrupt type other than tx/rx */
4380 4785 if (eicr & ixgbe->capab->other_intr) {
4381 4786 mutex_enter(&ixgbe->gen_lock);
4382 4787 switch (hw->mac.type) {
4383 4788 case ixgbe_mac_82598EB:
4384 4789 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4385 4790 break;
4386 4791
4387 4792 case ixgbe_mac_82599EB:
4388 4793 case ixgbe_mac_X540:
4794 + case ixgbe_mac_X550:
4795 + case ixgbe_mac_X550EM_x:
4389 4796 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4390 4797 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4391 4798 break;
4392 4799
4393 4800 default:
4394 4801 break;
4395 4802 }
4396 4803 ixgbe_intr_other_work(ixgbe, eicr);
4397 4804 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4398 4805 mutex_exit(&ixgbe->gen_lock);
4399 4806 }
4400 4807
4401 4808 /* re-enable the interrupts which were automasked */
4402 4809 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4403 4810
4404 4811 return (DDI_INTR_CLAIMED);
4405 4812 }
4406 4813
4407 4814 /*
4408 4815 * ixgbe_intr_msix - Interrupt handler for MSI-X.
4409 4816 */
4410 4817 static uint_t
4411 4818 ixgbe_intr_msix(void *arg1, void *arg2)
4412 4819 {
4413 4820 ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1;
4414 4821 ixgbe_t *ixgbe = vect->ixgbe;
4415 4822 struct ixgbe_hw *hw = &ixgbe->hw;
4416 4823 uint32_t eicr;
4417 4824 int r_idx = 0;
4418 4825
4419 4826 _NOTE(ARGUNUSED(arg2));
4420 4827
4421 4828 /*
4422 4829 * Clean each rx ring that has its bit set in the map
4423 4830 */
4424 4831 r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
4425 4832 while (r_idx >= 0) {
4426 4833 ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]);
4427 4834 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
4428 4835 (ixgbe->num_rx_rings - 1));
4429 4836 }
4430 4837
4431 4838 /*
4432 4839 * Clean each tx ring that has its bit set in the map
4433 4840 */
4434 4841 r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1));
4435 4842 while (r_idx >= 0) {
4436 4843 ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]);
4437 4844 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
4438 4845 (ixgbe->num_tx_rings - 1));
4439 4846 }
4440 4847
4441 4848
4442 4849 /*
4443 4850 * Clean other interrupt (link change) that has its bit set in the map
4444 4851 */
4445 4852 if (BT_TEST(vect->other_map, 0) == 1) {
4446 4853 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4447 4854
4448 4855 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) !=
4449 4856 DDI_FM_OK) {
4450 4857 ddi_fm_service_impact(ixgbe->dip,
4451 4858 DDI_SERVICE_DEGRADED);
4452 4859 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4453 4860 return (DDI_INTR_CLAIMED);
4454 4861 }
4455 4862
4456 4863 /*
4457 4864 * Check "other" cause bits: any interrupt type other than tx/rx
4458 4865 */
4459 4866 if (eicr & ixgbe->capab->other_intr) {
4460 4867 mutex_enter(&ixgbe->gen_lock);
4461 4868 switch (hw->mac.type) {
4462 4869 case ixgbe_mac_82598EB:
4463 4870 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4464 4871 ixgbe_intr_other_work(ixgbe, eicr);
4465 4872 break;
4466 4873
4467 4874 case ixgbe_mac_82599EB:
4468 4875 case ixgbe_mac_X540:
4876 + case ixgbe_mac_X550:
4877 + case ixgbe_mac_X550EM_x:
4469 4878 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
4470 4879 ixgbe_intr_other_work(ixgbe, eicr);
4471 4880 break;
4472 4881
4473 4882 default:
4474 4883 break;
4475 4884 }
4476 4885 mutex_exit(&ixgbe->gen_lock);
4477 4886 }
4478 4887
4479 4888 /* re-enable the interrupts which were automasked */
4480 4889 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4481 4890 }
4482 4891
4483 4892 return (DDI_INTR_CLAIMED);
4484 4893 }
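
The rx/tx loops above visit every ring whose bit is set in the per-vector map via bt_getlowbit(). A minimal standalone sketch of the same idiom (find_low_bit() is a hypothetical stand-in for the kernel's bitmap routine):

    #include <limits.h>

    /*
     * Return the index of the lowest set bit of 'map' in [start, end],
     * or -1 if no bit in that range is set.
     */
    static int
    find_low_bit(unsigned long map, int start, int end)
    {
            int i;

            for (i = start;
                i <= end && i < (int)(sizeof (map) * CHAR_BIT); i++) {
                    if (map & (1UL << i))
                            return (i);
            }
            return (-1);
    }

    /*
     * Service every ring whose bit is set, the same way the handler
     * above walks vect->rx_map and vect->tx_map.
     */
    static void
    service_rings(unsigned long map, int nrings)
    {
            int r = find_low_bit(map, 0, nrings - 1);

            while (r >= 0) {
                    /* service ring r here */
                    r = find_low_bit(map, r + 1, nrings - 1);
            }
    }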
4485 4894
4486 4895 /*
4487 4896 * ixgbe_alloc_intrs - Allocate interrupts for the driver.
4488 4897 *
4489 4898 * Normal sequence is to try MSI-X; if not successful, try MSI;
4490 4899 * if not successful, try Legacy.
4491 4900 * ixgbe->intr_force can be used to force sequence to start with
4492 4901 * any of the 3 types.
4493 4902 * If MSI-X is not used, number of tx/rx rings is forced to 1.
4494 4903 */
4495 4904 static int
4496 4905 ixgbe_alloc_intrs(ixgbe_t *ixgbe)
4497 4906 {
4498 4907 dev_info_t *devinfo;
4499 4908 int intr_types;
4500 4909 int rc;
4501 4910
4502 4911 devinfo = ixgbe->dip;
4503 4912
4504 4913 /*
4505 4914 * Get supported interrupt types
4506 4915 */
4507 4916 rc = ddi_intr_get_supported_types(devinfo, &intr_types);
4508 4917
4509 4918 if (rc != DDI_SUCCESS) {
4510 4919 ixgbe_log(ixgbe,
4511 4920 "Get supported interrupt types failed: %d", rc);
4512 4921 return (IXGBE_FAILURE);
4513 4922 }
4514 4923 IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types);
4515 4924
4516 4925 ixgbe->intr_type = 0;
4517 4926
4518 4927 /*
4519 4928 * Install MSI-X interrupts
4520 4929 */
4521 4930 if ((intr_types & DDI_INTR_TYPE_MSIX) &&
4522 4931 (ixgbe->intr_force <= IXGBE_INTR_MSIX)) {
4523 4932 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX);
4524 4933 if (rc == IXGBE_SUCCESS)
4525 4934 return (IXGBE_SUCCESS);
4526 4935
4527 4936 ixgbe_log(ixgbe,
4528 4937 "Allocate MSI-X failed, trying MSI interrupts...");
4529 4938 }
4530 4939
4531 4940 /*
4532 4941 * MSI-X not used, force rings and groups to 1
4533 4942 */
4534 4943 ixgbe->num_rx_rings = 1;
4535 4944 ixgbe->num_rx_groups = 1;
4536 4945 ixgbe->num_tx_rings = 1;
4537 4946 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
4538 4947 ixgbe_log(ixgbe,
4539 4948 "MSI-X not used, force rings and groups number to 1");
4540 4949
4541 4950 /*
4542 4951 * Install MSI interrupts
4543 4952 */
4544 4953 if ((intr_types & DDI_INTR_TYPE_MSI) &&
4545 4954 (ixgbe->intr_force <= IXGBE_INTR_MSI)) {
4546 4955 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI);
4547 4956 if (rc == IXGBE_SUCCESS)
4548 4957 return (IXGBE_SUCCESS);
4549 4958
4550 4959 ixgbe_log(ixgbe,
4551 4960 "Allocate MSI failed, trying Legacy interrupts...");
4552 4961 }
4553 4962
4554 4963 /*
4555 4964 * Install legacy interrupts
4556 4965 */
4557 4966 if (intr_types & DDI_INTR_TYPE_FIXED) {
4967 + /*
4968 + * Disallow legacy interrupts for X550. X550 has a silicon
4969 + * bug which prevents Shared Legacy interrupts from working.
4970 + * For details, please refer to:
4971 + *
4972 + * Intel Ethernet Controller X550 Specification Update rev. 2.1
4973 + * May 2016, erratum 22: PCIe Interrupt Status Bit
4974 + */
4975 + if (ixgbe->hw.mac.type == ixgbe_mac_X550 ||
4976 + ixgbe->hw.mac.type == ixgbe_mac_X550EM_x ||
4977 + ixgbe->hw.mac.type == ixgbe_mac_X550_vf ||
4978 + ixgbe->hw.mac.type == ixgbe_mac_X550EM_x_vf) {
4979 + ixgbe_log(ixgbe,
4980 + "Legacy interrupts are not supported on this "
4981 + "adapter. Please use MSI or MSI-X instead.");
4982 + return (IXGBE_FAILURE);
4983 + }
4558 4984 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED);
4559 4985 if (rc == IXGBE_SUCCESS)
4560 4986 return (IXGBE_SUCCESS);
4561 4987
4562 4988 ixgbe_log(ixgbe,
4563 4989 "Allocate Legacy interrupts failed");
4564 4990 }
4565 4991
4566 4992 /*
4567 4993 * If none of the 3 types succeeded, return failure
4568 4994 */
4569 4995 return (IXGBE_FAILURE);
4570 4996 }
4571 4997
4572 4998 /*
4573 4999 * ixgbe_alloc_intr_handles - Allocate interrupt handles.
4574 5000 *
4575 5001 * For legacy and MSI, only 1 handle is needed. For MSI-X,
4576 5002  * if fewer handles than the minimum are available, return failure.
4577 5003 * Upon success, this maps the vectors to rx and tx rings for
4578 5004 * interrupts.
4579 5005 */
4580 5006 static int
4581 5007 ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type)
4582 5008 {
4583 5009 dev_info_t *devinfo;
4584 5010 int request, count, actual;
4585 5011 int minimum;
4586 5012 int rc;
4587 5013 uint32_t ring_per_group;
4588 5014
4589 5015 devinfo = ixgbe->dip;
4590 5016
4591 5017 switch (intr_type) {
4592 5018 case DDI_INTR_TYPE_FIXED:
4593 5019 request = 1; /* Request 1 legacy interrupt handle */
4594 5020 minimum = 1;
4595 5021 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy");
4596 5022 break;
4597 5023
4598 5024 case DDI_INTR_TYPE_MSI:
4599 5025 request = 1; /* Request 1 MSI interrupt handle */
4600 5026 minimum = 1;
4601 5027 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI");
4602 5028 break;
4603 5029
4604 5030 case DDI_INTR_TYPE_MSIX:
4605 5031 /*
4606 5032  * The best number of vectors for the adapter is
4607 5033  * (# rx rings + # tx rings); however, we will
4608 5034  * cap the number requested.
4609 5035 */
4610 5036 request = min(16, ixgbe->num_rx_rings + ixgbe->num_tx_rings);
4611 5037 if (request > ixgbe->capab->max_ring_vect)
4612 5038 request = ixgbe->capab->max_ring_vect;
4613 5039 minimum = 1;
4614 5040 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X");
4615 5041 break;
4616 5042
4617 5043 default:
4618 5044 ixgbe_log(ixgbe,
4619 5045 "invalid call to ixgbe_alloc_intr_handles(): %d\n",
4620 5046 intr_type);
4621 5047 return (IXGBE_FAILURE);
4622 5048 }
4623 5049 IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d minimum: %d",
4624 5050 request, minimum);
4625 5051
4626 5052 /*
4627 5053 * Get number of supported interrupts
4628 5054 */
4629 5055 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
4630 5056 if ((rc != DDI_SUCCESS) || (count < minimum)) {
4631 5057 ixgbe_log(ixgbe,
4632 5058 "Get interrupt number failed. Return: %d, count: %d",
4633 5059 rc, count);
4634 5060 return (IXGBE_FAILURE);
4635 5061 }
4636 5062 IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count);
4637 5063
4638 5064 actual = 0;
4639 5065 ixgbe->intr_cnt = 0;
4640 5066 ixgbe->intr_cnt_max = 0;
4641 5067 ixgbe->intr_cnt_min = 0;
4642 5068
4643 5069 /*
4644 5070 * Allocate an array of interrupt handles
4645 5071 */
4646 5072 ixgbe->intr_size = request * sizeof (ddi_intr_handle_t);
4647 5073 ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP);
4648 5074
4649 5075 rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0,
4650 5076 request, &actual, DDI_INTR_ALLOC_NORMAL);
4651 5077 if (rc != DDI_SUCCESS) {
4652 5078 ixgbe_log(ixgbe, "Allocate interrupts failed. "
4653 5079 "return: %d, request: %d, actual: %d",
4654 5080 rc, request, actual);
4655 5081 goto alloc_handle_fail;
4656 5082 }
4657 5083 IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual);
4658 5084
4659 5085 /*
4660 5086 * upper/lower limit of interrupts
4661 5087 */
4662 5088 ixgbe->intr_cnt = actual;
4663 5089 ixgbe->intr_cnt_max = request;
4664 5090 ixgbe->intr_cnt_min = minimum;
4665 5091
4666 5092 /*
4667 5093  * The number of RSS rings per group must not exceed the number of rx
4668 5094  * interrupt vectors; otherwise the rx ring count must be adjusted.
4669 5095 */
4670 5096 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
4671 5097 ASSERT((ixgbe->num_rx_rings % ixgbe->num_rx_groups) == 0);
4672 5098 if (actual < ring_per_group) {
4673 5099 ixgbe->num_rx_rings = ixgbe->num_rx_groups * actual;
4674 5100 ixgbe_setup_vmdq_rss_conf(ixgbe);
4675 5101 }
4676 5102
4677 5103 /*
4678 5104  * Now we know the actual number of vectors. Here we map the vectors
4679 5105  * to the "other" cause and to the rx and tx rings.
4680 5106 */
4681 5107 if (actual < minimum) {
4682 5108 ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d",
4683 5109 actual);
4684 5110 goto alloc_handle_fail;
4685 5111 }
4686 5112
4687 5113 /*
4688 5114 * Get priority for first vector, assume remaining are all the same
4689 5115 */
4690 5116 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
4691 5117 if (rc != DDI_SUCCESS) {
4692 5118 ixgbe_log(ixgbe,
4693 5119 "Get interrupt priority failed: %d", rc);
4694 5120 goto alloc_handle_fail;
4695 5121 }
4696 5122
4697 5123 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
4698 5124 if (rc != DDI_SUCCESS) {
4699 5125 ixgbe_log(ixgbe,
4700 5126 "Get interrupt cap failed: %d", rc);
4701 5127 goto alloc_handle_fail;
4702 5128 }
4703 5129
4704 5130 ixgbe->intr_type = intr_type;
4705 5131
4706 5132 return (IXGBE_SUCCESS);
4707 5133
4708 5134 alloc_handle_fail:
4709 5135 ixgbe_rem_intrs(ixgbe);
4710 5136
4711 5137 return (IXGBE_FAILURE);
4712 5138 }
4713 5139
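For a sense of the sizing above: with, say, 8 rx and 8 tx rings, the driver asks for min(16, 8 + 8) = 16 MSI-X vectors, further capped by capab->max_ring_vect; ddi_intr_alloc() may still return fewer, in which case the rx ring count is shrunk so that rings-per-group never exceeds the vector count. A sketch of just the request computation (the ceiling of 16 mirrors the code above; the rest is illustrative):

/* Compute the MSI-X vector request for given ring counts. */
static int
demo_msix_request(int num_rx, int num_tx, int max_ring_vect)
{
	int request = num_rx + num_tx;

	if (request > 16)
		request = 16;		/* driver-imposed ceiling */
	if (request > max_ring_vect)
		request = max_ring_vect; /* hardware capability cap */
	return (request);
}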
4714 5140 /*
4715 5141 * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type.
4716 5142 *
4717 5143 * Before adding the interrupt handlers, the interrupt vectors have
4718 5144 * been allocated, and the rx/tx rings have also been allocated.
4719 5145 */
4720 5146 static int
4721 5147 ixgbe_add_intr_handlers(ixgbe_t *ixgbe)
4722 5148 {
4723 5149 int vector = 0;
4724 5150 int rc;
4725 5151
4726 5152 switch (ixgbe->intr_type) {
4727 5153 case DDI_INTR_TYPE_MSIX:
4728 5154 /*
4729 5155 * Add interrupt handler for all vectors
4730 5156 */
4731 5157 for (vector = 0; vector < ixgbe->intr_cnt; vector++) {
4732 5158 /*
4733 5159 * install pointer to vect_map[vector]
4734 5160 */
4735 5161 rc = ddi_intr_add_handler(ixgbe->htable[vector],
4736 5162 (ddi_intr_handler_t *)ixgbe_intr_msix,
4737 5163 (void *)&ixgbe->vect_map[vector], NULL);
4738 5164
4739 5165 if (rc != DDI_SUCCESS) {
4740 5166 ixgbe_log(ixgbe,
4741 5167 "Add interrupt handler failed. "
4742 5168 "return: %d, vector: %d", rc, vector);
4743 5169 for (vector--; vector >= 0; vector--) {
4744 5170 (void) ddi_intr_remove_handler(
4745 5171 ixgbe->htable[vector]);
4746 5172 }
4747 5173 return (IXGBE_FAILURE);
4748 5174 }
4749 5175 }
4750 5176
4751 5177 break;
4752 5178
4753 5179 case DDI_INTR_TYPE_MSI:
4754 5180 /*
4755 5181 * Add interrupt handlers for the only vector
4756 5182 */
4757 5183 rc = ddi_intr_add_handler(ixgbe->htable[vector],
4758 5184 (ddi_intr_handler_t *)ixgbe_intr_msi,
4759 5185 (void *)ixgbe, NULL);
4760 5186
4761 5187 if (rc != DDI_SUCCESS) {
4762 5188 ixgbe_log(ixgbe,
4763 5189 "Add MSI interrupt handler failed: %d", rc);
4764 5190 return (IXGBE_FAILURE);
4765 5191 }
4766 5192
4767 5193 break;
4768 5194
4769 5195 case DDI_INTR_TYPE_FIXED:
4770 5196 /*
4771 5197 * Add interrupt handlers for the only vector
4772 5198 */
4773 5199 rc = ddi_intr_add_handler(ixgbe->htable[vector],
4774 5200 (ddi_intr_handler_t *)ixgbe_intr_legacy,
4775 5201 (void *)ixgbe, NULL);
4776 5202
4777 5203 if (rc != DDI_SUCCESS) {
4778 5204 ixgbe_log(ixgbe,
4779 5205 "Add legacy interrupt handler failed: %d", rc);
4780 5206 return (IXGBE_FAILURE);
4781 5207 }
4782 5208
4783 5209 break;
4784 5210
4785 5211 default:
4786 5212 return (IXGBE_FAILURE);
4787 5213 }
4788 5214
4789 5215 return (IXGBE_SUCCESS);
4790 5216 }
4791 5217
4792 5218 #pragma inline(ixgbe_map_rxring_to_vector)
4793 5219 /*
4794 5220 * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector.
4795 5221 */
4796 5222 static void
4797 5223 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx)
4798 5224 {
4799 5225 /*
4800 5226 * Set bit in map
4801 5227 */
4802 5228 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
4803 5229
4804 5230 /*
4805 5231 * Count bits set
4806 5232 */
4807 5233 ixgbe->vect_map[v_idx].rxr_cnt++;
4808 5234
4809 5235 /*
4810 5236 * Remember bit position
4811 5237 */
4812 5238 ixgbe->rx_rings[r_idx].intr_vector = v_idx;
4813 5239 ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx;
4814 5240 }
4815 5241
4816 5242 #pragma inline(ixgbe_map_txring_to_vector)
4817 5243 /*
4818 5244 * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector.
4819 5245 */
4820 5246 static void
4821 5247 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx)
4822 5248 {
4823 5249 /*
4824 5250 * Set bit in map
4825 5251 */
4826 5252 BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx);
4827 5253
4828 5254 /*
4829 5255 * Count bits set
4830 5256 */
4831 5257 ixgbe->vect_map[v_idx].txr_cnt++;
4832 5258
4833 5259 /*
4834 5260 * Remember bit position
4835 5261 */
4836 5262 ixgbe->tx_rings[t_idx].intr_vector = v_idx;
4837 5263 ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx;
4838 5264 }
4839 5265
4840 5266 /*
4841 5267 * ixgbe_setup_ivar - Set the given entry in the given interrupt vector
4842 5268 * allocation register (IVAR).
4843 5269 * cause:
4844 5270 * -1 : other cause
4845 5271 * 0 : rx
4846 5272 * 1 : tx
4847 5273 */
4848 5274 static void
4849 5275 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
4850 5276 int8_t cause)
4851 5277 {
4852 5278 struct ixgbe_hw *hw = &ixgbe->hw;
4853 5279 u32 ivar, index;
4854 5280
4855 5281 switch (hw->mac.type) {
4856 5282 case ixgbe_mac_82598EB:
4857 5283 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4858 5284 if (cause == -1) {
4859 5285 cause = 0;
4860 5286 }
4861 5287 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4862 5288 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4863 5289 ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
4864 5290 ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
4865 5291 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4866 5292 break;
4867 5293
4868 5294 case ixgbe_mac_82599EB:
4869 5295 case ixgbe_mac_X540:
5296 + case ixgbe_mac_X550:
5297 + case ixgbe_mac_X550EM_x:
4870 5298 if (cause == -1) {
4871 5299 /* other causes */
4872 5300 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4873 5301 index = (intr_alloc_entry & 1) * 8;
4874 5302 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4875 5303 ivar &= ~(0xFF << index);
4876 5304 ivar |= (msix_vector << index);
4877 5305 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4878 5306 } else {
4879 5307 /* tx or rx causes */
4880 5308 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4881 5309 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4882 5310 ivar = IXGBE_READ_REG(hw,
4883 5311 IXGBE_IVAR(intr_alloc_entry >> 1));
4884 5312 ivar &= ~(0xFF << index);
4885 5313 ivar |= (msix_vector << index);
4886 5314 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4887 5315 ivar);
4888 5316 }
4889 5317 break;
4890 5318
4891 5319 default:
4892 5320 break;
4893 5321 }
4894 5322 }
4895 5323
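On the 82599/X540/X550 MACs each 32-bit IVAR register covers two queues, holding four 8-bit allocation entries: the rx entry occupies the low byte of each 16-bit half and the tx entry the high byte. A sketch unpacking the index arithmetic used above (demo names, not a driver API):

#include <sys/types.h>

/*
 * Locate the IVAR register index and bit offset for a queue on
 * 82599-class MACs. cause: 0 = rx, 1 = tx, as in the code above.
 */
static void
demo_ivar_locate(uint16_t entry, int8_t cause, uint32_t *reg, uint32_t *shift)
{
	*reg = entry >> 1;			/* two queues per IVAR */
	*shift = (16 * (entry & 1)) + (8 * cause);
}

For example, rx queue 5 (cause 0) resolves to IVAR(2) at bit offset 16, i.e. bits 23:16 of that register.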
4896 5324 /*
4897 5325 * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of
4898 5326 * given interrupt vector allocation register (IVAR).
4899 5327 * cause:
4900 5328 * -1 : other cause
4901 5329 * 0 : rx
4902 5330 * 1 : tx
4903 5331 */
4904 5332 static void
4905 5333 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4906 5334 {
4907 5335 struct ixgbe_hw *hw = &ixgbe->hw;
4908 5336 u32 ivar, index;
4909 5337
4910 5338 switch (hw->mac.type) {
4911 5339 case ixgbe_mac_82598EB:
4912 5340 if (cause == -1) {
4913 5341 cause = 0;
4914 5342 }
4915 5343 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4916 5344 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4917 5345 ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
4918 5346 (intr_alloc_entry & 0x3)));
4919 5347 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4920 5348 break;
4921 5349
4922 5350 case ixgbe_mac_82599EB:
4923 5351 case ixgbe_mac_X540:
5352 + case ixgbe_mac_X550:
5353 + case ixgbe_mac_X550EM_x:
4924 5354 if (cause == -1) {
4925 5355 /* other causes */
4926 5356 index = (intr_alloc_entry & 1) * 8;
4927 5357 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4928 5358 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4929 5359 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4930 5360 } else {
4931 5361 /* tx or rx causes */
4932 5362 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4933 5363 ivar = IXGBE_READ_REG(hw,
4934 5364 IXGBE_IVAR(intr_alloc_entry >> 1));
4935 5365 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4936 5366 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4937 5367 ivar);
4938 5368 }
4939 5369 break;
4940 5370
4941 5371 default:
4942 5372 break;
4943 5373 }
4944 5374 }
4945 5375
4946 5376 /*
4947 5377  * ixgbe_disable_ivar - Disable the given entry by clearing the VAL bit of
4948 5378 * given interrupt vector allocation register (IVAR).
4949 5379 * cause:
4950 5380 * -1 : other cause
4951 5381 * 0 : rx
4952 5382 * 1 : tx
4953 5383 */
4954 5384 static void
4955 5385 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4956 5386 {
4957 5387 struct ixgbe_hw *hw = &ixgbe->hw;
4958 5388 u32 ivar, index;
4959 5389
4960 5390 switch (hw->mac.type) {
4961 5391 case ixgbe_mac_82598EB:
4962 5392 if (cause == -1) {
4963 5393 cause = 0;
4964 5394 }
4965 5395 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4966 5396 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4967 5397 ivar &= ~(IXGBE_IVAR_ALLOC_VAL<< (8 *
4968 5398 (intr_alloc_entry & 0x3)));
4969 5399 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4970 5400 break;
4971 5401
4972 5402 case ixgbe_mac_82599EB:
4973 5403 case ixgbe_mac_X540:
5404 + case ixgbe_mac_X550:
5405 + case ixgbe_mac_X550EM_x:
4974 5406 if (cause == -1) {
4975 5407 /* other causes */
4976 5408 index = (intr_alloc_entry & 1) * 8;
4977 5409 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4978 5410 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4979 5411 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4980 5412 } else {
4981 5413 /* tx or rx causes */
4982 5414 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4983 5415 ivar = IXGBE_READ_REG(hw,
4984 5416 IXGBE_IVAR(intr_alloc_entry >> 1));
4985 5417 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4986 5418 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4987 5419 ivar);
4988 5420 }
4989 5421 break;
4990 5422
4991 5423 default:
4992 5424 break;
4993 5425 }
4994 5426 }
4995 5427
4996 5428 /*
4997 5429  * Convert the driver-maintained rx ring index to the corresponding
4998 5430  * rx ring index in h/w.
4999 5431 */
5000 5432 static uint32_t
5001 5433 ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index)
5002 5434 {
5003 5435
5004 5436 struct ixgbe_hw *hw = &ixgbe->hw;
5005 5437 uint32_t rx_ring_per_group, hw_rx_index;
5006 5438
5007 5439 if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS ||
5008 5440 ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) {
5009 5441 return (sw_rx_index);
5010 5442 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) {
5011 5443 switch (hw->mac.type) {
5012 5444 case ixgbe_mac_82598EB:
5013 5445 return (sw_rx_index);
5014 5446
5015 5447 case ixgbe_mac_82599EB:
5016 5448 case ixgbe_mac_X540:
5449 + case ixgbe_mac_X550:
5450 + case ixgbe_mac_X550EM_x:
5017 5451 return (sw_rx_index * 2);
5018 5452
5019 5453 default:
5020 5454 break;
5021 5455 }
5022 5456 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) {
5023 5457 rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
5024 5458
5025 5459 switch (hw->mac.type) {
5026 5460 case ixgbe_mac_82598EB:
5027 5461 hw_rx_index = (sw_rx_index / rx_ring_per_group) *
5028 5462 16 + (sw_rx_index % rx_ring_per_group);
5029 5463 return (hw_rx_index);
5030 5464
5031 5465 case ixgbe_mac_82599EB:
5032 5466 case ixgbe_mac_X540:
5467 + case ixgbe_mac_X550:
5468 + case ixgbe_mac_X550EM_x:
5033 5469 if (ixgbe->num_rx_groups > 32) {
5034 5470 hw_rx_index = (sw_rx_index /
5035 5471 rx_ring_per_group) * 2 +
5036 5472 (sw_rx_index % rx_ring_per_group);
5037 5473 } else {
5038 5474 hw_rx_index = (sw_rx_index /
5039 5475 rx_ring_per_group) * 4 +
5040 5476 (sw_rx_index % rx_ring_per_group);
5041 5477 }
5042 5478 return (hw_rx_index);
5043 5479
5044 5480 default:
5045 5481 break;
5046 5482 }
5047 5483 }
5048 5484
5049 5485 /*
5050 5486  * Should never be reached. Just to make the compiler happy.
5051 5487 */
5052 5488 return (sw_rx_index);
5053 5489 }
5054 5490
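As a worked example of the VMDQ+RSS mapping above, with assumed numbers: on an 82599-class MAC with 2 rings per group, software ring 5 (group 2, ring 1) maps to hardware ring (5 / 2) * 2 + (5 % 2) = 5 when more than 32 groups are configured, but to (5 / 2) * 4 + (5 % 2) = 9 with 32 or fewer groups, where each group strides over 4 hardware rings. The same math as a sketch:

#include <sys/types.h>

/* VMDQ+RSS software-to-hardware rx index math (82599-class). */
static uint32_t
demo_hw_rx_index(uint32_t sw_idx, uint32_t rings_per_group,
    uint32_t num_groups)
{
	uint32_t stride = (num_groups > 32) ? 2 : 4;

	return ((sw_idx / rings_per_group) * stride +
	    (sw_idx % rings_per_group));
}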
5055 5491 /*
5056 5492 * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors.
5057 5493 *
5058 5494  * For MSI-X, this maps the rx, tx, and other interrupts
5059 5495  * to vectors 0 through (intr_cnt - 1).
5060 5496 */
5061 5497 static int
5062 5498 ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe)
5063 5499 {
5064 5500 int i, vector = 0;
5065 5501
5066 5502 /* initialize vector map */
5067 5503 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
5068 5504 for (i = 0; i < ixgbe->intr_cnt; i++) {
5069 5505 ixgbe->vect_map[i].ixgbe = ixgbe;
5070 5506 }
5071 5507
5072 5508 /*
5073 5509 * non-MSI-X case is very simple: rx rings[0] on RTxQ[0],
5074 5510 * tx rings[0] on RTxQ[1].
5075 5511 */
5076 5512 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
5077 5513 ixgbe_map_rxring_to_vector(ixgbe, 0, 0);
5078 5514 ixgbe_map_txring_to_vector(ixgbe, 0, 1);
5079 5515 return (IXGBE_SUCCESS);
5080 5516 }
5081 5517
5082 5518 /*
5083 5519 * Interrupts/vectors mapping for MSI-X
5084 5520 */
5085 5521
5086 5522 /*
5087 5523 * Map other interrupt to vector 0,
5088 5524 * Set bit in map and count the bits set.
5089 5525 */
5090 5526 BT_SET(ixgbe->vect_map[vector].other_map, 0);
5091 5527 ixgbe->vect_map[vector].other_cnt++;
5092 5528
5093 5529 /*
5094 5530 * Map rx ring interrupts to vectors
5095 5531 */
5096 5532 for (i = 0; i < ixgbe->num_rx_rings; i++) {
5097 5533 ixgbe_map_rxring_to_vector(ixgbe, i, vector);
5098 5534 		vector = (vector + 1) % ixgbe->intr_cnt;
5099 5535 }
5100 5536
5101 5537 /*
5102 5538 * Map tx ring interrupts to vectors
5103 5539 */
5104 5540 for (i = 0; i < ixgbe->num_tx_rings; i++) {
5105 5541 ixgbe_map_txring_to_vector(ixgbe, i, vector);
5106 5542 		vector = (vector + 1) % ixgbe->intr_cnt;
5107 5543 }
5108 5544
5109 5545 return (IXGBE_SUCCESS);
5110 5546 }
5111 5547
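The round-robin above interleaves rings over the allocated vectors. With intr_cnt = 4 and four rx plus four tx rings, rx ring i and tx ring i both land on vector i, and vector 0 additionally carries the "other" cause. A sketch of the assignment using demo arrays in place of driver state:

/* Record the vector each ring lands on under round-robin mapping. */
static void
demo_map_rings(int intr_cnt, int nrx, int ntx, int *rx_vec, int *tx_vec)
{
	int i, vector = 0;	/* vector 0 also carries "other" */

	for (i = 0; i < nrx; i++) {
		rx_vec[i] = vector;
		vector = (vector + 1) % intr_cnt;
	}
	for (i = 0; i < ntx; i++) {
		tx_vec[i] = vector;
		vector = (vector + 1) % intr_cnt;
	}
}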
5112 5548 /*
5113 5549 * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
5114 5550 *
5115 5551 * This relies on ring/vector mapping already set up in the
5116 5552 * vect_map[] structures
5117 5553 */
5118 5554 static void
5119 5555 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
5120 5556 {
5121 5557 struct ixgbe_hw *hw = &ixgbe->hw;
5122 5558 ixgbe_intr_vector_t *vect; /* vector bitmap */
5123 5559 int r_idx; /* ring index */
5124 5560 int v_idx; /* vector index */
5125 5561 uint32_t hw_index;
5126 5562
5127 5563 /*
5128 5564 * Clear any previous entries
5129 5565 */
5130 5566 switch (hw->mac.type) {
5131 5567 case ixgbe_mac_82598EB:
5132 5568 for (v_idx = 0; v_idx < 25; v_idx++)
5133 5569 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
5134 5570 break;
5135 5571
5136 5572 case ixgbe_mac_82599EB:
5137 5573 case ixgbe_mac_X540:
5574 + case ixgbe_mac_X550:
5575 + case ixgbe_mac_X550EM_x:
5138 5576 for (v_idx = 0; v_idx < 64; v_idx++)
5139 5577 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
5140 5578 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
5141 5579 break;
5142 5580
5143 5581 default:
5144 5582 break;
5145 5583 }
5146 5584
5147 5585 /*
5148 5586  * For non-MSI-X interrupts, rx rings[0] will use RTxQ[0], and
5149 5587 * tx rings[0] will use RTxQ[1].
5150 5588 */
5151 5589 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
5152 5590 ixgbe_setup_ivar(ixgbe, 0, 0, 0);
5153 5591 ixgbe_setup_ivar(ixgbe, 0, 1, 1);
5154 5592 return;
5155 5593 }
5156 5594
5157 5595 /*
5158 5596 * For MSI-X interrupt, "Other" is always on vector[0].
5159 5597 */
5160 5598 ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1);
5161 5599
5162 5600 /*
5163 5601 * For each interrupt vector, populate the IVAR table
5164 5602 */
5165 5603 for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) {
5166 5604 vect = &ixgbe->vect_map[v_idx];
5167 5605
5168 5606 /*
5169 5607 * For each rx ring bit set
5170 5608 */
5171 5609 r_idx = bt_getlowbit(vect->rx_map, 0,
5172 5610 (ixgbe->num_rx_rings - 1));
5173 5611
5174 5612 while (r_idx >= 0) {
5175 5613 hw_index = ixgbe->rx_rings[r_idx].hw_index;
5176 5614 ixgbe_setup_ivar(ixgbe, hw_index, v_idx, 0);
5177 5615 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
5178 5616 (ixgbe->num_rx_rings - 1));
5179 5617 }
5180 5618
5181 5619 /*
5182 5620 * For each tx ring bit set
5183 5621 */
5184 5622 r_idx = bt_getlowbit(vect->tx_map, 0,
5185 5623 (ixgbe->num_tx_rings - 1));
5186 5624
5187 5625 while (r_idx >= 0) {
5188 5626 ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1);
5189 5627 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
5190 5628 (ixgbe->num_tx_rings - 1));
5191 5629 }
5192 5630 }
5193 5631 }
5194 5632
5195 5633 /*
5196 5634 * ixgbe_rem_intr_handlers - Remove the interrupt handlers.
5197 5635 */
5198 5636 static void
5199 5637 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe)
5200 5638 {
5201 5639 int i;
5202 5640 int rc;
5203 5641
5204 5642 for (i = 0; i < ixgbe->intr_cnt; i++) {
5205 5643 rc = ddi_intr_remove_handler(ixgbe->htable[i]);
5206 5644 if (rc != DDI_SUCCESS) {
5207 5645 IXGBE_DEBUGLOG_1(ixgbe,
5208 5646 "Remove intr handler failed: %d", rc);
5209 5647 }
5210 5648 }
5211 5649 }
5212 5650
5213 5651 /*
5214 5652 * ixgbe_rem_intrs - Remove the allocated interrupts.
5215 5653 */
5216 5654 static void
5217 5655 ixgbe_rem_intrs(ixgbe_t *ixgbe)
5218 5656 {
5219 5657 int i;
5220 5658 int rc;
5221 5659
5222 5660 for (i = 0; i < ixgbe->intr_cnt; i++) {
5223 5661 rc = ddi_intr_free(ixgbe->htable[i]);
5224 5662 if (rc != DDI_SUCCESS) {
5225 5663 IXGBE_DEBUGLOG_1(ixgbe,
5226 5664 "Free intr failed: %d", rc);
5227 5665 }
5228 5666 }
5229 5667
5230 5668 kmem_free(ixgbe->htable, ixgbe->intr_size);
5231 5669 ixgbe->htable = NULL;
5232 5670 }
5233 5671
5234 5672 /*
5235 5673 * ixgbe_enable_intrs - Enable all the ddi interrupts.
5236 5674 */
5237 5675 static int
5238 5676 ixgbe_enable_intrs(ixgbe_t *ixgbe)
5239 5677 {
5240 5678 int i;
5241 5679 int rc;
5242 5680
5243 5681 /*
5244 5682 * Enable interrupts
5245 5683 */
5246 5684 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
5247 5685 /*
5248 5686 * Call ddi_intr_block_enable() for MSI
5249 5687 */
5250 5688 rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt);
5251 5689 if (rc != DDI_SUCCESS) {
5252 5690 ixgbe_log(ixgbe,
5253 5691 "Enable block intr failed: %d", rc);
5254 5692 return (IXGBE_FAILURE);
5255 5693 }
5256 5694 } else {
5257 5695 /*
5258 5696 * Call ddi_intr_enable() for Legacy/MSI non block enable
5259 5697 */
5260 5698 for (i = 0; i < ixgbe->intr_cnt; i++) {
5261 5699 rc = ddi_intr_enable(ixgbe->htable[i]);
5262 5700 if (rc != DDI_SUCCESS) {
5263 5701 ixgbe_log(ixgbe,
5264 5702 "Enable intr failed: %d", rc);
5265 5703 return (IXGBE_FAILURE);
5266 5704 }
5267 5705 }
5268 5706 }
5269 5707
5270 5708 return (IXGBE_SUCCESS);
5271 5709 }
5272 5710
5273 5711 /*
5274 5712 * ixgbe_disable_intrs - Disable all the interrupts.
5275 5713 */
5276 5714 static int
5277 5715 ixgbe_disable_intrs(ixgbe_t *ixgbe)
5278 5716 {
5279 5717 int i;
5280 5718 int rc;
5281 5719
5282 5720 /*
5283 5721 * Disable all interrupts
5284 5722 */
5285 5723 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
5286 5724 rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt);
5287 5725 if (rc != DDI_SUCCESS) {
5288 5726 ixgbe_log(ixgbe,
5289 5727 "Disable block intr failed: %d", rc);
5290 5728 return (IXGBE_FAILURE);
5291 5729 }
5292 5730 } else {
5293 5731 for (i = 0; i < ixgbe->intr_cnt; i++) {
5294 5732 rc = ddi_intr_disable(ixgbe->htable[i]);
5295 5733 if (rc != DDI_SUCCESS) {
5296 5734 ixgbe_log(ixgbe,
5297 5735 "Disable intr failed: %d", rc);
5298 5736 return (IXGBE_FAILURE);
5299 5737 }
5300 5738 }
5301 5739 }
5302 5740
5303 5741 return (IXGBE_SUCCESS);
5304 5742 }
5305 5743
5306 5744 /*
5307 5745 * ixgbe_get_hw_state - Get and save parameters related to adapter hardware.
5308 5746 */
5309 5747 static void
5310 5748 ixgbe_get_hw_state(ixgbe_t *ixgbe)
5311 5749 {
5312 5750 struct ixgbe_hw *hw = &ixgbe->hw;
5313 - ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
5751 + ixgbe_link_speed speed = 0;
5314 5752 boolean_t link_up = B_FALSE;
5315 5753 uint32_t pcs1g_anlp = 0;
5316 - uint32_t pcs1g_ana = 0;
5317 - boolean_t autoneg = B_FALSE;
5318 5754
5319 5755 ASSERT(mutex_owned(&ixgbe->gen_lock));
5320 5756 ixgbe->param_lp_1000fdx_cap = 0;
5321 5757 ixgbe->param_lp_100fdx_cap = 0;
5322 5758
5323 5759 /* check for link, don't wait */
5324 - (void) ixgbe_check_link(hw, &speed, &link_up, false);
5325 - pcs1g_ana = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
5760 + (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE);
5326 5761
5762 + /*
5763 + * Update the observed Link Partner's capabilities. Not all adapters
5764 + * can provide full information on the LP's capable speeds, so we
5765 + * provide what we can.
5766 + */
5327 5767 if (link_up) {
5328 5768 pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
5329 5769
5330 5770 ixgbe->param_lp_1000fdx_cap =
5331 5771 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
5332 5772 ixgbe->param_lp_100fdx_cap =
5333 5773 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
5334 5774 }
5335 5775
5336 - (void) ixgbe_get_link_capabilities(hw, &speed, &autoneg);
5776 + /*
5777 + * Update GLD's notion of the adapter's currently advertised speeds.
5778 + * Since the common code doesn't always record the current autonegotiate
5779 + * settings in the phy struct for all parts (specifically, adapters with
5780 + * SFPs) we first test to see if it is 0, and if so, we fall back to
5781 + * using the adapter's speed capabilities which we saved during instance
5782 + * init in ixgbe_init_params().
5783 + *
5784 + * Adapters with SFPs will always be shown as advertising all of their
5785 + * supported speeds, and adapters with baseT PHYs (where the phy struct
5786 + * is maintained by the common code) will always have a factual view of
5787 + * their currently-advertised speeds. In the case of SFPs, this is
5788 + * acceptable as we default to advertising all speeds that the adapter
5789 + * claims to support, and those properties are immutable; unlike on
5790 + * baseT (copper) PHYs, where speeds can be enabled or disabled at will.
5791 + */
5792 + speed = hw->phy.autoneg_advertised;
5793 + if (speed == 0)
5794 + speed = ixgbe->speeds_supported;
5337 5795
5338 - ixgbe->param_adv_1000fdx_cap = ((pcs1g_ana & IXGBE_PCS1GANA_FDC) &&
5339 - (speed & IXGBE_LINK_SPEED_1GB_FULL)) ? 1 : 0;
5340 - ixgbe->param_adv_100fdx_cap = ((pcs1g_ana & IXGBE_PCS1GANA_FDC) &&
5341 - (speed & IXGBE_LINK_SPEED_100_FULL)) ? 1 : 0;
5796 + ixgbe->param_adv_10000fdx_cap =
5797 + (speed & IXGBE_LINK_SPEED_10GB_FULL) ? 1 : 0;
5798 + ixgbe->param_adv_5000fdx_cap =
5799 + (speed & IXGBE_LINK_SPEED_5GB_FULL) ? 1 : 0;
5800 + ixgbe->param_adv_2500fdx_cap =
5801 + (speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 1 : 0;
5802 + ixgbe->param_adv_1000fdx_cap =
5803 + (speed & IXGBE_LINK_SPEED_1GB_FULL) ? 1 : 0;
5804 + ixgbe->param_adv_100fdx_cap =
5805 + (speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0;
5342 5806 }
5343 5807
5344 5808 /*
5345 5809 * ixgbe_get_driver_control - Notify that driver is in control of device.
5346 5810 */
5347 5811 static void
5348 5812 ixgbe_get_driver_control(struct ixgbe_hw *hw)
5349 5813 {
5350 5814 uint32_t ctrl_ext;
5351 5815
5352 5816 /*
5353 5817 * Notify firmware that driver is in control of device
5354 5818 */
5355 5819 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5356 5820 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
5357 5821 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5358 5822 }
5359 5823
5360 5824 /*
5361 5825 * ixgbe_release_driver_control - Notify that driver is no longer in control
5362 5826 * of device.
5363 5827 */
5364 5828 static void
5365 5829 ixgbe_release_driver_control(struct ixgbe_hw *hw)
5366 5830 {
5367 5831 uint32_t ctrl_ext;
5368 5832
5369 5833 /*
5370 5834 * Notify firmware that driver is no longer in control of device
5371 5835 */
5372 5836 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5373 5837 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
5374 5838 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5375 5839 }
5376 5840
5377 5841 /*
5378 5842 * ixgbe_atomic_reserve - Atomic decrease operation.
5379 5843 */
5380 5844 int
5381 5845 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n)
5382 5846 {
5383 5847 uint32_t oldval;
5384 5848 uint32_t newval;
5385 5849
5386 5850 /*
5387 5851 * ATOMICALLY
5388 5852 */
5389 5853 do {
5390 5854 oldval = *count_p;
5391 5855 if (oldval < n)
5392 5856 return (-1);
5393 5857 newval = oldval - n;
5394 5858 } while (atomic_cas_32(count_p, oldval, newval) != oldval);
5395 5859
5396 5860 return (newval);
5397 5861 }
5398 5862
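ixgbe_atomic_reserve() is a lock-free take-or-fail primitive: the compare-and-swap loop retries until it either observes fewer than n free items (and returns -1) or atomically moves the counter from oldval to oldval - n. A hypothetical caller reserving transmit descriptors might look like this (demo names, not the driver's actual tx path):

#include <sys/types.h>

/* Try to claim ndesc free tx descriptors from a shared counter. */
static int
demo_reserve_tx(uint32_t *tbd_free, uint32_t ndesc)
{
	if (ixgbe_atomic_reserve(tbd_free, ndesc) < 0)
		return (-1);	/* too few free; caller backs off */
	/* ... fill and hand off ndesc descriptors ... */
	return (0);
}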
5399 5863 /*
5400 5864 * ixgbe_mc_table_itr - Traverse the entries in the multicast table.
5401 5865 */
5402 5866 static uint8_t *
5403 5867 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq)
5404 5868 {
5405 5869 uint8_t *addr = *upd_ptr;
5406 5870 uint8_t *new_ptr;
5407 5871
5408 5872 _NOTE(ARGUNUSED(hw));
5409 5873 _NOTE(ARGUNUSED(vmdq));
5410 5874
5411 5875 new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
5412 5876 *upd_ptr = new_ptr;
5413 5877 return (addr);
5414 5878 }
5415 5879
5416 5880 /*
5417 5881 * FMA support
5418 5882 */
5419 5883 int
5420 5884 ixgbe_check_acc_handle(ddi_acc_handle_t handle)
5421 5885 {
5422 5886 ddi_fm_error_t de;
5423 5887
5424 5888 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
5425 5889 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
5426 5890 return (de.fme_status);
5427 5891 }
5428 5892
5429 5893 int
5430 5894 ixgbe_check_dma_handle(ddi_dma_handle_t handle)
5431 5895 {
5432 5896 ddi_fm_error_t de;
5433 5897
5434 5898 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
5435 5899 return (de.fme_status);
5436 5900 }
5437 5901
5438 5902 /*
5439 5903 * ixgbe_fm_error_cb - The IO fault service error handling callback function.
5440 5904 */
5441 5905 static int
5442 5906 ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
5443 5907 {
5444 5908 _NOTE(ARGUNUSED(impl_data));
5445 5909 /*
5446 5910 * as the driver can always deal with an error in any dma or
5447 5911 * access handle, we can just return the fme_status value.
5448 5912 */
5449 5913 pci_ereport_post(dip, err, NULL);
5450 5914 return (err->fme_status);
5451 5915 }
5452 5916
5453 5917 static void
5454 5918 ixgbe_fm_init(ixgbe_t *ixgbe)
5455 5919 {
5456 5920 ddi_iblock_cookie_t iblk;
5457 5921 int fma_dma_flag;
5458 5922
5459 5923 /*
5460 5924 * Only register with IO Fault Services if we have some capability
5461 5925 */
5462 5926 if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
5463 5927 ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
5464 5928 } else {
5465 5929 ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
5466 5930 }
5467 5931
5468 5932 if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
5469 5933 fma_dma_flag = 1;
5470 5934 } else {
5471 5935 fma_dma_flag = 0;
5472 5936 }
5473 5937
5474 5938 ixgbe_set_fma_flags(fma_dma_flag);
5475 5939
5476 5940 if (ixgbe->fm_capabilities) {
5477 5941
5478 5942 /*
5479 5943 * Register capabilities with IO Fault Services
5480 5944 */
5481 5945 ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk);
5482 5946
5483 5947 /*
5484 5948 * Initialize pci ereport capabilities if ereport capable
5485 5949 */
5486 5950 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
5487 5951 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5488 5952 pci_ereport_setup(ixgbe->dip);
5489 5953
5490 5954 /*
5491 5955 * Register error callback if error callback capable
5492 5956 */
5493 5957 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5494 5958 ddi_fm_handler_register(ixgbe->dip,
5495 5959 ixgbe_fm_error_cb, (void*) ixgbe);
5496 5960 }
5497 5961 }
5498 5962
5499 5963 static void
5500 5964 ixgbe_fm_fini(ixgbe_t *ixgbe)
5501 5965 {
5502 5966 /*
5503 5967 * Only unregister FMA capabilities if they are registered
5504 5968 */
5505 5969 if (ixgbe->fm_capabilities) {
5506 5970
5507 5971 /*
5508 5972 * Release any resources allocated by pci_ereport_setup()
5509 5973 */
5510 5974 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
5511 5975 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5512 5976 pci_ereport_teardown(ixgbe->dip);
5513 5977
5514 5978 /*
5515 5979 * Un-register error callback if error callback capable
5516 5980 */
5517 5981 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5518 5982 ddi_fm_handler_unregister(ixgbe->dip);
5519 5983
5520 5984 /*
5521 5985 * Unregister from IO Fault Service
5522 5986 */
5523 5987 ddi_fm_fini(ixgbe->dip);
5524 5988 }
5525 5989 }
5526 5990
5527 5991 void
5528 5992 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail)
5529 5993 {
5530 5994 uint64_t ena;
5531 5995 char buf[FM_MAX_CLASS];
5532 5996
5533 5997 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
5534 5998 ena = fm_ena_generate(0, FM_ENA_FMT1);
5535 5999 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) {
5536 6000 ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP,
5537 6001 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
5538 6002 }
5539 6003 }
5540 6004
5541 6005 static int
5542 6006 ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
5543 6007 {
5544 6008 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh;
5545 6009
5546 6010 mutex_enter(&rx_ring->rx_lock);
5547 6011 rx_ring->ring_gen_num = mr_gen_num;
5548 6012 mutex_exit(&rx_ring->rx_lock);
5549 6013 return (0);
5550 6014 }
5551 6015
5552 6016 /*
5553 6017 * Get the global ring index by a ring index within a group.
5554 6018 */
5555 6019 static int
5556 6020 ixgbe_get_rx_ring_index(ixgbe_t *ixgbe, int gindex, int rindex)
5557 6021 {
5558 6022 ixgbe_rx_ring_t *rx_ring;
5559 6023 int i;
5560 6024
5561 6025 for (i = 0; i < ixgbe->num_rx_rings; i++) {
5562 6026 rx_ring = &ixgbe->rx_rings[i];
5563 6027 if (rx_ring->group_index == gindex)
5564 6028 rindex--;
5565 6029 if (rindex < 0)
5566 6030 return (i);
5567 6031 }
5568 6032
5569 6033 return (-1);
5570 6034 }
5571 6035
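In other words, the search returns the global index of the rindex'th rx ring belonging to group gindex: if the rings with group_index == 1 are global rings 2, 5, and 7, then (gindex = 1, rindex = 2) resolves to 7. The same search over a plain array, as a sketch:

/* Find the rindex'th ring (zero-based) whose group matches gindex. */
static int
demo_get_ring_index(const int *group_of, int nrings, int gindex, int rindex)
{
	int i;

	for (i = 0; i < nrings; i++) {
		if (group_of[i] == gindex)
			rindex--;
		if (rindex < 0)
			return (i);
	}
	return (-1);
}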
5572 6036 /*
5573 6037  * Callback function for the MAC layer to register all rings.
5574 6038 */
5575 6039 /* ARGSUSED */
5576 6040 void
5577 6041 ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index,
5578 6042 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
5579 6043 {
5580 6044 ixgbe_t *ixgbe = (ixgbe_t *)arg;
5581 6045 mac_intr_t *mintr = &infop->mri_intr;
5582 6046
5583 6047 switch (rtype) {
5584 6048 case MAC_RING_TYPE_RX: {
5585 6049 /*
5586 6050 * 'index' is the ring index within the group.
5587 6051 * Need to get the global ring index by searching in groups.
5588 6052 */
5589 6053 int global_ring_index = ixgbe_get_rx_ring_index(
5590 6054 ixgbe, group_index, ring_index);
5591 6055
5592 6056 ASSERT(global_ring_index >= 0);
5593 6057
5594 6058 ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[global_ring_index];
5595 6059 rx_ring->ring_handle = rh;
5596 6060
5597 6061 infop->mri_driver = (mac_ring_driver_t)rx_ring;
5598 6062 infop->mri_start = ixgbe_ring_start;
5599 6063 infop->mri_stop = NULL;
5600 6064 infop->mri_poll = ixgbe_ring_rx_poll;
5601 6065 infop->mri_stat = ixgbe_rx_ring_stat;
5602 6066
5603 6067 mintr->mi_handle = (mac_intr_handle_t)rx_ring;
5604 6068 mintr->mi_enable = ixgbe_rx_ring_intr_enable;
5605 6069 mintr->mi_disable = ixgbe_rx_ring_intr_disable;
5606 6070 if (ixgbe->intr_type &
5607 6071 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
5608 6072 mintr->mi_ddi_handle =
5609 6073 ixgbe->htable[rx_ring->intr_vector];
5610 6074 }
5611 6075
5612 6076 break;
5613 6077 }
5614 6078 case MAC_RING_TYPE_TX: {
5615 6079 ASSERT(group_index == -1);
5616 6080 ASSERT(ring_index < ixgbe->num_tx_rings);
5617 6081
5618 6082 ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index];
5619 6083 tx_ring->ring_handle = rh;
5620 6084
5621 6085 infop->mri_driver = (mac_ring_driver_t)tx_ring;
5622 6086 infop->mri_start = NULL;
5623 6087 infop->mri_stop = NULL;
5624 6088 infop->mri_tx = ixgbe_ring_tx;
5625 6089 infop->mri_stat = ixgbe_tx_ring_stat;
5626 6090 if (ixgbe->intr_type &
5627 6091 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
5628 6092 mintr->mi_ddi_handle =
5629 6093 ixgbe->htable[tx_ring->intr_vector];
5630 6094 }
5631 6095 break;
5632 6096 }
5633 6097 default:
5634 6098 break;
5635 6099 }
5636 6100 }
5637 6101
5638 6102 /*
5639 6103  * Callback function for the MAC layer to register all groups.
5640 6104 */
5641 6105 void
5642 6106 ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
5643 6107 mac_group_info_t *infop, mac_group_handle_t gh)
5644 6108 {
5645 6109 ixgbe_t *ixgbe = (ixgbe_t *)arg;
5646 6110
5647 6111 switch (rtype) {
5648 6112 case MAC_RING_TYPE_RX: {
5649 6113 ixgbe_rx_group_t *rx_group;
5650 6114
5651 6115 rx_group = &ixgbe->rx_groups[index];
5652 6116 rx_group->group_handle = gh;
5653 6117
5654 6118 infop->mgi_driver = (mac_group_driver_t)rx_group;
5655 6119 infop->mgi_start = NULL;
5656 6120 infop->mgi_stop = NULL;
5657 6121 infop->mgi_addmac = ixgbe_addmac;
5658 6122 infop->mgi_remmac = ixgbe_remmac;
5659 6123 infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups);
5660 6124
5661 6125 break;
5662 6126 }
5663 6127 case MAC_RING_TYPE_TX:
5664 6128 break;
5665 6129 default:
5666 6130 break;
5667 6131 }
5668 6132 }
5669 6133
5670 6134 /*
5671 6135  * Enable the interrupt on the specified rx ring.
5672 6136 */
5673 6137 int
5674 6138 ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh)
5675 6139 {
5676 6140 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
5677 6141 ixgbe_t *ixgbe = rx_ring->ixgbe;
5678 6142 int r_idx = rx_ring->index;
5679 6143 int hw_r_idx = rx_ring->hw_index;
5680 6144 int v_idx = rx_ring->intr_vector;
5681 6145
5682 6146 mutex_enter(&ixgbe->gen_lock);
5683 6147 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
5684 6148 mutex_exit(&ixgbe->gen_lock);
5685 6149 /*
5686 6150 * Simply return 0.
5687 6151 * Interrupts are being adjusted. ixgbe_intr_adjust()
5688 6152 * will eventually re-enable the interrupt when it's
5689 6153 * done with the adjustment.
5690 6154 */
5691 6155 return (0);
5692 6156 }
5693 6157
5694 6158 /*
5695 6159 	 * Enable the interrupt by setting the VAL bit of the given interrupt
5696 6160 	 * vector allocation register (IVAR).
5697 6161 */
5698 6162 ixgbe_enable_ivar(ixgbe, hw_r_idx, 0);
5699 6163
5700 6164 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
5701 6165
5702 6166 /*
5703 6167 * Trigger a Rx interrupt on this ring
5704 6168 */
5705 6169 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx));
5706 6170 IXGBE_WRITE_FLUSH(&ixgbe->hw);
5707 6171
5708 6172 mutex_exit(&ixgbe->gen_lock);
5709 6173
5710 6174 return (0);
5711 6175 }
5712 6176
5713 6177 /*
5714 6178  * Disable the interrupt on the specified rx ring.
5715 6179 */
5716 6180 int
5717 6181 ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh)
5718 6182 {
5719 6183 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
5720 6184 ixgbe_t *ixgbe = rx_ring->ixgbe;
5721 6185 int r_idx = rx_ring->index;
5722 6186 int hw_r_idx = rx_ring->hw_index;
5723 6187 int v_idx = rx_ring->intr_vector;
5724 6188
5725 6189 mutex_enter(&ixgbe->gen_lock);
5726 6190 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
5727 6191 mutex_exit(&ixgbe->gen_lock);
5728 6192 /*
5729 6193 * Simply return 0.
5730 6194 * In the rare case where an interrupt is being
5731 6195 * disabled while interrupts are being adjusted,
5732 6196 * we don't fail the operation. No interrupts will
5733 6197 * be generated while they are adjusted, and
5734 6198 * ixgbe_intr_adjust() will cause the interrupts
5735 6199 * to be re-enabled once it completes. Note that
5736 6200 * in this case, packets may be delivered to the
5737 6201 		 * stack via interrupts before ixgbe_rx_ring_intr_enable()
5738 6202 * is called again. This is acceptable since interrupt
5739 6203 * adjustment is infrequent, and the stack will be
5740 6204 * able to handle these packets.
5741 6205 */
5742 6206 return (0);
5743 6207 }
5744 6208
5745 6209 /*
5746 6210 	 * Disable the interrupt by clearing the VAL bit of the given interrupt
5747 6211 	 * vector allocation register (IVAR).
5748 6212 */
5749 6213 ixgbe_disable_ivar(ixgbe, hw_r_idx, 0);
5750 6214
5751 6215 BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx);
5752 6216
5753 6217 mutex_exit(&ixgbe->gen_lock);
5754 6218
5755 6219 return (0);
5756 6220 }
5757 6221
5758 6222 /*
5759 6223 * Add a mac address.
5760 6224 */
5761 6225 static int
5762 6226 ixgbe_addmac(void *arg, const uint8_t *mac_addr)
5763 6227 {
5764 6228 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
5765 6229 ixgbe_t *ixgbe = rx_group->ixgbe;
5766 6230 struct ixgbe_hw *hw = &ixgbe->hw;
5767 6231 int slot, i;
5768 6232
5769 6233 mutex_enter(&ixgbe->gen_lock);
5770 6234
5771 6235 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
5772 6236 mutex_exit(&ixgbe->gen_lock);
5773 6237 return (ECANCELED);
5774 6238 }
5775 6239
5776 6240 if (ixgbe->unicst_avail == 0) {
5777 6241 /* no slots available */
5778 6242 mutex_exit(&ixgbe->gen_lock);
5779 6243 return (ENOSPC);
5780 6244 }
5781 6245
5782 6246 /*
5783 6247 	 * The first ixgbe->num_rx_groups slots are reserved, one for each
5784 6248 	 * respective group. The remaining slots are shared by all groups. When
5785 6249 	 * adding a MAC address, the group's reserved slot is checked first,
5786 6250 	 * then the shared slots are searched.
5787 6251 */
5788 6252 slot = -1;
5789 6253 if (ixgbe->unicst_addr[rx_group->index].mac.set == 1) {
5790 6254 for (i = ixgbe->num_rx_groups; i < ixgbe->unicst_total; i++) {
5791 6255 if (ixgbe->unicst_addr[i].mac.set == 0) {
5792 6256 slot = i;
5793 6257 break;
5794 6258 }
5795 6259 }
5796 6260 } else {
5797 6261 slot = rx_group->index;
5798 6262 }
5799 6263
5800 6264 if (slot == -1) {
5801 6265 /* no slots available */
5802 6266 mutex_exit(&ixgbe->gen_lock);
5803 6267 return (ENOSPC);
5804 6268 }
5805 6269
5806 6270 bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
5807 6271 (void) ixgbe_set_rar(hw, slot, ixgbe->unicst_addr[slot].mac.addr,
5808 6272 rx_group->index, IXGBE_RAH_AV);
5809 6273 ixgbe->unicst_addr[slot].mac.set = 1;
5810 6274 ixgbe->unicst_addr[slot].mac.group_index = rx_group->index;
5811 6275 ixgbe->unicst_avail--;
5812 6276
5813 6277 mutex_exit(&ixgbe->gen_lock);
5814 6278
5815 6279 return (0);
5816 6280 }
5817 6281
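To make the slot layout concrete with assumed numbers: given 4 rx groups and 16 RAR slots, slots 0-3 are the per-group reserved slots and slots 4-15 are shared, so a second address added to group 2 skips its already-set reserved slot and takes the first free shared slot at index 4 or above. A sketch of that search, with a demo array standing in for unicst_addr[]:

/* Pick a RAR slot: the group's reserved slot first, then shared ones. */
static int
demo_find_slot(const int *slot_set, int group, int num_groups, int total)
{
	int i;

	if (slot_set[group] == 0)
		return (group);		/* reserved slot is free */
	for (i = num_groups; i < total; i++) {
		if (slot_set[i] == 0)
			return (i);	/* first free shared slot */
	}
	return (-1);			/* no slot available */
}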
5818 6282 /*
5819 6283 * Remove a mac address.
5820 6284 */
5821 6285 static int
5822 6286 ixgbe_remmac(void *arg, const uint8_t *mac_addr)
5823 6287 {
5824 6288 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
5825 6289 ixgbe_t *ixgbe = rx_group->ixgbe;
5826 6290 struct ixgbe_hw *hw = &ixgbe->hw;
5827 6291 int slot;
5828 6292
5829 6293 mutex_enter(&ixgbe->gen_lock);
5830 6294
5831 6295 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
5832 6296 mutex_exit(&ixgbe->gen_lock);
5833 6297 return (ECANCELED);
5834 6298 }
5835 6299
5836 6300 slot = ixgbe_unicst_find(ixgbe, mac_addr);
5837 6301 if (slot == -1) {
5838 6302 mutex_exit(&ixgbe->gen_lock);
5839 6303 return (EINVAL);
5840 6304 }
5841 6305
5842 6306 if (ixgbe->unicst_addr[slot].mac.set == 0) {
5843 6307 mutex_exit(&ixgbe->gen_lock);
5844 6308 return (EINVAL);
5845 6309 }
5846 6310
5847 6311 bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
5848 6312 (void) ixgbe_clear_rar(hw, slot);
5849 6313 ixgbe->unicst_addr[slot].mac.set = 0;
5850 6314 ixgbe->unicst_avail++;
5851 6315
5852 6316 mutex_exit(&ixgbe->gen_lock);
5853 6317
5854 6318 return (0);
5855 6319 }