/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013 OSN Online Service Nuernberg GmbH. All rights reserved.
 * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
 */

#include "ixgbe_sw.h"

static char ixgbe_ident[] = "Intel 10Gb Ethernet";

/*
 * Local function prototypes
 */
static int ixgbe_register_mac(ixgbe_t *);
static int ixgbe_identify_hardware(ixgbe_t *);
static int ixgbe_regs_map(ixgbe_t *);
static void ixgbe_init_properties(ixgbe_t *);
static int ixgbe_init_driver_settings(ixgbe_t *);
static void ixgbe_init_locks(ixgbe_t *);
static void ixgbe_destroy_locks(ixgbe_t *);
static int ixgbe_init(ixgbe_t *);
static int ixgbe_chip_start(ixgbe_t *);
static void ixgbe_chip_stop(ixgbe_t *);
static int ixgbe_reset(ixgbe_t *);
static void ixgbe_tx_clean(ixgbe_t *);
static boolean_t ixgbe_tx_drain(ixgbe_t *);
static boolean_t ixgbe_rx_drain(ixgbe_t *);
static int ixgbe_alloc_rings(ixgbe_t *);
static void ixgbe_free_rings(ixgbe_t *);
static int ixgbe_alloc_rx_data(ixgbe_t *);
static void ixgbe_free_rx_data(ixgbe_t *);
static void ixgbe_setup_rings(ixgbe_t *);
static void ixgbe_setup_rx(ixgbe_t *);
static void ixgbe_setup_tx(ixgbe_t *);
static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
static void ixgbe_setup_rss(ixgbe_t *);
static void ixgbe_setup_vmdq(ixgbe_t *);
static void ixgbe_setup_vmdq_rss(ixgbe_t *);
static void ixgbe_setup_rss_table(ixgbe_t *);
static void ixgbe_init_unicst(ixgbe_t *);
static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
static void ixgbe_setup_multicst(ixgbe_t *);
static void ixgbe_get_hw_state(ixgbe_t *);
static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe);
static void ixgbe_get_conf(ixgbe_t *);
static void ixgbe_init_params(ixgbe_t *);
static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
static void ixgbe_driver_link_check(ixgbe_t *);
static void ixgbe_sfp_check(void *);
static void ixgbe_overtemp_check(void *);
static void ixgbe_phy_check(void *);
static void ixgbe_link_timer(void *);
static void ixgbe_local_timer(void *);
static void ixgbe_arm_watchdog_timer(ixgbe_t *);
static void ixgbe_restart_watchdog_timer(ixgbe_t *);
static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
static boolean_t is_valid_mac_addr(uint8_t *);
static boolean_t ixgbe_stall_check(ixgbe_t *);
static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
static boolean_t ixgbe_find_mac_address(ixgbe_t *);
static int ixgbe_alloc_intrs(ixgbe_t *);
static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
static int ixgbe_add_intr_handlers(ixgbe_t *);
static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
static uint32_t ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index);
static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
static void ixgbe_setup_adapter_vector(ixgbe_t *);
static void ixgbe_rem_intr_handlers(ixgbe_t *);
static void ixgbe_rem_intrs(ixgbe_t *);
static int ixgbe_enable_intrs(ixgbe_t *);
static int ixgbe_disable_intrs(ixgbe_t *);
static uint_t ixgbe_intr_legacy(void *, void *);
static uint_t ixgbe_intr_msi(void *, void *);
static uint_t ixgbe_intr_msix(void *, void *);
static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
static void ixgbe_get_driver_control(struct ixgbe_hw *);
static int ixgbe_addmac(void *, const uint8_t *);
static int ixgbe_remmac(void *, const uint8_t *);
static void ixgbe_release_driver_control(struct ixgbe_hw *);

static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
static int ixgbe_resume(dev_info_t *);
static int ixgbe_suspend(dev_info_t *);
static int ixgbe_quiesce(dev_info_t *);
static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
static int ixgbe_cbfunc(dev_info_t *, ddi_cb_action_t, void *, void *, void *);
static int ixgbe_intr_cb_register(ixgbe_t *);
static int ixgbe_intr_adjust(ixgbe_t *, ddi_cb_action_t, int);

static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
    const void *impl_data);
static void ixgbe_fm_init(ixgbe_t *);
static void ixgbe_fm_fini(ixgbe_t *);

char *ixgbe_priv_props[] = {
    "_tx_copy_thresh",
    "_tx_recycle_thresh",
    "_tx_overload_thresh",
    "_tx_resched_thresh",
    "_rx_copy_thresh",
    "_rx_limit_per_intr",
    "_intr_throttling",
    "_adv_pause_cap",
    "_adv_asym_pause_cap",
    NULL
};

#define IXGBE_MAX_PRIV_PROPS \
    (sizeof (ixgbe_priv_props) / sizeof (mac_priv_prop_t))

static struct cb_ops ixgbe_cb_ops = {
    nulldev,            /* cb_open */
    nulldev,            /* cb_close */
    nodev,              /* cb_strategy */
    nodev,              /* cb_print */
    nodev,              /* cb_dump */
    nodev,              /* cb_read */
    nodev,              /* cb_write */
    nodev,              /* cb_ioctl */
    nodev,              /* cb_devmap */
    nodev,              /* cb_mmap */
    nodev,              /* cb_segmap */
    nochpoll,           /* cb_chpoll */
    ddi_prop_op,        /* cb_prop_op */
    NULL,               /* cb_stream */
    D_MP | D_HOTPLUG,   /* cb_flag */
    CB_REV,             /* cb_rev */
    nodev,              /* cb_aread */
    nodev               /* cb_awrite */
};

static struct dev_ops ixgbe_dev_ops = {
    DEVO_REV,           /* devo_rev */
    0,                  /* devo_refcnt */
    NULL,               /* devo_getinfo */
    nulldev,            /* devo_identify */
    nulldev,            /* devo_probe */
    ixgbe_attach,       /* devo_attach */
    ixgbe_detach,       /* devo_detach */
    nodev,              /* devo_reset */
    &ixgbe_cb_ops,      /* devo_cb_ops */
    NULL,               /* devo_bus_ops */
    ddi_power,          /* devo_power */
    ixgbe_quiesce,      /* devo_quiesce */
};

static struct modldrv ixgbe_modldrv = {
    &mod_driverops,     /* Type of module.  This one is a driver */
    ixgbe_ident,        /* Description string */
    &ixgbe_dev_ops      /* driver ops */
};

static struct modlinkage ixgbe_modlinkage = {
    MODREV_1, &ixgbe_modldrv, NULL
};

/*
 * Access attributes for register mapping
 */
ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
    DDI_DEVICE_ATTR_V1,
    DDI_STRUCTURE_LE_ACC,
    DDI_STRICTORDER_ACC,
    DDI_FLAGERR_ACC
};

/*
 * Loopback property
 */
static lb_property_t lb_normal = {
    normal, "normal", IXGBE_LB_NONE
};

static lb_property_t lb_mac = {
    internal, "MAC", IXGBE_LB_INTERNAL_MAC
};

static lb_property_t lb_external = {
    external, "External", IXGBE_LB_EXTERNAL
};

#define IXGBE_M_CALLBACK_FLAGS \
    (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)

static mac_callbacks_t ixgbe_m_callbacks = {
    IXGBE_M_CALLBACK_FLAGS,
    ixgbe_m_stat,
    ixgbe_m_start,
    ixgbe_m_stop,
    ixgbe_m_promisc,
    ixgbe_m_multicst,
    NULL,
    NULL,
    NULL,
    ixgbe_m_ioctl,
    ixgbe_m_getcapab,
    NULL,
    NULL,
    ixgbe_m_setprop,
    ixgbe_m_getprop,
    ixgbe_m_propinfo
};

/*
 * Initialize capabilities of each supported adapter type
 */
static adapter_info_t ixgbe_82598eb_cap = {
    64,             /* maximum number of rx queues */
    1,              /* minimum number of rx queues */
    64,             /* default number of rx queues */
    16,             /* maximum number of rx groups */
    1,              /* minimum number of rx groups */
    1,              /* default number of rx groups */
    32,             /* maximum number of tx queues */
    1,              /* minimum number of tx queues */
    8,              /* default number of tx queues */
    16366,          /* maximum MTU size */
    0xFFFF,         /* maximum interrupt throttle rate */
    0,              /* minimum interrupt throttle rate */
    200,            /* default interrupt throttle rate */
    18,             /* maximum total msix vectors */
    16,             /* maximum number of ring vectors */
    2,              /* maximum number of other vectors */
    IXGBE_EICR_LSC, /* "other" interrupt types handled */
    0,              /* "other" interrupt types enable mask */
    (IXGBE_FLAG_DCA_CAPABLE     /* capability flags */
    | IXGBE_FLAG_RSS_CAPABLE
    | IXGBE_FLAG_VMDQ_CAPABLE)
};

static adapter_info_t ixgbe_82599eb_cap = {
    128,            /* maximum number of rx queues */
    1,              /* minimum number of rx queues */
    128,            /* default number of rx queues */
    64,             /* maximum number of rx groups */
    1,              /* minimum number of rx groups */
    1,              /* default number of rx groups */
    128,            /* maximum number of tx queues */
    1,              /* minimum number of tx queues */
    8,              /* default number of tx queues */
    15500,          /* maximum MTU size */
    0xFF8,          /* maximum interrupt throttle rate */
    0,              /* minimum interrupt throttle rate */
    200,            /* default interrupt throttle rate */
    64,             /* maximum total msix vectors */
    16,             /* maximum number of ring vectors */
    2,              /* maximum number of other vectors */
    (IXGBE_EICR_LSC
    | IXGBE_EICR_GPI_SDP1
    | IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */

    (IXGBE_SDP1_GPIEN
    | IXGBE_SDP2_GPIEN),    /* "other" interrupt types enable mask */

    (IXGBE_FLAG_DCA_CAPABLE
    | IXGBE_FLAG_RSS_CAPABLE
    | IXGBE_FLAG_VMDQ_CAPABLE
    | IXGBE_FLAG_RSC_CAPABLE
    | IXGBE_FLAG_SFP_PLUG_CAPABLE)  /* capability flags */
};

static adapter_info_t ixgbe_X540_cap = {
    128,            /* maximum number of rx queues */
    1,              /* minimum number of rx queues */
    128,            /* default number of rx queues */
    64,             /* maximum number of rx groups */
    1,              /* minimum number of rx groups */
    1,              /* default number of rx groups */
    128,            /* maximum number of tx queues */
    1,              /* minimum number of tx queues */
    8,              /* default number of tx queues */
    15500,          /* maximum MTU size */
    0xFF8,          /* maximum interrupt throttle rate */
    0,              /* minimum interrupt throttle rate */
    200,            /* default interrupt throttle rate */
    64,             /* maximum total msix vectors */
    16,             /* maximum number of ring vectors */
    2,              /* maximum number of other vectors */
    (IXGBE_EICR_LSC
    | IXGBE_EICR_GPI_SDP1_X540
    | IXGBE_EICR_GPI_SDP2_X540), /* "other" interrupt types handled */

    (IXGBE_SDP1_GPIEN_X540
    | IXGBE_SDP2_GPIEN_X540),   /* "other" interrupt types enable mask */

    (IXGBE_FLAG_DCA_CAPABLE
    | IXGBE_FLAG_RSS_CAPABLE
    | IXGBE_FLAG_VMDQ_CAPABLE
    | IXGBE_FLAG_RSC_CAPABLE)   /* capability flags */
};

static adapter_info_t ixgbe_X550_cap = {
    128,            /* maximum number of rx queues */
    1,              /* minimum number of rx queues */
    128,            /* default number of rx queues */
    64,             /* maximum number of rx groups */
    1,              /* minimum number of rx groups */
    1,              /* default number of rx groups */
    128,            /* maximum number of tx queues */
    1,              /* minimum number of tx queues */
    8,              /* default number of tx queues */
    15500,          /* maximum MTU size */
    0xFF8,          /* maximum interrupt throttle rate */
    0,              /* minimum interrupt throttle rate */
    0x200,          /* default interrupt throttle rate */
    64,             /* maximum total msix vectors */
    16,             /* maximum number of ring vectors */
    2,              /* maximum number of other vectors */
    IXGBE_EICR_LSC, /* "other" interrupt types handled */
    0,              /* "other" interrupt types enable mask */
    (IXGBE_FLAG_RSS_CAPABLE
    | IXGBE_FLAG_VMDQ_CAPABLE
    | IXGBE_FLAG_RSC_CAPABLE)   /* capability flags */
};

/*
 * Module Initialization Functions.
 */

int
_init(void)
{
    int status;

    mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);

    status = mod_install(&ixgbe_modlinkage);

    if (status != DDI_SUCCESS) {
        mac_fini_ops(&ixgbe_dev_ops);
    }

    return (status);
}

int
_fini(void)
{
    int status;

    status = mod_remove(&ixgbe_modlinkage);

    if (status == DDI_SUCCESS) {
        mac_fini_ops(&ixgbe_dev_ops);
    }

    return (status);
}

int
_info(struct modinfo *modinfop)
{
    int status;

    status = mod_info(&ixgbe_modlinkage, modinfop);

    return (status);
}

/*
 * ixgbe_attach - Driver attach.
 *
 * This function is the device specific initialization entry
 * point. This entry point is required and must be written.
 * The DDI_ATTACH command must be provided in the attach entry
 * point. When attach() is called with cmd set to DDI_ATTACH,
 * all normal kernel services (such as kmem_alloc(9F)) are
 * available for use by the driver.
 *
 * The attach() function will be called once for each instance
 * of the device on the system with cmd set to DDI_ATTACH.
 * Until attach() succeeds, the only driver entry points which
 * may be called are open(9E) and getinfo(9E).
 */
static int
ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
    ixgbe_t *ixgbe;
    struct ixgbe_osdep *osdep;
    struct ixgbe_hw *hw;
    int instance;
    char taskqname[32];

    /*
     * Check the command and perform corresponding operations
     */
    switch (cmd) {
    default:
        return (DDI_FAILURE);

    case DDI_RESUME:
        return (ixgbe_resume(devinfo));

    case DDI_ATTACH:
        break;
    }

    /* Get the device instance */
    instance = ddi_get_instance(devinfo);

    /* Allocate memory for the instance data structure */
    ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);

    ixgbe->dip = devinfo;
    ixgbe->instance = instance;

    hw = &ixgbe->hw;
    osdep = &ixgbe->osdep;
    hw->back = osdep;
    osdep->ixgbe = ixgbe;

    /* Attach the instance pointer to the dev_info data structure */
    ddi_set_driver_private(devinfo, ixgbe);

    /*
     * Initialize for FMA support
     */
    ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
        0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
        DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
    ixgbe_fm_init(ixgbe);
    ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;
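
    /*
     * Note: each ATTACH_PROGRESS_* bit set below records one completed
     * setup step, so that ixgbe_unconfigure() can undo exactly the
     * work that was done if any later step fails.
     */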

    /*
     * Map PCI config space registers
     */
    if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to map PCI configurations");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;

    /*
     * Identify the chipset family
     */
    if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to identify hardware");
        goto attach_fail;
    }

    /*
     * Map device registers
     */
    if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to map device registers");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;

    /*
     * Initialize driver parameters
     */
    ixgbe_init_properties(ixgbe);
    ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;

    /*
     * Register interrupt callback
     */
    if (ixgbe_intr_cb_register(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to register interrupt callback");
        goto attach_fail;
    }

    /*
     * Allocate interrupts
     */
    if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to allocate interrupts");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;

    /*
     * Allocate rx/tx rings based on the ring numbers.
     * The actual numbers of rx/tx rings are decided by the number of
     * allocated interrupt vectors, so we should allocate the rings
     * after interrupts are allocated.
     */
    if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;

    /*
     * Map rings to interrupt vectors
     */
    if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to map interrupts to vectors");
        goto attach_fail;
    }

    /*
     * Add interrupt handlers
     */
    if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to add interrupt handlers");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
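
    /*
     * The three taskqs below are created with a single thread each,
     * so SFP hot-plug, over-temperature and external PHY events are
     * serialized and handled outside of interrupt context.
     */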

    /*
     * Create a taskq for sfp-change
     */
    (void) sprintf(taskqname, "ixgbe%d_sfp_taskq", instance);
    if ((ixgbe->sfp_taskq = ddi_taskq_create(devinfo, taskqname,
        1, TASKQ_DEFAULTPRI, 0)) == NULL) {
        ixgbe_error(ixgbe, "sfp_taskq create failed");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_SFP_TASKQ;

    /*
     * Create a taskq for over-temp
     */
    (void) sprintf(taskqname, "ixgbe%d_overtemp_taskq", instance);
    if ((ixgbe->overtemp_taskq = ddi_taskq_create(devinfo, taskqname,
        1, TASKQ_DEFAULTPRI, 0)) == NULL) {
        ixgbe_error(ixgbe, "overtemp_taskq create failed");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_OVERTEMP_TASKQ;

    /*
     * Create a taskq for processing external PHY interrupts
     */
    (void) sprintf(taskqname, "ixgbe%d_phy_taskq", instance);
    if ((ixgbe->phy_taskq = ddi_taskq_create(devinfo, taskqname,
        1, TASKQ_DEFAULTPRI, 0)) == NULL) {
        ixgbe_error(ixgbe, "phy_taskq create failed");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_PHY_TASKQ;

    /*
     * Initialize driver parameters
     */
    if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to initialize driver settings");
        goto attach_fail;
    }

    /*
     * Initialize mutexes for this device.
     * Do this before enabling the interrupt handler and registering
     * the softint, to avoid a situation where the interrupt handler
     * can try to use an uninitialized mutex.
     */
    ixgbe_init_locks(ixgbe);
    ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;

    /*
     * Initialize chipset hardware
     */
    if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to initialize adapter");
        goto attach_fail;
    }
    ixgbe->link_check_complete = B_FALSE;
    ixgbe->link_check_hrtime = gethrtime() +
        (IXGBE_LINK_UP_TIME * 100000000ULL);
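    /*
     * gethrtime() returns nanoseconds, so the 100000000ULL multiplier
     * above is 10^8 ns (0.1 s): the link check deadline is
     * IXGBE_LINK_UP_TIME tenths of a second from now.
     */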
    ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;

    if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
        goto attach_fail;
    }

    /*
     * Initialize adapter capabilities
     */
    ixgbe_init_params(ixgbe);

    /*
     * Initialize statistics
     */
    if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to initialize statistics");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;

    /*
     * Register the driver to the MAC
     */
    if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to register MAC");
        goto attach_fail;
    }
    mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
    ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;

    ixgbe->periodic_id = ddi_periodic_add(ixgbe_link_timer, ixgbe,
        IXGBE_CYCLIC_PERIOD, DDI_IPL_0);
    if (ixgbe->periodic_id == 0) {
        ixgbe_error(ixgbe, "Failed to add the link check timer");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_LINK_TIMER;

    /*
     * Now that mutex locks are initialized, and the chip is also
     * initialized, enable interrupts.
     */
    if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;

    ixgbe_log(ixgbe, "%s", ixgbe_ident);
    atomic_or_32(&ixgbe->ixgbe_state, IXGBE_INITIALIZED);

    return (DDI_SUCCESS);

attach_fail:
    ixgbe_unconfigure(devinfo, ixgbe);
    return (DDI_FAILURE);
}

/*
 * ixgbe_detach - Driver detach.
 *
 * The detach() function is the complement of the attach routine.
 * If cmd is set to DDI_DETACH, detach() is used to remove the
 * state associated with a given instance of a device node
 * prior to the removal of that instance from the system.
 *
 * The detach() function will be called once for each instance
 * of the device for which there has been a successful attach()
 * once there are no longer any opens on the device.
 *
 * Interrupt routines are disabled, and all memory allocated by
 * this driver is freed.
 */
static int
ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
    ixgbe_t *ixgbe;

    /*
     * Check detach command
     */
    switch (cmd) {
    default:
        return (DDI_FAILURE);

    case DDI_SUSPEND:
        return (ixgbe_suspend(devinfo));

    case DDI_DETACH:
        break;
    }

    /*
     * Get the pointer to the driver private data structure
     */
    ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
    if (ixgbe == NULL)
        return (DDI_FAILURE);

    /*
     * If the device is still running, it needs to be stopped first.
     * This check is necessary because under some specific circumstances,
     * the detach routine can be called without stopping the interface
     * first.
     */
    if (ixgbe->ixgbe_state & IXGBE_STARTED) {
        atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
        mutex_enter(&ixgbe->gen_lock);
        ixgbe_stop(ixgbe, B_TRUE);
        mutex_exit(&ixgbe->gen_lock);
        /* Disable and stop the watchdog timer */
        ixgbe_disable_watchdog_timer(ixgbe);
    }

    /*
     * Check if there are still rx buffers held by the upper layer.
     * If so, fail the detach.
     */
    if (!ixgbe_rx_drain(ixgbe))
        return (DDI_FAILURE);

    /*
     * Do the remaining unconfigure routines
     */
    ixgbe_unconfigure(devinfo, ixgbe);

    return (DDI_SUCCESS);
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not
 * block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
ixgbe_quiesce(dev_info_t *devinfo)
{
    ixgbe_t *ixgbe;
    struct ixgbe_hw *hw;

    ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);

    if (ixgbe == NULL)
        return (DDI_FAILURE);

    hw = &ixgbe->hw;

    /*
     * Disable the adapter interrupts
     */
    ixgbe_disable_adapter_interrupts(ixgbe);

    /*
     * Tell firmware driver is no longer in control
     */
    ixgbe_release_driver_control(hw);

    /*
     * Reset the chipset
     */
    (void) ixgbe_reset_hw(hw);

    /*
     * Reset PHY
     */
    (void) ixgbe_reset_phy(hw);

    return (DDI_SUCCESS);
}

static void
ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
{
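    /*
     * Teardown mirrors attach: with few exceptions, each step below
     * is undone only if the corresponding ATTACH_PROGRESS_* bit was
     * set, so this routine is safe to call from any attach failure
     * path.
     */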

    /*
     * Disable interrupt
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
        (void) ixgbe_disable_intrs(ixgbe);
    }

    /*
     * remove the link check timer
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_LINK_TIMER) {
        if (ixgbe->periodic_id != NULL) {
            ddi_periodic_delete(ixgbe->periodic_id);
            ixgbe->periodic_id = NULL;
        }
    }

    /*
     * Unregister MAC
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
        (void) mac_unregister(ixgbe->mac_hdl);
    }

    /*
     * Free statistics
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
        kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
    }

    /*
     * Remove interrupt handlers
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
        ixgbe_rem_intr_handlers(ixgbe);
    }

    /*
     * Remove taskq for sfp-status-change
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_SFP_TASKQ) {
        ddi_taskq_destroy(ixgbe->sfp_taskq);
    }

    /*
     * Remove taskq for over-temp
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_OVERTEMP_TASKQ) {
        ddi_taskq_destroy(ixgbe->overtemp_taskq);
    }

    /*
     * Remove taskq for external PHYs
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_PHY_TASKQ) {
        ddi_taskq_destroy(ixgbe->phy_taskq);
    }

    /*
     * Remove interrupts
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
        ixgbe_rem_intrs(ixgbe);
    }

    /*
     * Unregister interrupt callback handler
     */
    (void) ddi_cb_unregister(ixgbe->cb_hdl);

    /*
     * Remove driver properties
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
        (void) ddi_prop_remove_all(devinfo);
    }

    /*
     * Stop the chipset
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
        mutex_enter(&ixgbe->gen_lock);
        ixgbe_chip_stop(ixgbe);
        mutex_exit(&ixgbe->gen_lock);
    }

    /*
     * Free register handle
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
        if (ixgbe->osdep.reg_handle != NULL)
            ddi_regs_map_free(&ixgbe->osdep.reg_handle);
    }

    /*
     * Free PCI config handle
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
        if (ixgbe->osdep.cfg_handle != NULL)
            pci_config_teardown(&ixgbe->osdep.cfg_handle);
    }

    /*
     * Free locks
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
        ixgbe_destroy_locks(ixgbe);
    }

    /*
     * Free the rx/tx rings
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
        ixgbe_free_rings(ixgbe);
    }

    /*
     * Unregister FMA capabilities
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
        ixgbe_fm_fini(ixgbe);
    }

    /*
     * Free the driver data structure
     */
    kmem_free(ixgbe, sizeof (ixgbe_t));

    ddi_set_driver_private(devinfo, NULL);
}

/*
 * ixgbe_register_mac - Register the driver and its function pointers with
 * the GLD interface.
 */
static int
ixgbe_register_mac(ixgbe_t *ixgbe)
{
    struct ixgbe_hw *hw = &ixgbe->hw;
    mac_register_t *mac;
    int status;

    if ((mac = mac_alloc(MAC_VERSION)) == NULL)
        return (IXGBE_FAILURE);

    mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
    mac->m_driver = ixgbe;
    mac->m_dip = ixgbe->dip;
    mac->m_src_addr = hw->mac.addr;
    mac->m_callbacks = &ixgbe_m_callbacks;
    mac->m_min_sdu = 0;
    mac->m_max_sdu = ixgbe->default_mtu;
    mac->m_margin = VLAN_TAGSZ;
    mac->m_priv_props = ixgbe_priv_props;
    mac->m_v12n = MAC_VIRT_LEVEL1;
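
    /*
     * m_margin is set to VLAN_TAGSZ to tell the MAC layer that frames
     * may exceed the advertised SDU by the size of a VLAN tag.
     */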

    status = mac_register(mac, &ixgbe->mac_hdl);

    mac_free(mac);

    return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
}

/*
 * ixgbe_identify_hardware - Identify the type of the chipset.
 */
static int
ixgbe_identify_hardware(ixgbe_t *ixgbe)
{
    struct ixgbe_hw *hw = &ixgbe->hw;
    struct ixgbe_osdep *osdep = &ixgbe->osdep;

    /*
     * Get the device id
     */
    hw->vendor_id =
        pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
    hw->device_id =
        pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
    hw->revision_id =
        pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
    hw->subsystem_device_id =
        pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
    hw->subsystem_vendor_id =
        pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);

    /*
     * Set the mac type of the adapter based on the device id
     */
    if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) {
        return (IXGBE_FAILURE);
    }

    /*
     * Install adapter capabilities
     */
    switch (hw->mac.type) {
    case ixgbe_mac_82598EB:
        IXGBE_DEBUGLOG_0(ixgbe, "identify 82598 adapter\n");
        ixgbe->capab = &ixgbe_82598eb_cap;

        if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
            ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
            ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
            ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN;
        }
        break;

    case ixgbe_mac_82599EB:
        IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
        ixgbe->capab = &ixgbe_82599eb_cap;

        if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
            ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
            ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
            ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
        }
        break;

    case ixgbe_mac_X540:
        IXGBE_DEBUGLOG_0(ixgbe, "identify X540 adapter\n");
        ixgbe->capab = &ixgbe_X540_cap;
        /*
         * For now, X540 is all set in its capab structure.
         * As other X540 variants show up, things can change here.
         */
        break;

    case ixgbe_mac_X550:
    case ixgbe_mac_X550EM_x:
        IXGBE_DEBUGLOG_0(ixgbe, "identify X550 adapter\n");
        ixgbe->capab = &ixgbe_X550_cap;

        if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
            ixgbe->capab->flags |= IXGBE_FLAG_SFP_PLUG_CAPABLE;

        /*
         * Link detection on X552 SFP+ and X552/X557-AT
         */
        if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
            hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
            ixgbe->capab->other_intr |=
                IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
            ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN_X540;
        }
        break;

    default:
        IXGBE_DEBUGLOG_1(ixgbe,
            "adapter not supported in ixgbe_identify_hardware(): %d\n",
            hw->mac.type);
        return (IXGBE_FAILURE);
    }

    return (IXGBE_SUCCESS);
}

/*
 * ixgbe_regs_map - Map the device registers.
 */
static int
ixgbe_regs_map(ixgbe_t *ixgbe)
{
    dev_info_t *devinfo = ixgbe->dip;
    struct ixgbe_hw *hw = &ixgbe->hw;
    struct ixgbe_osdep *osdep = &ixgbe->osdep;
    off_t mem_size;

    /*
     * First get the size of device registers to be mapped.
     */
    if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size)
        != DDI_SUCCESS) {
        return (IXGBE_FAILURE);
    }

    /*
     * Call ddi_regs_map_setup() to map registers
     */
    if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET,
        (caddr_t *)&hw->hw_addr, 0,
        mem_size, &ixgbe_regs_acc_attr,
        &osdep->reg_handle)) != DDI_SUCCESS) {
        return (IXGBE_FAILURE);
    }

    return (IXGBE_SUCCESS);
}

/*
 * ixgbe_init_properties - Initialize driver properties.
 */
static void
ixgbe_init_properties(ixgbe_t *ixgbe)
{
    /*
     * Get conf file properties, including link settings,
     * jumbo frames, ring number, descriptor number, etc.
     */
    ixgbe_get_conf(ixgbe);
}

/*
 * ixgbe_init_driver_settings - Initialize driver settings.
 *
 * The settings include hardware function pointers, bus information,
 * rx/tx rings settings, link state, and any other parameters that
 * need to be setup during driver initialization.
 */
static int
ixgbe_init_driver_settings(ixgbe_t *ixgbe)
{
    struct ixgbe_hw *hw = &ixgbe->hw;
    dev_info_t *devinfo = ixgbe->dip;
    ixgbe_rx_ring_t *rx_ring;
    ixgbe_rx_group_t *rx_group;
    ixgbe_tx_ring_t *tx_ring;
    uint32_t rx_size;
    uint32_t tx_size;
    uint32_t ring_per_group;
    int i;

    /*
     * Initialize chipset specific hardware function pointers
     */
    if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
        return (IXGBE_FAILURE);
    }

    /*
     * Get the system page size
     */
    ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1);

    /*
     * Set rx buffer size
     *
     * The IP header alignment room is counted in the calculation.
     * The rx buffer size is in units of 1 KB, as required by the
     * chipset hardware.
     */
    rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
    ixgbe->rx_buf_size = ((rx_size >> 10) +
        ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;

    /*
     * Set tx buffer size
     */
    tx_size = ixgbe->max_frame_size;
    ixgbe->tx_buf_size = ((tx_size >> 10) +
        ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
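
    /*
     * Both expressions above round a byte count up to the next 1 KB
     * multiple: (size >> 10) counts whole KBs, the masked remainder
     * test adds one more, and << 10 converts back to bytes
     * (e.g. 1520 bytes -> 2048).
     */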

    /*
     * Initialize rx/tx rings/groups parameters
     */
    ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
    for (i = 0; i < ixgbe->num_rx_rings; i++) {
        rx_ring = &ixgbe->rx_rings[i];
        rx_ring->index = i;
        rx_ring->ixgbe = ixgbe;
        rx_ring->group_index = i / ring_per_group;
        rx_ring->hw_index = ixgbe_get_hw_rx_index(ixgbe, i);
    }

    for (i = 0; i < ixgbe->num_rx_groups; i++) {
        rx_group = &ixgbe->rx_groups[i];
        rx_group->index = i;
        rx_group->ixgbe = ixgbe;
    }

    for (i = 0; i < ixgbe->num_tx_rings; i++) {
        tx_ring = &ixgbe->tx_rings[i];
        tx_ring->index = i;
        tx_ring->ixgbe = ixgbe;
        if (ixgbe->tx_head_wb_enable)
            tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
        else
            tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;

        tx_ring->ring_size = ixgbe->tx_ring_size;
        tx_ring->free_list_size = ixgbe->tx_ring_size +
            (ixgbe->tx_ring_size >> 1);
    }
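
    /*
     * Note that the free list is sized at 1.5x the descriptor ring
     * (ring_size + ring_size / 2), presumably so transmission can
     * continue while completed control blocks wait to be recycled.
     */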

    /*
     * Initialize values of interrupt throttling rate
     */
    for (i = 1; i < MAX_INTR_VECTOR; i++)
        ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];

    /*
     * The initial link state should be "unknown"
     */
    ixgbe->link_state = LINK_STATE_UNKNOWN;

    return (IXGBE_SUCCESS);
}

/*
 * ixgbe_init_locks - Initialize locks.
 */
static void
ixgbe_init_locks(ixgbe_t *ixgbe)
{
    ixgbe_rx_ring_t *rx_ring;
    ixgbe_tx_ring_t *tx_ring;
    int i;

    for (i = 0; i < ixgbe->num_rx_rings; i++) {
        rx_ring = &ixgbe->rx_rings[i];
        mutex_init(&rx_ring->rx_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
    }

    for (i = 0; i < ixgbe->num_tx_rings; i++) {
        tx_ring = &ixgbe->tx_rings[i];
        mutex_init(&tx_ring->tx_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
        mutex_init(&tx_ring->recycle_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
        mutex_init(&tx_ring->tcb_head_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
        mutex_init(&tx_ring->tcb_tail_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
    }

    mutex_init(&ixgbe->gen_lock, NULL,
        MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));

    mutex_init(&ixgbe->watchdog_lock, NULL,
        MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
}

/*
 * ixgbe_destroy_locks - Destroy locks.
 */
static void
ixgbe_destroy_locks(ixgbe_t *ixgbe)
{
    ixgbe_rx_ring_t *rx_ring;
    ixgbe_tx_ring_t *tx_ring;
    int i;

    for (i = 0; i < ixgbe->num_rx_rings; i++) {
        rx_ring = &ixgbe->rx_rings[i];
        mutex_destroy(&rx_ring->rx_lock);
    }

    for (i = 0; i < ixgbe->num_tx_rings; i++) {
        tx_ring = &ixgbe->tx_rings[i];
        mutex_destroy(&tx_ring->tx_lock);
        mutex_destroy(&tx_ring->recycle_lock);
        mutex_destroy(&tx_ring->tcb_head_lock);
        mutex_destroy(&tx_ring->tcb_tail_lock);
    }

    mutex_destroy(&ixgbe->gen_lock);
    mutex_destroy(&ixgbe->watchdog_lock);
}

static int
ixgbe_resume(dev_info_t *devinfo)
{
    ixgbe_t *ixgbe;
    int i;

    ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
    if (ixgbe == NULL)
        return (DDI_FAILURE);

    mutex_enter(&ixgbe->gen_lock);

    if (ixgbe->ixgbe_state & IXGBE_STARTED) {
        if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
            mutex_exit(&ixgbe->gen_lock);
            return (DDI_FAILURE);
        }

        /*
         * Enable and start the watchdog timer
         */
        ixgbe_enable_watchdog_timer(ixgbe);
    }

    atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_SUSPENDED);
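
    /*
     * Now that IXGBE_SUSPENDED is cleared, poke each tx ring so the
     * MAC layer retries any transmits that were blocked while the
     * device was suspended.
     */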
    if (ixgbe->ixgbe_state & IXGBE_STARTED) {
        for (i = 0; i < ixgbe->num_tx_rings; i++) {
            mac_tx_ring_update(ixgbe->mac_hdl,
                ixgbe->tx_rings[i].ring_handle);
        }
    }

    mutex_exit(&ixgbe->gen_lock);

    return (DDI_SUCCESS);
}

static int
ixgbe_suspend(dev_info_t *devinfo)
{
    ixgbe_t *ixgbe;

    ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
    if (ixgbe == NULL)
        return (DDI_FAILURE);

    mutex_enter(&ixgbe->gen_lock);

    atomic_or_32(&ixgbe->ixgbe_state, IXGBE_SUSPENDED);
    if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
        mutex_exit(&ixgbe->gen_lock);
        return (DDI_SUCCESS);
    }
    ixgbe_stop(ixgbe, B_FALSE);

    mutex_exit(&ixgbe->gen_lock);

    /*
     * Disable and stop the watchdog timer
     */
    ixgbe_disable_watchdog_timer(ixgbe);

    return (DDI_SUCCESS);
}

/*
 * ixgbe_init - Initialize the device.
 */
static int
ixgbe_init(ixgbe_t *ixgbe)
{
    struct ixgbe_hw *hw = &ixgbe->hw;
    u8 pbanum[IXGBE_PBANUM_LENGTH];
    int rv;

    mutex_enter(&ixgbe->gen_lock);

    /*
     * Configure/Initialize hardware
     */
    rv = ixgbe_init_hw(hw);
    if (rv != IXGBE_SUCCESS) {
        switch (rv) {

        /*
         * The first three errors are not prohibitive to further
         * progress, and are mainly advisory in nature. In the case of
         * a SFP module not being present or not deemed supported by
         * the common code, we advise the operator of this fact but
         * carry on instead of failing hard, as SFPs can be inserted
         * or replaced while the driver is running. In the case of an
         * unknown error, we fail hard, logging the reason and
         * emitting a FMA event.
         */
        case IXGBE_ERR_EEPROM_VERSION:
            ixgbe_error(ixgbe,
                "This Intel 10Gb Ethernet device is pre-release and"
                " contains outdated firmware. Please contact your"
                " hardware vendor for a replacement.");
            break;
        case IXGBE_ERR_SFP_NOT_PRESENT:
            ixgbe_error(ixgbe,
                "No SFP+ module detected on this interface. Please "
                "install a supported SFP+ module for this "
                "interface to become operational.");
            break;
        case IXGBE_ERR_SFP_NOT_SUPPORTED:
            ixgbe_error(ixgbe,
                "Unsupported SFP+ module detected. Please replace "
                "it with a supported SFP+ module per Intel "
                "documentation, or bypass this check with "
                "allow_unsupported_sfp=1 in ixgbe.conf.");
            break;
        default:
            ixgbe_error(ixgbe,
                "Failed to initialize hardware. ixgbe_init_hw "
                "returned %d", rv);
            ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
            goto init_fail;
        }
    }

    /*
     * Need to init eeprom before validating the checksum.
     */
    if (ixgbe_init_eeprom_params(hw) < 0) {
        ixgbe_error(ixgbe,
            "Unable to initialize the eeprom interface.");
        ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
        goto init_fail;
    }

    /*
     * NVM validation
     */
    if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
        /*
         * Some PCI-E parts fail the first check due to
         * the link being in sleep state. Call it again,
         * if it fails a second time it's a real issue.
         */
        if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
            ixgbe_error(ixgbe,
                "Invalid NVM checksum. Please contact "
                "the vendor to update the NVM.");
            ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
            goto init_fail;
        }
    }

    /*
     * Setup default flow control thresholds - enable/disable
     * & flow control type is controlled by ixgbe.conf
     */
    hw->fc.high_water[0] = DEFAULT_FCRTH;
    hw->fc.low_water[0] = DEFAULT_FCRTL;
    hw->fc.pause_time = DEFAULT_FCPAUSE;
    hw->fc.send_xon = B_TRUE;
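
    /*
     * Only the traffic class 0 thresholds are set here; DCB is not
     * configured by this driver, so the remaining high_water/low_water
     * entries are left at zero.
     */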

    /*
     * Initialize flow control
     */
    (void) ixgbe_start_hw(hw);

    /*
     * Initialize link settings
     */
    (void) ixgbe_driver_setup_link(ixgbe, B_FALSE);

    /*
     * Initialize the chipset hardware
     */
    if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
        goto init_fail;
    }

    /*
     * Read identifying information and place in devinfo.
     */
    pbanum[0] = '\0';
    (void) ixgbe_read_pba_string(hw, pbanum, sizeof (pbanum));
    if (*pbanum != '\0') {
        (void) ddi_prop_update_string(DDI_DEV_T_NONE, ixgbe->dip,
            "printed-board-assembly", (char *)pbanum);
    }

    if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
        goto init_fail;
    }

    mutex_exit(&ixgbe->gen_lock);
    return (IXGBE_SUCCESS);

init_fail:
    /*
     * Reset PHY
     */
    (void) ixgbe_reset_phy(hw);

    mutex_exit(&ixgbe->gen_lock);
    ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
    return (IXGBE_FAILURE);
}

/*
 * ixgbe_chip_start - Initialize and start the chipset hardware.
 */
static int
ixgbe_chip_start(ixgbe_t *ixgbe)
{
    struct ixgbe_hw *hw = &ixgbe->hw;
    int i;

    ASSERT(mutex_owned(&ixgbe->gen_lock));

    /*
     * Get the mac address
     * This function should handle SPARC case correctly.
     */
    if (!ixgbe_find_mac_address(ixgbe)) {
        ixgbe_error(ixgbe, "Failed to get the mac address");
        return (IXGBE_FAILURE);
    }

    /*
     * Validate the mac address
     */
    (void) ixgbe_init_rx_addrs(hw);
    if (!is_valid_mac_addr(hw->mac.addr)) {
        ixgbe_error(ixgbe, "Invalid mac address");
        return (IXGBE_FAILURE);
    }

    /*
     * Re-enable relaxed ordering for performance. It is disabled
     * by default in the hardware init.
     */
    if (ixgbe->relax_order_enable == B_TRUE)
        ixgbe_enable_relaxed_ordering(hw);

    /*
     * Setup adapter interrupt vectors
     */
    ixgbe_setup_adapter_vector(ixgbe);

    /*
     * Initialize unicast addresses.
     */
    ixgbe_init_unicst(ixgbe);

    /*
     * Setup and initialize the mctable structures.
     */
    ixgbe_setup_multicst(ixgbe);

    /*
     * Set interrupt throttling rate
     */
    for (i = 0; i < ixgbe->intr_cnt; i++) {
        IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
    }
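
    /*
     * There is one EITR register per allocated vector; every entry of
     * intr_throttling[] was seeded from intr_throttling[0] in
     * ixgbe_init_driver_settings(), so all vectors start out with the
     * same throttling rate.
     */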

    /*
     * Disable Wake-on-LAN
     */
    IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);

    /*
     * Some adapters offer Energy Efficient Ethernet (EEE) support.
     * Due to issues with EEE in e1000g/igb, we disable this by default
     * as a precautionary measure.
     *
     * Currently, the only known adapter which supports EEE in the ixgbe
     * line is 8086,15AB (IXGBE_DEV_ID_X550EM_X_KR), and only after the
     * first revision of it, as well as any X550 with MAC type 6 (non-EM)
     */
    (void) ixgbe_setup_eee(hw, B_FALSE);

    /*
     * Turn on any present SFP Tx laser
     */
    ixgbe_enable_tx_laser(hw);

    /*
     * Power on the PHY
     */
    (void) ixgbe_set_phy_power(hw, B_TRUE);

    /*
     * Save the state of the PHY
     */
    ixgbe_get_hw_state(ixgbe);

    /*
     * Make sure driver has control
     */
    ixgbe_get_driver_control(hw);

    return (IXGBE_SUCCESS);
}

/*
 * ixgbe_chip_stop - Stop the chipset hardware
 */
static void
ixgbe_chip_stop(ixgbe_t *ixgbe)
{
    struct ixgbe_hw *hw = &ixgbe->hw;
    int rv;

    ASSERT(mutex_owned(&ixgbe->gen_lock));

    /*
     * Stop interrupt generation and disable Tx unit
     */
    hw->adapter_stopped = B_FALSE;
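    /*
     * adapter_stopped is cleared first, presumably so that the shared
     * code's stop routine performs a full stop rather than treating
     * the adapter as already stopped.
     */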
    (void) ixgbe_stop_adapter(hw);

    /*
     * Reset the chipset
     */
    (void) ixgbe_reset_hw(hw);

    /*
     * Reset PHY
     */
    (void) ixgbe_reset_phy(hw);

    /*
     * Enter LPLU (Low Power, Link Up) mode, if available. Avoid
     * resetting the PHY while doing so. Else, just power down the PHY.
     */
    if (hw->phy.ops.enter_lplu != NULL) {
        hw->phy.reset_disable = B_TRUE;
        rv = hw->phy.ops.enter_lplu(hw);
        if (rv != IXGBE_SUCCESS)
            ixgbe_error(ixgbe, "Error while entering LPLU: %d", rv);
        hw->phy.reset_disable = B_FALSE;
    } else {
        (void) ixgbe_set_phy_power(hw, B_FALSE);
    }

    /*
     * Turn off any present SFP Tx laser
     * Expected for health and safety reasons
     */
    ixgbe_disable_tx_laser(hw);

    /*
     * Tell firmware driver is no longer in control
     */
    ixgbe_release_driver_control(hw);
}

/*
 * ixgbe_reset - Reset the chipset and re-start the driver.
 *
 * It involves stopping and re-starting the chipset,
 * and re-configuring the rx/tx rings.
 */
static int
ixgbe_reset(ixgbe_t *ixgbe)
{
    int i;

    /*
     * Disable and stop the watchdog timer
     */
    ixgbe_disable_watchdog_timer(ixgbe);

    mutex_enter(&ixgbe->gen_lock);

    ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
    atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);

    ixgbe_stop(ixgbe, B_FALSE);

    if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
        mutex_exit(&ixgbe->gen_lock);
        return (IXGBE_FAILURE);
    }

    /*
     * After resetting, need to recheck the link status.
     */
    ixgbe->link_check_complete = B_FALSE;
    ixgbe->link_check_hrtime = gethrtime() +
        (IXGBE_LINK_UP_TIME * 100000000ULL);

    atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STARTED);

    if (!(ixgbe->ixgbe_state & IXGBE_SUSPENDED)) {
        for (i = 0; i < ixgbe->num_tx_rings; i++) {
            mac_tx_ring_update(ixgbe->mac_hdl,
                ixgbe->tx_rings[i].ring_handle);
        }
    }

    mutex_exit(&ixgbe->gen_lock);

    /*
     * Enable and start the watchdog timer
     */
    ixgbe_enable_watchdog_timer(ixgbe);

    return (IXGBE_SUCCESS);
}

/*
 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
 */
static void
ixgbe_tx_clean(ixgbe_t *ixgbe)
{
    ixgbe_tx_ring_t *tx_ring;
    tx_control_block_t *tcb;
    link_list_t pending_list;
    uint32_t desc_num;
    int i, j;

    LINK_LIST_INIT(&pending_list);

    for (i = 0; i < ixgbe->num_tx_rings; i++) {
        tx_ring = &ixgbe->tx_rings[i];

        mutex_enter(&tx_ring->recycle_lock);

        /*
         * Clean the pending tx data - the pending packets in the
         * work_list that have no chance to be transmitted again.
         *
         * We must ensure the chipset is stopped or the link is down
         * before cleaning the transmit packets.
         */
        desc_num = 0;
        for (j = 0; j < tx_ring->ring_size; j++) {
            tcb = tx_ring->work_list[j];
            if (tcb != NULL) {
                desc_num += tcb->desc_num;

                tx_ring->work_list[j] = NULL;

                ixgbe_free_tcb(tcb);

                LIST_PUSH_TAIL(&pending_list, &tcb->link);
            }
        }

        if (desc_num > 0) {
            atomic_add_32(&tx_ring->tbd_free, desc_num);
            ASSERT(tx_ring->tbd_free == tx_ring->ring_size);

            /*
             * Reset the head and tail pointers of the tbd ring;
             * Reset the writeback head if it's enabled.
             */
            tx_ring->tbd_head = 0;
            tx_ring->tbd_tail = 0;
            if (ixgbe->tx_head_wb_enable)
                *tx_ring->tbd_head_wb = 0;

            IXGBE_WRITE_REG(&ixgbe->hw,
                IXGBE_TDH(tx_ring->index), 0);
            IXGBE_WRITE_REG(&ixgbe->hw,
                IXGBE_TDT(tx_ring->index), 0);
        }

        mutex_exit(&tx_ring->recycle_lock);

        /*
         * Add the tx control blocks in the pending list to
         * the free list.
         */
        ixgbe_put_free_list(tx_ring, &pending_list);
    }
}

/*
 * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
 * transmitted.
 */
static boolean_t
ixgbe_tx_drain(ixgbe_t *ixgbe)
{
    ixgbe_tx_ring_t *tx_ring;
    boolean_t done;
    int i, j;

    /*
     * Wait for a specific time to allow pending tx packets
     * to be transmitted.
     *
     * Check the counter tbd_free to see if transmission is done.
     * No lock protection is needed here.
     *
     * Return B_TRUE if all pending packets have been transmitted;
     * Otherwise return B_FALSE;
     */
    for (i = 0; i < TX_DRAIN_TIME; i++) {

        done = B_TRUE;
        for (j = 0; j < ixgbe->num_tx_rings; j++) {
            tx_ring = &ixgbe->tx_rings[j];
            done = done &&
                (tx_ring->tbd_free == tx_ring->ring_size);
        }

        if (done)
            break;

        msec_delay(1);
    }

    return (done);
}

/*
 * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
 */
static boolean_t
ixgbe_rx_drain(ixgbe_t *ixgbe)
{
    boolean_t done = B_TRUE;
    int i;

    /*
     * Poll the rx free list to check whether the rx buffers held by
     * the upper layer have been released.
     *
     * Check the counter rcb_free to see if all pending buffers are
     * released. No lock protection is needed here.
     *
     * Return B_TRUE if all pending buffers have been released;
     * Otherwise return B_FALSE;
     */
    for (i = 0; i < RX_DRAIN_TIME; i++) {
        done = (ixgbe->rcb_pending == 0);

        if (done)
            break;

        msec_delay(1);
    }

    return (done);
}

/*
 * ixgbe_start - Start the driver/chipset.
 */
int
ixgbe_start(ixgbe_t *ixgbe, boolean_t alloc_buffer)
{
    struct ixgbe_hw *hw = &ixgbe->hw;
    int i;

    ASSERT(mutex_owned(&ixgbe->gen_lock));

    if (alloc_buffer) {
        if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) {
            ixgbe_error(ixgbe,
                "Failed to allocate software receive rings");
            return (IXGBE_FAILURE);
        }

        /* Allocate buffers for all the rx/tx rings */
        if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) {
            ixgbe_error(ixgbe, "Failed to allocate DMA resource");
            return (IXGBE_FAILURE);
        }

        ixgbe->tx_ring_init = B_TRUE;
    } else {
        ixgbe->tx_ring_init = B_FALSE;
    }
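
    /*
     * Lock ordering: all rx ring locks are taken before all tx ring
     * locks, and both are released in reverse order; ixgbe_stop()
     * acquires them in the same order.
     */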
    for (i = 0; i < ixgbe->num_rx_rings; i++)
        mutex_enter(&ixgbe->rx_rings[i].rx_lock);
    for (i = 0; i < ixgbe->num_tx_rings; i++)
        mutex_enter(&ixgbe->tx_rings[i].tx_lock);

    /*
     * Start the chipset hardware
     */
    if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
        goto start_failure;
    }

    /*
     * Configure link now for X550
     *
     * X550 possesses a LPLU (Low-Power Link Up) mode which keeps the
     * resting state of the adapter at a 1Gb FDX speed. Prior to the X550,
     * the resting state of the link would be the maximum speed that
     * autonegotiation will allow (usually 10Gb, infrastructure allowing)
     * so we never bothered with explicitly setting the link to 10Gb as it
     * would already be at that state on driver attach. With X550, we must
     * trigger a re-negotiation of the link in order to switch from a LPLU
     * 1Gb link to 10Gb (cable and link partner permitting.)
     */
    if (hw->mac.type == ixgbe_mac_X550 ||
        hw->mac.type == ixgbe_mac_X550EM_x) {
        (void) ixgbe_driver_setup_link(ixgbe, B_TRUE);
        ixgbe_get_hw_state(ixgbe);
    }

    if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
        goto start_failure;
    }

    /*
     * Setup the rx/tx rings
     */
    ixgbe_setup_rings(ixgbe);

    /*
     * ixgbe_start() will be called when resetting, however if reset
     * happens, we need to clear the ERROR, STALL and OVERTEMP flags
     * before enabling the interrupts.
     */
    atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR
        | IXGBE_STALL | IXGBE_OVERTEMP));

    /*
     * Enable adapter interrupts
     * The interrupts must be enabled after the driver state is set
     * to STARTED
     */
    ixgbe_enable_adapter_interrupts(ixgbe);

    for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->tx_rings[i].tx_lock);
    for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->rx_rings[i].rx_lock);

    return (IXGBE_SUCCESS);

start_failure:
    for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->tx_rings[i].tx_lock);
    for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->rx_rings[i].rx_lock);

    ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);

    return (IXGBE_FAILURE);
}

/*
 * ixgbe_stop - Stop the driver/chipset.
 */
void
ixgbe_stop(ixgbe_t *ixgbe, boolean_t free_buffer)
{
    int i;

    ASSERT(mutex_owned(&ixgbe->gen_lock));

    /*
     * Disable the adapter interrupts
     */
    ixgbe_disable_adapter_interrupts(ixgbe);

    /*
     * Drain the pending tx packets
     */
    (void) ixgbe_tx_drain(ixgbe);

    for (i = 0; i < ixgbe->num_rx_rings; i++)
        mutex_enter(&ixgbe->rx_rings[i].rx_lock);
    for (i = 0; i < ixgbe->num_tx_rings; i++)
        mutex_enter(&ixgbe->tx_rings[i].tx_lock);

    /*
     * Stop the chipset hardware
     */
    ixgbe_chip_stop(ixgbe);

    if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
    }

    /*
     * Clean the pending tx data/resources
     */
    ixgbe_tx_clean(ixgbe);

    for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->tx_rings[i].tx_lock);
    for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->rx_rings[i].rx_lock);

    if (ixgbe->link_state == LINK_STATE_UP) {
        ixgbe->link_state = LINK_STATE_UNKNOWN;
        mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
    }

    if (free_buffer) {
        /*
         * Release the DMA/memory resources of rx/tx rings
         */
        ixgbe_free_dma(ixgbe);
        ixgbe_free_rx_data(ixgbe);
    }
}

/*
 * ixgbe_cbfunc - Driver interface for generic DDI callbacks
 */
/* ARGSUSED */
static int
ixgbe_cbfunc(dev_info_t *dip, ddi_cb_action_t cbaction, void *cbarg,
    void *arg1, void *arg2)
{
    ixgbe_t *ixgbe = (ixgbe_t *)arg1;

    switch (cbaction) {
    /* IRM callback */
    int count;
    case DDI_CB_INTR_ADD:
    case DDI_CB_INTR_REMOVE:
        count = (int)(uintptr_t)cbarg;
        ASSERT(ixgbe->intr_type == DDI_INTR_TYPE_MSIX);
        DTRACE_PROBE2(ixgbe__irm__callback, int, count,
            int, ixgbe->intr_cnt);
        if (ixgbe_intr_adjust(ixgbe, cbaction, count) !=
            DDI_SUCCESS) {
            ixgbe_error(ixgbe,
                "IRM CB: Failed to adjust interrupts");
            goto cb_fail;
        }
        break;
    default:
        IXGBE_DEBUGLOG_1(ixgbe, "DDI CB: action 0x%x NOT supported",
            cbaction);
        return (DDI_ENOTSUP);
    }
    return (DDI_SUCCESS);
cb_fail:
    return (DDI_FAILURE);
}

/*
 * ixgbe_intr_adjust - Adjust interrupt to respond to IRM request.
 */
static int
ixgbe_intr_adjust(ixgbe_t *ixgbe, ddi_cb_action_t cbaction, int count)
{
    int i, rc, actual;

    if (count == 0)
        return (DDI_SUCCESS);

    if ((cbaction == DDI_CB_INTR_ADD &&
        ixgbe->intr_cnt + count > ixgbe->intr_cnt_max) ||
        (cbaction == DDI_CB_INTR_REMOVE &&
        ixgbe->intr_cnt - count < ixgbe->intr_cnt_min))
        return (DDI_FAILURE);

    if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
        return (DDI_FAILURE);
    }

    for (i = 0; i < ixgbe->num_rx_rings; i++)
        mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, NULL);
    for (i = 0; i < ixgbe->num_tx_rings; i++)
        mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, NULL);

    mutex_enter(&ixgbe->gen_lock);
    ixgbe->ixgbe_state &= ~IXGBE_STARTED;
    ixgbe->ixgbe_state |= IXGBE_INTR_ADJUST;
    ixgbe->ixgbe_state |= IXGBE_SUSPENDED;
    mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);

    ixgbe_stop(ixgbe, B_FALSE);
    /*
     * Disable interrupts
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
        rc = ixgbe_disable_intrs(ixgbe);
        ASSERT(rc == IXGBE_SUCCESS);
    }
    ixgbe->attach_progress &= ~ATTACH_PROGRESS_ENABLE_INTR;

    /*
     * Remove interrupt handlers
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
        ixgbe_rem_intr_handlers(ixgbe);
    }
    ixgbe->attach_progress &= ~ATTACH_PROGRESS_ADD_INTR;

    /*
     * Clear vect_map
     */
    bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
    switch (cbaction) {
    case DDI_CB_INTR_ADD:
        rc = ddi_intr_alloc(ixgbe->dip, ixgbe->htable,
            DDI_INTR_TYPE_MSIX, ixgbe->intr_cnt, count, &actual,
            DDI_INTR_ALLOC_NORMAL);
        if (rc != DDI_SUCCESS || actual != count) {
            ixgbe_log(ixgbe, "Adjust interrupts failed."
                "return: %d, irm cb size: %d, actual: %d",
                rc, count, actual);
            goto intr_adjust_fail;
        }
        ixgbe->intr_cnt += count;
        break;
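
    /*
     * Vectors are freed from the tail of htable so the surviving
     * entries [0, intr_cnt - count) remain densely packed.
     */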
2061 "return: %d, irm cb size: %d, actual: %d", 2062 rc, count, actual); 2063 goto intr_adjust_fail; 2064 } 2065 } 2066 ixgbe->intr_cnt -= count; 2067 break; 2068 } 2069 2070 /* 2071 * Get priority for first vector, assume remaining are all the same 2072 */ 2073 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri); 2074 if (rc != DDI_SUCCESS) { 2075 ixgbe_log(ixgbe, 2076 "Get interrupt priority failed: %d", rc); 2077 goto intr_adjust_fail; 2078 } 2079 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap); 2080 if (rc != DDI_SUCCESS) { 2081 ixgbe_log(ixgbe, "Get interrupt cap failed: %d", rc); 2082 goto intr_adjust_fail; 2083 } 2084 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR; 2085 2086 /* 2087 * Map rings to interrupt vectors 2088 */ 2089 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) { 2090 ixgbe_error(ixgbe, 2091 "IRM CB: Failed to map interrupts to vectors"); 2092 goto intr_adjust_fail; 2093 } 2094 2095 /* 2096 * Add interrupt handlers 2097 */ 2098 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) { 2099 ixgbe_error(ixgbe, "IRM CB: Failed to add interrupt handlers"); 2100 goto intr_adjust_fail; 2101 } 2102 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR; 2103 2104 /* 2105 * Now that mutex locks are initialized, and the chip is also 2106 * initialized, enable interrupts. 2107 */ 2108 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) { 2109 ixgbe_error(ixgbe, "IRM CB: Failed to enable DDI interrupts"); 2110 goto intr_adjust_fail; 2111 } 2112 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR; 2113 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) { 2114 ixgbe_error(ixgbe, "IRM CB: Failed to start"); 2115 goto intr_adjust_fail; 2116 } 2117 ixgbe->ixgbe_state &= ~IXGBE_INTR_ADJUST; 2118 ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED; 2119 ixgbe->ixgbe_state |= IXGBE_STARTED; 2120 mutex_exit(&ixgbe->gen_lock); 2121 2122 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2123 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, 2124 ixgbe->htable[ixgbe->rx_rings[i].intr_vector]); 2125 } 2126 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2127 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, 2128 ixgbe->htable[ixgbe->tx_rings[i].intr_vector]); 2129 } 2130 2131 /* Wakeup all Tx rings */ 2132 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2133 mac_tx_ring_update(ixgbe->mac_hdl, 2134 ixgbe->tx_rings[i].ring_handle); 2135 } 2136 2137 IXGBE_DEBUGLOG_3(ixgbe, 2138 "IRM CB: interrupts new value: 0x%x(0x%x:0x%x).", 2139 ixgbe->intr_cnt, ixgbe->intr_cnt_min, ixgbe->intr_cnt_max); 2140 return (DDI_SUCCESS); 2141 2142 intr_adjust_fail: 2143 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 2144 mutex_exit(&ixgbe->gen_lock); 2145 return (DDI_FAILURE); 2146 } 2147 2148 /* 2149 * ixgbe_intr_cb_register - Register interrupt callback function. 2150 */ 2151 static int 2152 ixgbe_intr_cb_register(ixgbe_t *ixgbe) 2153 { 2154 if (ddi_cb_register(ixgbe->dip, DDI_CB_FLAG_INTR, ixgbe_cbfunc, 2155 ixgbe, NULL, &ixgbe->cb_hdl) != DDI_SUCCESS) { 2156 return (IXGBE_FAILURE); 2157 } 2158 IXGBE_DEBUGLOG_0(ixgbe, "Interrupt callback function registered."); 2159 return (IXGBE_SUCCESS); 2160 } 2161 2162 /* 2163 * ixgbe_alloc_rings - Allocate memory space for rx/tx rings. 
2164 */ 2165 static int 2166 ixgbe_alloc_rings(ixgbe_t *ixgbe) 2167 { 2168 /* 2169 * Allocate memory space for rx rings 2170 */ 2171 ixgbe->rx_rings = kmem_zalloc( 2172 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings, 2173 KM_NOSLEEP); 2174 2175 if (ixgbe->rx_rings == NULL) { 2176 return (IXGBE_FAILURE); 2177 } 2178 2179 /* 2180 * Allocate memory space for tx rings 2181 */ 2182 ixgbe->tx_rings = kmem_zalloc( 2183 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings, 2184 KM_NOSLEEP); 2185 2186 if (ixgbe->tx_rings == NULL) { 2187 kmem_free(ixgbe->rx_rings, 2188 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); 2189 ixgbe->rx_rings = NULL; 2190 return (IXGBE_FAILURE); 2191 } 2192 2193 /* 2194 * Allocate memory space for rx ring groups 2195 */ 2196 ixgbe->rx_groups = kmem_zalloc( 2197 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups, 2198 KM_NOSLEEP); 2199 2200 if (ixgbe->rx_groups == NULL) { 2201 kmem_free(ixgbe->rx_rings, 2202 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); 2203 kmem_free(ixgbe->tx_rings, 2204 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings); 2205 ixgbe->rx_rings = NULL; 2206 ixgbe->tx_rings = NULL; 2207 return (IXGBE_FAILURE); 2208 } 2209 2210 return (IXGBE_SUCCESS); 2211 } 2212 2213 /* 2214 * ixgbe_free_rings - Free the memory space of rx/tx rings. 2215 */ 2216 static void 2217 ixgbe_free_rings(ixgbe_t *ixgbe) 2218 { 2219 if (ixgbe->rx_rings != NULL) { 2220 kmem_free(ixgbe->rx_rings, 2221 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); 2222 ixgbe->rx_rings = NULL; 2223 } 2224 2225 if (ixgbe->tx_rings != NULL) { 2226 kmem_free(ixgbe->tx_rings, 2227 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings); 2228 ixgbe->tx_rings = NULL; 2229 } 2230 2231 if (ixgbe->rx_groups != NULL) { 2232 kmem_free(ixgbe->rx_groups, 2233 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups); 2234 ixgbe->rx_groups = NULL; 2235 } 2236 } 2237 2238 static int 2239 ixgbe_alloc_rx_data(ixgbe_t *ixgbe) 2240 { 2241 ixgbe_rx_ring_t *rx_ring; 2242 int i; 2243 2244 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2245 rx_ring = &ixgbe->rx_rings[i]; 2246 if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS) 2247 goto alloc_rx_rings_failure; 2248 } 2249 return (IXGBE_SUCCESS); 2250 2251 alloc_rx_rings_failure: 2252 ixgbe_free_rx_data(ixgbe); 2253 return (IXGBE_FAILURE); 2254 } 2255 2256 static void 2257 ixgbe_free_rx_data(ixgbe_t *ixgbe) 2258 { 2259 ixgbe_rx_ring_t *rx_ring; 2260 ixgbe_rx_data_t *rx_data; 2261 int i; 2262 2263 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2264 rx_ring = &ixgbe->rx_rings[i]; 2265 2266 mutex_enter(&ixgbe->rx_pending_lock); 2267 rx_data = rx_ring->rx_data; 2268 2269 if (rx_data != NULL) { 2270 rx_data->flag |= IXGBE_RX_STOPPED; 2271 2272 if (rx_data->rcb_pending == 0) { 2273 ixgbe_free_rx_ring_data(rx_data); 2274 rx_ring->rx_data = NULL; 2275 } 2276 } 2277 2278 mutex_exit(&ixgbe->rx_pending_lock); 2279 } 2280 } 2281 2282 /* 2283 * ixgbe_setup_rings - Setup rx/tx rings. 2284 */ 2285 static void 2286 ixgbe_setup_rings(ixgbe_t *ixgbe) 2287 { 2288 /* 2289 * Setup the rx/tx rings, including the following: 2290 * 2291 * 1. Setup the descriptor ring and the control block buffers; 2292 * 2. Initialize necessary registers for receive/transmit; 2293 * 3. 
Initialize software pointers/parameters for receive/transmit; 2294 */ 2295 ixgbe_setup_rx(ixgbe); 2296 2297 ixgbe_setup_tx(ixgbe); 2298 } 2299 2300 static void 2301 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring) 2302 { 2303 ixgbe_t *ixgbe = rx_ring->ixgbe; 2304 ixgbe_rx_data_t *rx_data = rx_ring->rx_data; 2305 struct ixgbe_hw *hw = &ixgbe->hw; 2306 rx_control_block_t *rcb; 2307 union ixgbe_adv_rx_desc *rbd; 2308 uint32_t size; 2309 uint32_t buf_low; 2310 uint32_t buf_high; 2311 uint32_t reg_val; 2312 int i; 2313 2314 ASSERT(mutex_owned(&rx_ring->rx_lock)); 2315 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2316 2317 for (i = 0; i < ixgbe->rx_ring_size; i++) { 2318 rcb = rx_data->work_list[i]; 2319 rbd = &rx_data->rbd_ring[i]; 2320 2321 rbd->read.pkt_addr = rcb->rx_buf.dma_address; 2322 rbd->read.hdr_addr = NULL; 2323 } 2324 2325 /* 2326 * Initialize the length register 2327 */ 2328 size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc); 2329 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->hw_index), size); 2330 2331 /* 2332 * Initialize the base address registers 2333 */ 2334 buf_low = (uint32_t)rx_data->rbd_area.dma_address; 2335 buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32); 2336 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->hw_index), buf_high); 2337 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->hw_index), buf_low); 2338 2339 /* 2340 * Setup head & tail pointers 2341 */ 2342 IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->hw_index), 2343 rx_data->ring_size - 1); 2344 IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0); 2345 2346 rx_data->rbd_next = 0; 2347 rx_data->lro_first = 0; 2348 2349 /* 2350 * Setup the Receive Descriptor Control Register (RXDCTL) 2351 * PTHRESH=32 descriptors (half the internal cache) 2352 * HTHRESH=0 descriptors (to minimize latency on fetch) 2353 * WTHRESH defaults to 1 (writeback each descriptor) 2354 */ 2355 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index)); 2356 reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */ 2357 2358 /* Not a valid value for 82599, X540 or X550 */ 2359 if (hw->mac.type == ixgbe_mac_82598EB) { 2360 reg_val |= 0x0020; /* pthresh */ 2361 } 2362 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val); 2363 2364 if (hw->mac.type == ixgbe_mac_82599EB || 2365 hw->mac.type == ixgbe_mac_X540 || 2366 hw->mac.type == ixgbe_mac_X550 || 2367 hw->mac.type == ixgbe_mac_X550EM_x) { 2368 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2369 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS); 2370 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val); 2371 } 2372 2373 /* 2374 * Setup the Split and Replication Receive Control Register. 2375 * Set the rx buffer size and the advanced descriptor type. 
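 * The BSIZEPKT field is expressed in 1 KB units (assuming IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10), so a 2 KB rx buffer, for example, is encoded as 2048 >> 10 = 2.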
2376 */ 2377 reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) | 2378 IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 2379 reg_val |= IXGBE_SRRCTL_DROP_EN; 2380 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val); 2381 } 2382 2383 static void 2384 ixgbe_setup_rx(ixgbe_t *ixgbe) 2385 { 2386 ixgbe_rx_ring_t *rx_ring; 2387 struct ixgbe_hw *hw = &ixgbe->hw; 2388 uint32_t reg_val; 2389 uint32_t ring_mapping; 2390 uint32_t i, index; 2391 uint32_t psrtype_rss_bit; 2392 2393 /* 2394 * Ensure that Rx is disabled while setting up 2395 * the Rx unit and Rx descriptor ring(s) 2396 */ 2397 ixgbe_disable_rx(hw); 2398 2399 /* PSRTYPE must be configured for 82599 */ 2400 if (ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ && 2401 ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ_RSS) { 2402 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | 2403 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR; 2404 reg_val |= IXGBE_PSRTYPE_L2HDR; 2405 reg_val |= 0x80000000; 2406 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val); 2407 } else { 2408 if (ixgbe->num_rx_groups > 32) { 2409 psrtype_rss_bit = 0x20000000; 2410 } else { 2411 psrtype_rss_bit = 0x40000000; 2412 } 2413 for (i = 0; i < ixgbe->capab->max_rx_grp_num; i++) { 2414 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | 2415 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR; 2416 reg_val |= IXGBE_PSRTYPE_L2HDR; 2417 reg_val |= psrtype_rss_bit; 2418 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(i), reg_val); 2419 } 2420 } 2421 2422 /* 2423 * Set filter control in FCTRL to determine which types of packets are 2424 * passed up to the driver. 2425 * - Pass broadcast packets. 2426 * - Do not pass flow control pause frames (82598-specific) 2427 */ 2428 reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL); 2429 reg_val |= IXGBE_FCTRL_BAM; /* Broadcast Accept Mode */ 2430 if (hw->mac.type == ixgbe_mac_82598EB) { 2431 reg_val |= IXGBE_FCTRL_DPF; /* Discard Pause Frames */ 2432 } 2433 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val); 2434 2435 /* 2436 * Hardware checksum settings 2437 */ 2438 if (ixgbe->rx_hcksum_enable) { 2439 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 2440 reg_val |= IXGBE_RXCSUM_IPPCSE; /* IP checksum */ 2441 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val); 2442 } 2443 2444 /* 2445 * Setup VMDq and RSS for multiple receive queues 2446 */ 2447 switch (ixgbe->classify_mode) { 2448 case IXGBE_CLASSIFY_RSS: 2449 /* 2450 * One group, only RSS is needed when more than 2451 * one ring is enabled. 2452 */ 2453 ixgbe_setup_rss(ixgbe); 2454 break; 2455 2456 case IXGBE_CLASSIFY_VMDQ: 2457 /* 2458 * Multiple groups, each group has one ring, 2459 * only VMDq is needed. 2460 */ 2461 ixgbe_setup_vmdq(ixgbe); 2462 break; 2463 2464 case IXGBE_CLASSIFY_VMDQ_RSS: 2465 /* 2466 * Multiple groups and multiple rings, both 2467 * VMDq and RSS are needed. 2468 */ 2469 ixgbe_setup_vmdq_rss(ixgbe); 2470 break; 2471 2472 default: 2473 break; 2474 } 2475 2476 /* 2477 * Enable the receive unit. This must be done after filter 2478 * control is set in FCTRL. On 82598, we disable the descriptor monitor. 2479 * 82598 is the only adapter which defines this RXCTRL option.
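 * The actual RXEN write goes through the shared-code ixgbe_enable_rx_dma() helper below rather than a bare register write, which lets the shared code apply any adapter-specific enable sequencing.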
2480 */ 2481 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 2482 if (hw->mac.type == ixgbe_mac_82598EB) 2483 reg_val |= IXGBE_RXCTRL_DMBYPS; /* descriptor monitor bypass */ 2484 reg_val |= IXGBE_RXCTRL_RXEN; 2485 (void) ixgbe_enable_rx_dma(hw, reg_val); 2486 2487 /* 2488 * ixgbe_setup_rx_ring must be called after configuring RXCTRL 2489 */ 2490 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2491 rx_ring = &ixgbe->rx_rings[i]; 2492 ixgbe_setup_rx_ring(rx_ring); 2493 } 2494 2495 /* 2496 * Setup the per-ring statistics mapping. 2497 */ 2498 ring_mapping = 0; 2499 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2500 index = ixgbe->rx_rings[i].hw_index; 2501 ring_mapping = IXGBE_READ_REG(hw, IXGBE_RQSMR(index >> 2)); 2502 ring_mapping |= (i & 0xF) << (8 * (index & 0x3)); 2503 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(index >> 2), ring_mapping); 2504 } 2505 2506 /* 2507 * The Max Frame Size in MHADD/MAXFRS will be internally increased 2508 * by four bytes if the packet has a VLAN field, so it includes the MTU, 2509 * ethernet header and frame check sequence. 2510 * The register is named MAXFRS on the 82599. 2511 */ 2512 reg_val = IXGBE_READ_REG(hw, IXGBE_MHADD); 2513 reg_val &= ~IXGBE_MHADD_MFS_MASK; 2514 reg_val |= (ixgbe->default_mtu + sizeof (struct ether_header) 2515 + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT; 2516 IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val); 2517 2518 /* 2519 * Setup Jumbo Frame enable bit 2520 */ 2521 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0); 2522 if (ixgbe->default_mtu > ETHERMTU) 2523 reg_val |= IXGBE_HLREG0_JUMBOEN; 2524 else 2525 reg_val &= ~IXGBE_HLREG0_JUMBOEN; 2526 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val); 2527 2528 /* 2529 * Setup RSC for multiple receive queues. 2530 */ 2531 if (ixgbe->lro_enable) { 2532 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2533 /* 2534 * Make sure rx_buf_size * MAXDESC is not greater 2535 * than 65535. 2536 * Intel recommends 4 for the MAXDESC field value.
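 * Worked example: with 16 KB buffers (assuming IXGBE_PKG_BUF_16k is 16384), 16384 * 4 = 65536 would overshoot the limit by one byte, which is why MAXDESC_1 is used for that buffer size below and MAXDESC_4 otherwise.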
2537 */ 2538 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCCTL(i)); 2539 reg_val |= IXGBE_RSCCTL_RSCEN; 2540 if (ixgbe->rx_buf_size == IXGBE_PKG_BUF_16k) 2541 reg_val |= IXGBE_RSCCTL_MAXDESC_1; 2542 else 2543 reg_val |= IXGBE_RSCCTL_MAXDESC_4; 2544 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(i), reg_val); 2545 } 2546 2547 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCDBU); 2548 reg_val |= IXGBE_RSCDBU_RSCACKDIS; 2549 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, reg_val); 2550 2551 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2552 reg_val |= IXGBE_RDRXCTL_RSCACKC; 2553 reg_val |= IXGBE_RDRXCTL_FCOE_WRFIX; 2554 reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; 2555 2556 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val); 2557 } 2558 } 2559 2560 static void 2561 ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring) 2562 { 2563 ixgbe_t *ixgbe = tx_ring->ixgbe; 2564 struct ixgbe_hw *hw = &ixgbe->hw; 2565 uint32_t size; 2566 uint32_t buf_low; 2567 uint32_t buf_high; 2568 uint32_t reg_val; 2569 2570 ASSERT(mutex_owned(&tx_ring->tx_lock)); 2571 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2572 2573 /* 2574 * Initialize the length register 2575 */ 2576 size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc); 2577 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size); 2578 2579 /* 2580 * Initialize the base address registers 2581 */ 2582 buf_low = (uint32_t)tx_ring->tbd_area.dma_address; 2583 buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32); 2584 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low); 2585 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high); 2586 2587 /* 2588 * Setup head & tail pointers 2589 */ 2590 IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0); 2591 IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0); 2592 2593 /* 2594 * Setup head write-back 2595 */ 2596 if (ixgbe->tx_head_wb_enable) { 2597 /* 2598 * The memory of the head write-back is allocated using 2599 * the extra tbd beyond the tail of the tbd ring. 2600 */ 2601 tx_ring->tbd_head_wb = (uint32_t *) 2602 ((uintptr_t)tx_ring->tbd_area.address + size); 2603 *tx_ring->tbd_head_wb = 0; 2604 2605 buf_low = (uint32_t) 2606 (tx_ring->tbd_area.dma_address + size); 2607 buf_high = (uint32_t) 2608 ((tx_ring->tbd_area.dma_address + size) >> 32); 2609 2610 /* Set the head write-back enable bit */ 2611 buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE; 2612 2613 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low); 2614 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high); 2615 2616 /* 2617 * Turn off relaxed ordering for head write back or it will 2618 * cause problems with the tx recycling 2619 */ 2620 2621 reg_val = (hw->mac.type == ixgbe_mac_82598EB) ? 
2622 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) : 2623 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index)); 2624 reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; 2625 if (hw->mac.type == ixgbe_mac_82598EB) { 2626 IXGBE_WRITE_REG(hw, 2627 IXGBE_DCA_TXCTRL(tx_ring->index), reg_val); 2628 } else { 2629 IXGBE_WRITE_REG(hw, 2630 IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val); 2631 } 2632 } else { 2633 tx_ring->tbd_head_wb = NULL; 2634 } 2635 2636 tx_ring->tbd_head = 0; 2637 tx_ring->tbd_tail = 0; 2638 tx_ring->tbd_free = tx_ring->ring_size; 2639 2640 if (ixgbe->tx_ring_init == B_TRUE) { 2641 tx_ring->tcb_head = 0; 2642 tx_ring->tcb_tail = 0; 2643 tx_ring->tcb_free = tx_ring->free_list_size; 2644 } 2645 2646 /* 2647 * Initialize the s/w context structure 2648 */ 2649 bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t)); 2650 } 2651 2652 static void 2653 ixgbe_setup_tx(ixgbe_t *ixgbe) 2654 { 2655 struct ixgbe_hw *hw = &ixgbe->hw; 2656 ixgbe_tx_ring_t *tx_ring; 2657 uint32_t reg_val; 2658 uint32_t ring_mapping; 2659 int i; 2660 2661 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2662 tx_ring = &ixgbe->tx_rings[i]; 2663 ixgbe_setup_tx_ring(tx_ring); 2664 } 2665 2666 /* 2667 * Setup the per-ring statistics mapping. 2668 */ 2669 ring_mapping = 0; 2670 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2671 ring_mapping |= (i & 0xF) << (8 * (i & 0x3)); 2672 if ((i & 0x3) == 0x3) { 2673 switch (hw->mac.type) { 2674 case ixgbe_mac_82598EB: 2675 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), 2676 ring_mapping); 2677 break; 2678 2679 case ixgbe_mac_82599EB: 2680 case ixgbe_mac_X540: 2681 case ixgbe_mac_X550: 2682 case ixgbe_mac_X550EM_x: 2683 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), 2684 ring_mapping); 2685 break; 2686 2687 default: 2688 break; 2689 } 2690 2691 ring_mapping = 0; 2692 } 2693 } 2694 if (i & 0x3) { 2695 switch (hw->mac.type) { 2696 case ixgbe_mac_82598EB: 2697 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping); 2698 break; 2699 2700 case ixgbe_mac_82599EB: 2701 case ixgbe_mac_X540: 2702 case ixgbe_mac_X550: 2703 case ixgbe_mac_X550EM_x: 2704 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping); 2705 break; 2706 2707 default: 2708 break; 2709 } 2710 } 2711 2712 /* 2713 * Enable CRC appending and TX padding (for short tx frames) 2714 */ 2715 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0); 2716 reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN; 2717 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val); 2718 2719 /* 2720 * Enable DMA for 82599, X540 and X550 parts 2721 */ 2722 if (hw->mac.type == ixgbe_mac_82599EB || 2723 hw->mac.type == ixgbe_mac_X540 || 2724 hw->mac.type == ixgbe_mac_X550 || 2725 hw->mac.type == ixgbe_mac_X550EM_x) { 2726 /* DMATXCTL.TE must be set after all Tx config is complete */ 2727 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 2728 reg_val |= IXGBE_DMATXCTL_TE; 2729 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val); 2730 2731 /* Disable arbiter to set MTQC */ 2732 reg_val = IXGBE_READ_REG(hw, IXGBE_RTTDCS); 2733 reg_val |= IXGBE_RTTDCS_ARBDIS; 2734 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val); 2735 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); 2736 reg_val &= ~IXGBE_RTTDCS_ARBDIS; 2737 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val); 2738 } 2739 2740 /* 2741 * Enable the tx queues.
2742 * For the 82599, this must be done after DMATXCTL.TE is set 2743 */ 2744 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2745 tx_ring = &ixgbe->tx_rings[i]; 2746 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index)); 2747 reg_val |= IXGBE_TXDCTL_ENABLE; 2748 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val); 2749 } 2750 } 2751 2752 /* 2753 * ixgbe_setup_rss - Setup receive-side scaling feature. 2754 */ 2755 static void 2756 ixgbe_setup_rss(ixgbe_t *ixgbe) 2757 { 2758 struct ixgbe_hw *hw = &ixgbe->hw; 2759 uint32_t mrqc; 2760 2761 /* 2762 * Initialize RETA/ERETA table 2763 */ 2764 ixgbe_setup_rss_table(ixgbe); 2765 2766 /* 2767 * Enable RSS & perform hash on these packet types 2768 */ 2769 mrqc = IXGBE_MRQC_RSSEN | 2770 IXGBE_MRQC_RSS_FIELD_IPV4 | 2771 IXGBE_MRQC_RSS_FIELD_IPV4_TCP | 2772 IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 2773 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP | 2774 IXGBE_MRQC_RSS_FIELD_IPV6_EX | 2775 IXGBE_MRQC_RSS_FIELD_IPV6 | 2776 IXGBE_MRQC_RSS_FIELD_IPV6_TCP | 2777 IXGBE_MRQC_RSS_FIELD_IPV6_UDP | 2778 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 2779 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2780 } 2781 2782 /* 2783 * ixgbe_setup_vmdq - Setup MAC classification feature 2784 */ 2785 static void 2786 ixgbe_setup_vmdq(ixgbe_t *ixgbe) 2787 { 2788 struct ixgbe_hw *hw = &ixgbe->hw; 2789 uint32_t vmdctl, i, vtctl; 2790 2791 /* 2792 * Setup the VMDq Control register, enable VMDq based on 2793 * the packet destination MAC address. 2794 */ 2795 switch (hw->mac.type) { 2796 case ixgbe_mac_82598EB: 2797 /* 2798 * VMDq Enable = 1; 2799 * VMDq Filter = 0; MAC filtering 2800 * Default VMDq output index = 0; 2801 */ 2802 vmdctl = IXGBE_VMD_CTL_VMDQ_EN; 2803 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); 2804 break; 2805 2806 case ixgbe_mac_82599EB: 2807 case ixgbe_mac_X540: 2808 case ixgbe_mac_X550: 2809 case ixgbe_mac_X550EM_x: 2810 /* 2811 * Enable VMDq-only. 2812 */ 2813 vmdctl = IXGBE_MRQC_VMDQEN; 2814 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl); 2815 2816 for (i = 0; i < hw->mac.num_rar_entries; i++) { 2817 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0); 2818 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0); 2819 } 2820 2821 /* 2822 * Enable Virtualization and Replication. 2823 */ 2824 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN; 2825 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl); 2826 2827 /* 2828 * Enable receiving packets to all VFs 2829 */ 2830 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL); 2831 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL); 2832 break; 2833 2834 default: 2835 break; 2836 } 2837 } 2838 2839 /* 2840 * ixgbe_setup_vmdq_rss - Setup both the vmdq and rss features.
2841 */ 2842 static void 2843 ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe) 2844 { 2845 struct ixgbe_hw *hw = &ixgbe->hw; 2846 uint32_t i, mrqc; 2847 uint32_t vtctl, vmdctl; 2848 2849 /* 2850 * Initialize RETA/ERETA table 2851 */ 2852 ixgbe_setup_rss_table(ixgbe); 2853 2854 /* 2855 * Enable and setup RSS and VMDq 2856 */ 2857 switch (hw->mac.type) { 2858 case ixgbe_mac_82598EB: 2859 /* 2860 * Enable RSS & Setup RSS Hash functions 2861 */ 2862 mrqc = IXGBE_MRQC_RSSEN | 2863 IXGBE_MRQC_RSS_FIELD_IPV4 | 2864 IXGBE_MRQC_RSS_FIELD_IPV4_TCP | 2865 IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 2866 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP | 2867 IXGBE_MRQC_RSS_FIELD_IPV6_EX | 2868 IXGBE_MRQC_RSS_FIELD_IPV6 | 2869 IXGBE_MRQC_RSS_FIELD_IPV6_TCP | 2870 IXGBE_MRQC_RSS_FIELD_IPV6_UDP | 2871 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 2872 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2873 2874 /* 2875 * Enable and Setup VMDq 2876 * VMDq Filter = 0; MAC filtering 2877 * Default VMDq output index = 0; 2878 */ 2879 vmdctl = IXGBE_VMD_CTL_VMDQ_EN; 2880 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); 2881 break; 2882 2883 case ixgbe_mac_82599EB: 2884 case ixgbe_mac_X540: 2885 case ixgbe_mac_X550: 2886 case ixgbe_mac_X550EM_x: 2887 /* 2888 * Enable RSS & Setup RSS Hash functions 2889 */ 2890 mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 | 2891 IXGBE_MRQC_RSS_FIELD_IPV4_TCP | 2892 IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 2893 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP | 2894 IXGBE_MRQC_RSS_FIELD_IPV6_EX | 2895 IXGBE_MRQC_RSS_FIELD_IPV6 | 2896 IXGBE_MRQC_RSS_FIELD_IPV6_TCP | 2897 IXGBE_MRQC_RSS_FIELD_IPV6_UDP | 2898 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 2899 2900 /* 2901 * Enable VMDq+RSS. 2902 */ 2903 if (ixgbe->num_rx_groups > 32) { 2904 mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN; 2905 } else { 2906 mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN; 2907 } 2908 2909 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2910 2911 for (i = 0; i < hw->mac.num_rar_entries; i++) { 2912 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0); 2913 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0); 2914 } 2915 break; 2916 2917 default: 2918 break; 2919 2920 } 2921 2922 if (hw->mac.type == ixgbe_mac_82599EB || 2923 hw->mac.type == ixgbe_mac_X540 || 2924 hw->mac.type == ixgbe_mac_X550 || 2925 hw->mac.type == ixgbe_mac_X550EM_x) { 2926 /* 2927 * Enable Virtualization and Replication. 2928 */ 2929 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN; 2930 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl); 2931 2932 /* 2933 * Enable receiving packets to all VFs 2934 */ 2935 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL); 2936 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL); 2937 } 2938 } 2939 2940 /* 2941 * ixgbe_setup_rss_table - Setup RSS table 2942 */ 2943 static void 2944 ixgbe_setup_rss_table(ixgbe_t *ixgbe) 2945 { 2946 struct ixgbe_hw *hw = &ixgbe->hw; 2947 uint32_t i, j; 2948 uint32_t random; 2949 uint32_t reta; 2950 uint32_t ring_per_group; 2951 uint32_t ring; 2952 uint32_t table_size; 2953 uint32_t index_mult; 2954 uint32_t rxcsum; 2955 2956 /* 2957 * Set multiplier for RETA setup and table size based on MAC type. 2958 * RETA table sizes vary by model: 2959 * 2960 * 82598, 82599, X540: 128 table entries. 2961 * X550: 512 table entries. 2962 */ 2963 index_mult = 0x1; 2964 table_size = 128; 2965 switch (ixgbe->hw.mac.type) { 2966 case ixgbe_mac_82598EB: 2967 index_mult = 0x11; 2968 break; 2969 case ixgbe_mac_X550: 2970 case ixgbe_mac_X550EM_x: 2971 table_size = 512; 2972 break; 2973 default: 2974 break; 2975 } 2976 2977 /* 2978 * Fill out the RSS redirection table. The configuration of the indices 2979 * is hardware-dependent.
2980 * 2981 * 82598: 8 bits wide containing two 4 bit RSS indices 2982 * 82599, X540: 8 bits wide containing one 4 bit RSS index 2983 * X550: 8 bits wide containing one 6 bit RSS index 2984 */ 2985 reta = 0; 2986 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 2987 2988 for (i = 0, j = 0; i < table_size; i++, j++) { 2989 if (j == ring_per_group) j = 0; 2990 2991 /* 2992 * The low 8 bits are for hash value (n+0); 2993 * The next 8 bits are for hash value (n+1), etc. 2994 */ 2995 ring = (j * index_mult); 2996 reta = reta >> 8; 2997 reta = reta | (((uint32_t)ring) << 24); 2998 2999 if ((i & 3) == 3) { 3000 /* 3001 * The first 128 table entries are programmed into the 3002 * RETA register, with any beyond that (e.g. on X550) 3003 * into ERETA. 3004 */ 3005 if (i < 128) 3006 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); 3007 else 3008 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), 3009 reta); 3010 reta = 0; 3011 } 3012 } 3013 /* 3014 * Fill out the hash function seeds with random values 3015 */ 3016 for (i = 0; i < 10; i++) { 3017 (void) random_get_pseudo_bytes((uint8_t *)&random, 3018 sizeof (uint32_t)); 3019 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random); 3020 } 3021 3022 /* 3023 * Disable Packet Checksum to enable RSS for multiple receive queues. 3024 * It is an adapter hardware limitation that Packet Checksum is 3025 * mutually exclusive with RSS. 3026 */ 3027 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 3028 rxcsum |= IXGBE_RXCSUM_PCSD; 3029 rxcsum &= ~IXGBE_RXCSUM_IPPCSE; 3030 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 3031 } 3032 3033 /* 3034 * ixgbe_init_unicst - Initialize the unicast addresses. 3035 */ 3036 static void 3037 ixgbe_init_unicst(ixgbe_t *ixgbe) 3038 { 3039 struct ixgbe_hw *hw = &ixgbe->hw; 3040 uint8_t *mac_addr; 3041 int slot; 3042 /* 3043 * Here we should consider two situations: 3044 * 3045 * 1. The chipset is initialized for the first time: 3046 * clear all the multiple unicast addresses. 3047 * 3048 * 2. The chipset is reset: 3049 * recover the multiple unicast addresses from the 3050 * software data structure to the RAR registers. 3051 */ 3052 if (!ixgbe->unicst_init) { 3053 /* 3054 * Initialize the multiple unicast addresses 3055 */ 3056 ixgbe->unicst_total = hw->mac.num_rar_entries; 3057 ixgbe->unicst_avail = ixgbe->unicst_total; 3058 for (slot = 0; slot < ixgbe->unicst_total; slot++) { 3059 mac_addr = ixgbe->unicst_addr[slot].mac.addr; 3060 bzero(mac_addr, ETHERADDRL); 3061 (void) ixgbe_set_rar(hw, slot, mac_addr, NULL, NULL); 3062 ixgbe->unicst_addr[slot].mac.set = 0; 3063 } 3064 ixgbe->unicst_init = B_TRUE; 3065 } else { 3066 /* Re-configure the RAR registers */ 3067 for (slot = 0; slot < ixgbe->unicst_total; slot++) { 3068 mac_addr = ixgbe->unicst_addr[slot].mac.addr; 3069 if (ixgbe->unicst_addr[slot].mac.set == 1) { 3070 (void) ixgbe_set_rar(hw, slot, mac_addr, 3071 ixgbe->unicst_addr[slot].mac.group_index, 3072 IXGBE_RAH_AV); 3073 } else { 3074 bzero(mac_addr, ETHERADDRL); 3075 (void) ixgbe_set_rar(hw, slot, mac_addr, 3076 NULL, NULL); 3077 } 3078 } 3079 } 3080 } 3081 3082 /* 3083 * ixgbe_unicst_find - Find the slot for the specified unicast address 3084 */ 3085 int 3086 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr) 3087 { 3088 int slot; 3089 3090 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3091 3092 for (slot = 0; slot < ixgbe->unicst_total; slot++) { 3093 if (bcmp(ixgbe->unicst_addr[slot].mac.addr, 3094 mac_addr, ETHERADDRL) == 0) 3095 return (slot); 3096 } 3097 3098 return (-1); 3099 } 3100
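/*
 * Illustrative user-space sketch (not part of the driver) of the RETA
 * packing performed by ixgbe_setup_rss_table() above: four 8-bit
 * entries are shifted into each 32-bit word, with the newest entry in
 * the top byte.  The function and parameter names are local to this
 * example.
 */
#include <stdint.h>
#include <stdio.h>

static void
example_fill_reta(uint32_t table_size, uint32_t ring_per_group,
    uint32_t index_mult)
{
	uint32_t i, j, reta = 0;

	for (i = 0, j = 0; i < table_size; i++, j++) {
		if (j == ring_per_group)
			j = 0;

		/* The low byte holds entry (n+0), the next (n+1), etc. */
		reta = (reta >> 8) | ((j * index_mult) << 24);

		if ((i & 3) == 3) {
			/* This word now covers table entries i-3 .. i. */
			printf("RETA[%u] = 0x%08x\n",
			    (unsigned)(i >> 2), (unsigned)reta);
			reta = 0;
		}
	}
}
/* e.g. example_fill_reta(128, 4, 0x1) prints 32 words of 0x03020100. */

3101 /* 3102 * ixgbe_multicst_add - Add a multicast address.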
3103 */ 3104 int 3105 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr) 3106 { 3107 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3108 3109 if ((multiaddr[0] & 01) == 0) { 3110 return (EINVAL); 3111 } 3112 3113 if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) { 3114 return (ENOENT); 3115 } 3116 3117 bcopy(multiaddr, 3118 &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL); 3119 ixgbe->mcast_count++; 3120 3121 /* 3122 * Update the multicast table in the hardware 3123 */ 3124 ixgbe_setup_multicst(ixgbe); 3125 3126 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 3127 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 3128 return (EIO); 3129 } 3130 3131 return (0); 3132 } 3133 3134 /* 3135 * ixgbe_multicst_remove - Remove a multicast address. 3136 */ 3137 int 3138 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr) 3139 { 3140 int i; 3141 3142 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3143 3144 for (i = 0; i < ixgbe->mcast_count; i++) { 3145 if (bcmp(multiaddr, &ixgbe->mcast_table[i], 3146 ETHERADDRL) == 0) { 3147 for (i++; i < ixgbe->mcast_count; i++) { 3148 ixgbe->mcast_table[i - 1] = 3149 ixgbe->mcast_table[i]; 3150 } 3151 ixgbe->mcast_count--; 3152 break; 3153 } 3154 } 3155 3156 /* 3157 * Update the multicast table in the hardware 3158 */ 3159 ixgbe_setup_multicst(ixgbe); 3160 3161 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 3162 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 3163 return (EIO); 3164 } 3165 3166 return (0); 3167 } 3168 3169 /* 3170 * ixgbe_setup_multicst - Setup multicast data structures. 3171 * 3172 * This routine initializes all of the multicast related structures 3173 * and saves them in the hardware registers. 3174 */ 3175 static void 3176 ixgbe_setup_multicst(ixgbe_t *ixgbe) 3177 { 3178 uint8_t *mc_addr_list; 3179 uint32_t mc_addr_count; 3180 struct ixgbe_hw *hw = &ixgbe->hw; 3181 3182 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3183 3184 ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES); 3185 3186 mc_addr_list = (uint8_t *)ixgbe->mcast_table; 3187 mc_addr_count = ixgbe->mcast_count; 3188 3189 /* 3190 * Update the multicast addresses to the MTA registers 3191 */ 3192 (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count, 3193 ixgbe_mc_table_itr, TRUE); 3194 } 3195 3196 /* 3197 * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode). 3198 * 3199 * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers. 3200 * Different chipsets may have different allowed configurations of vmdq and rss. 3201 */ 3202 static void 3203 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe) 3204 { 3205 struct ixgbe_hw *hw = &ixgbe->hw; 3206 uint32_t ring_per_group; 3207 3208 switch (hw->mac.type) { 3209 case ixgbe_mac_82598EB: 3210 /* 3211 * 82598 supports the following combinations: 3212 * vmdq no. x rss no. 3213 * [5..16] x 1 3214 * [1..4] x [1..16] 3215 * However, 8 rss queues per pool (vmdq) are sufficient for 3216 * most cases. 3217 */ 3218 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 3219 if (ixgbe->num_rx_groups > 4) { 3220 ixgbe->num_rx_rings = ixgbe->num_rx_groups; 3221 } else { 3222 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 3223 min(8, ring_per_group); 3224 } 3225 3226 break; 3227 3228 case ixgbe_mac_82599EB: 3229 case ixgbe_mac_X540: 3230 case ixgbe_mac_X550: 3231 case ixgbe_mac_X550EM_x: 3232 /* 3233 * 82599 supports the following combinations: 3234 * vmdq no. x rss no.
3235 * [33..64] x [1..2] 3236 * [2..32] x [1..4] 3237 * 1 x [1..16] 3238 * However, 8 rss queues per pool (vmdq) are sufficient for 3239 * most cases. 3240 * 3241 * For now, treat X540 and X550 like the 82599. 3242 */ 3243 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 3244 if (ixgbe->num_rx_groups == 1) { 3245 ixgbe->num_rx_rings = min(8, ring_per_group); 3246 } else if (ixgbe->num_rx_groups <= 32) { 3247 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 3248 min(4, ring_per_group); 3249 } else if (ixgbe->num_rx_groups <= 64) { 3250 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 3251 min(2, ring_per_group); 3252 } 3253 break; 3254 3255 default: 3256 break; 3257 } 3258 3259 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 3260 3261 if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) { 3262 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE; 3263 } else if (ixgbe->num_rx_groups != 1 && ring_per_group == 1) { 3264 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ; 3265 } else if (ixgbe->num_rx_groups != 1 && ring_per_group != 1) { 3266 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ_RSS; 3267 } else { 3268 ixgbe->classify_mode = IXGBE_CLASSIFY_RSS; 3269 } 3270 3271 IXGBE_DEBUGLOG_2(ixgbe, "rx group number:%d, rx ring number:%d", 3272 ixgbe->num_rx_groups, ixgbe->num_rx_rings); 3273 } 3274 3275 /* 3276 * ixgbe_get_conf - Get driver configurations set in driver.conf. 3277 * 3278 * This routine gets user-configured values out of the configuration 3279 * file ixgbe.conf. 3280 * 3281 * For each configurable value, there is a minimum, a maximum, and a 3282 * default. 3283 * If the user does not configure a value, use the default. 3284 * If the user configures below the minimum, use the minimum. 3285 * If the user configures above the maximum, use the maximum. 3286 */ 3287 static void 3288 ixgbe_get_conf(ixgbe_t *ixgbe) 3289 { 3290 struct ixgbe_hw *hw = &ixgbe->hw; 3291 uint32_t flow_control; 3292 3293 /* 3294 * The ixgbe driver supports the following user configurations: 3295 * 3296 * Jumbo frame configuration: 3297 * default_mtu 3298 * 3299 * Ethernet flow control configuration: 3300 * flow_control 3301 * 3302 * Multiple rings configurations: 3303 * tx_queue_number 3304 * tx_ring_size 3305 * rx_queue_number 3306 * rx_ring_size 3307 * 3308 * Call ixgbe_get_prop() to get the value for a specific 3309 * configuration parameter. 3310 */ 3311 3312 /* 3313 * Jumbo frame configuration - max_frame_size controls host buffer 3314 * allocation, so it includes the MTU, ethernet header, vlan tag and 3315 * frame check sequence. 3316 */ 3317 ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU, 3318 MIN_MTU, ixgbe->capab->max_mtu, DEFAULT_MTU); 3319 3320 ixgbe->max_frame_size = ixgbe->default_mtu + 3321 sizeof (struct ether_vlan_header) + ETHERFCSL; 3322 3323 /* 3324 * Ethernet flow control configuration 3325 */ 3326 flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL, 3327 ixgbe_fc_none, 3, ixgbe_fc_none); 3328 if (flow_control == 3) 3329 flow_control = ixgbe_fc_default; 3330 3331 /* 3332 * fc.requested mode is what the user requests. After autoneg, 3333 * fc.current_mode will be the flow_control mode that was negotiated.
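 * The two can therefore differ; for example, a request for full flow control may be negotiated down if the link partner only agrees to pause traffic in one direction.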
3334 */ 3335 hw->fc.requested_mode = flow_control; 3336 3337 /* 3338 * Multiple rings configurations 3339 */ 3340 ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM, 3341 ixgbe->capab->min_tx_que_num, 3342 ixgbe->capab->max_tx_que_num, 3343 ixgbe->capab->def_tx_que_num); 3344 ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE, 3345 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE); 3346 3347 ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM, 3348 ixgbe->capab->min_rx_que_num, 3349 ixgbe->capab->max_rx_que_num, 3350 ixgbe->capab->def_rx_que_num); 3351 ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE, 3352 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE); 3353 3354 /* 3355 * Multiple groups configuration 3356 */ 3357 ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM, 3358 ixgbe->capab->min_rx_grp_num, ixgbe->capab->max_rx_grp_num, 3359 ixgbe->capab->def_rx_grp_num); 3360 3361 ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE, 3362 0, 1, DEFAULT_MR_ENABLE); 3363 3364 if (ixgbe->mr_enable == B_FALSE) { 3365 ixgbe->num_tx_rings = 1; 3366 ixgbe->num_rx_rings = 1; 3367 ixgbe->num_rx_groups = 1; 3368 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE; 3369 } else { 3370 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 3371 max(ixgbe->num_rx_rings / ixgbe->num_rx_groups, 1); 3372 /* 3373 * The combination of num_rx_rings and num_rx_groups 3374 * may not be supported by the h/w. We need to adjust 3375 * them to appropriate values. 3376 */ 3377 ixgbe_setup_vmdq_rss_conf(ixgbe); 3378 } 3379 3380 /* 3381 * Tunable used to force an interrupt type. The only use is 3382 * for testing the lesser interrupt types. 3383 * 0 = don't force interrupt type 3384 * 1 = force interrupt type MSI-X 3385 * 2 = force interrupt type MSI 3386 * 3 = force interrupt type Legacy 3387 */ 3388 ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE, 3389 IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE); 3390 3391 ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE, 3392 0, 1, DEFAULT_TX_HCKSUM_ENABLE); 3393 ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE, 3394 0, 1, DEFAULT_RX_HCKSUM_ENABLE); 3395 ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE, 3396 0, 1, DEFAULT_LSO_ENABLE); 3397 ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE, 3398 0, 1, DEFAULT_LRO_ENABLE); 3399 ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE, 3400 0, 1, DEFAULT_TX_HEAD_WB_ENABLE); 3401 ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe, 3402 PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE); 3403 3404 /* Head Write Back is not recommended for 82599, X540 and X550 */ 3405 if (hw->mac.type == ixgbe_mac_82599EB || 3406 hw->mac.type == ixgbe_mac_X540 || 3407 hw->mac.type == ixgbe_mac_X550 || 3408 hw->mac.type == ixgbe_mac_X550EM_x) { 3409 ixgbe->tx_head_wb_enable = B_FALSE; 3410 } 3411 3412 /* 3413 * ixgbe LSO needs tx h/w checksum support. 3414 * LSO will be disabled if tx h/w checksum is not 3415 * enabled. 3416 */ 3417 if (ixgbe->tx_hcksum_enable == B_FALSE) { 3418 ixgbe->lso_enable = B_FALSE; 3419 } 3420 3421 /* 3422 * ixgbe LRO needs rx h/w checksum support. 3423 * LRO will be disabled if rx h/w checksum is not 3424 * enabled.
3425 */ 3426 if (ixgbe->rx_hcksum_enable == B_FALSE) { 3427 ixgbe->lro_enable = B_FALSE; 3428 } 3429 3430 /* 3431 * ixgbe LRO is only supported by the 82599, X540 and X550 3432 */ 3433 if (hw->mac.type == ixgbe_mac_82598EB) { 3434 ixgbe->lro_enable = B_FALSE; 3435 } 3436 ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD, 3437 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD, 3438 DEFAULT_TX_COPY_THRESHOLD); 3439 ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe, 3440 PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD, 3441 MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD); 3442 ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe, 3443 PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD, 3444 MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD); 3445 ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe, 3446 PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD, 3447 MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD); 3448 3449 ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD, 3450 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD, 3451 DEFAULT_RX_COPY_THRESHOLD); 3452 ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR, 3453 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR, 3454 DEFAULT_RX_LIMIT_PER_INTR); 3455 3456 ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING, 3457 ixgbe->capab->min_intr_throttle, 3458 ixgbe->capab->max_intr_throttle, 3459 ixgbe->capab->def_intr_throttle); 3460 /* 3461 * 82599, X540 and X550 require that the interrupt throttling rate be 3462 * a multiple of 8. This is enforced by the register definition. 3463 */ 3464 if (hw->mac.type == ixgbe_mac_82599EB || 3465 hw->mac.type == ixgbe_mac_X540 || 3466 hw->mac.type == ixgbe_mac_X550 || 3467 hw->mac.type == ixgbe_mac_X550EM_x) 3468 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8; 3469 3470 hw->allow_unsupported_sfp = ixgbe_get_prop(ixgbe, 3471 PROP_ALLOW_UNSUPPORTED_SFP, 0, 1, DEFAULT_ALLOW_UNSUPPORTED_SFP); 3472 } 3473 3474 static void 3475 ixgbe_init_params(ixgbe_t *ixgbe) 3476 { 3477 struct ixgbe_hw *hw = &ixgbe->hw; 3478 ixgbe_link_speed speeds_supported = 0; 3479 boolean_t negotiate; 3480 3481 /* 3482 * Get a list of speeds the adapter supports. If the hw struct hasn't 3483 * been populated with this information yet, retrieve it from the 3484 * adapter and save it to our own variable. 3485 * 3486 * On certain adapters, such as ones which use SFPs, the contents of 3487 * hw->phy.speeds_supported (and hw->phy.autoneg_advertised) are not 3488 * updated, so we must rely on calling ixgbe_get_link_capabilities() 3489 * in order to ascertain the speeds which we are capable of supporting, 3490 * and in the case of SFP-equipped adapters, which speed we are 3491 * advertising. If ixgbe_get_link_capabilities() fails for some reason, 3492 * we'll go with a default list of speeds as a last resort. 3493 */ 3494 speeds_supported = hw->phy.speeds_supported; 3495 3496 if (speeds_supported == 0) { 3497 if (ixgbe_get_link_capabilities(hw, &speeds_supported, 3498 &negotiate) != IXGBE_SUCCESS) { 3499 if (hw->mac.type == ixgbe_mac_82598EB) { 3500 speeds_supported = 3501 IXGBE_LINK_SPEED_82598_AUTONEG; 3502 } else { 3503 speeds_supported = 3504 IXGBE_LINK_SPEED_82599_AUTONEG; 3505 } 3506 } 3507 } 3508 ixgbe->speeds_supported = speeds_supported; 3509 3510 /* 3511 * By default, all supported speeds are enabled and advertised.
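 * The param_en_* fields track which speeds are enabled and the param_adv_* fields which are advertised to the link partner; both start out identical here. The param_lp_* fields describe the link partner's capabilities and start at zero.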
3512 */ 3513 if (speeds_supported & IXGBE_LINK_SPEED_10GB_FULL) { 3514 ixgbe->param_en_10000fdx_cap = 1; 3515 ixgbe->param_adv_10000fdx_cap = 1; 3516 } else { 3517 ixgbe->param_en_10000fdx_cap = 0; 3518 ixgbe->param_adv_10000fdx_cap = 0; 3519 } 3520 3521 if (speeds_supported & IXGBE_LINK_SPEED_5GB_FULL) { 3522 ixgbe->param_en_5000fdx_cap = 1; 3523 ixgbe->param_adv_5000fdx_cap = 1; 3524 } else { 3525 ixgbe->param_en_5000fdx_cap = 0; 3526 ixgbe->param_adv_5000fdx_cap = 0; 3527 } 3528 3529 if (speeds_supported & IXGBE_LINK_SPEED_2_5GB_FULL) { 3530 ixgbe->param_en_2500fdx_cap = 1; 3531 ixgbe->param_adv_2500fdx_cap = 1; 3532 } else { 3533 ixgbe->param_en_2500fdx_cap = 0; 3534 ixgbe->param_adv_2500fdx_cap = 0; 3535 } 3536 3537 if (speeds_supported & IXGBE_LINK_SPEED_1GB_FULL) { 3538 ixgbe->param_en_1000fdx_cap = 1; 3539 ixgbe->param_adv_1000fdx_cap = 1; 3540 } else { 3541 ixgbe->param_en_1000fdx_cap = 0; 3542 ixgbe->param_adv_1000fdx_cap = 0; 3543 } 3544 3545 if (speeds_supported & IXGBE_LINK_SPEED_100_FULL) { 3546 ixgbe->param_en_100fdx_cap = 1; 3547 ixgbe->param_adv_100fdx_cap = 1; 3548 } else { 3549 ixgbe->param_en_100fdx_cap = 0; 3550 ixgbe->param_adv_100fdx_cap = 0; 3551 } 3552 3553 ixgbe->param_pause_cap = 1; 3554 ixgbe->param_asym_pause_cap = 1; 3555 ixgbe->param_rem_fault = 0; 3556 3557 ixgbe->param_adv_autoneg_cap = 1; 3558 ixgbe->param_adv_pause_cap = 1; 3559 ixgbe->param_adv_asym_pause_cap = 1; 3560 ixgbe->param_adv_rem_fault = 0; 3561 3562 ixgbe->param_lp_10000fdx_cap = 0; 3563 ixgbe->param_lp_5000fdx_cap = 0; 3564 ixgbe->param_lp_2500fdx_cap = 0; 3565 ixgbe->param_lp_1000fdx_cap = 0; 3566 ixgbe->param_lp_100fdx_cap = 0; 3567 ixgbe->param_lp_autoneg_cap = 0; 3568 ixgbe->param_lp_pause_cap = 0; 3569 ixgbe->param_lp_asym_pause_cap = 0; 3570 ixgbe->param_lp_rem_fault = 0; 3571 } 3572 3573 /* 3574 * ixgbe_get_prop - Get a property value out of the configuration file 3575 * ixgbe.conf. 3576 * 3577 * Caller provides the name of the property, a default value, a minimum 3578 * value, and a maximum value. 3579 * 3580 * Return the configured value of the property, with default, minimum and 3581 * maximum properly applied. 3582 */ 3583 static int 3584 ixgbe_get_prop(ixgbe_t *ixgbe, 3585 char *propname, /* name of the property */ 3586 int minval, /* minimum acceptable value */ 3587 int maxval, /* maximum acceptable value */ 3588 int defval) /* default value */ 3589 { 3590 int value; 3591 3592 /* 3593 * Call ddi_prop_get_int() to read the conf settings 3594 */ 3595 value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip, 3596 DDI_PROP_DONTPASS, propname, defval); 3597 if (value > maxval) 3598 value = maxval; 3599 3600 if (value < minval) 3601 value = minval; 3602 3603 return (value); 3604 } 3605 3606 /* 3607 * ixgbe_driver_setup_link - Use the link properties to set up the link. 3608 */ 3609 int 3610 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw) 3611 { 3612 struct ixgbe_hw *hw = &ixgbe->hw; 3613 ixgbe_link_speed advertised = 0; 3614 3615 /* 3616 * Assemble a list of enabled speeds to auto-negotiate with.
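 * Each param_en_*_cap field that is set to 1 contributes its speed to the advertised mask; if the mask ends up empty, a default autonegotiation list is substituted below.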
3617 */ 3618 if (ixgbe->param_en_10000fdx_cap == 1) 3619 advertised |= IXGBE_LINK_SPEED_10GB_FULL; 3620 3621 if (ixgbe->param_en_5000fdx_cap == 1) 3622 advertised |= IXGBE_LINK_SPEED_5GB_FULL; 3623 3624 if (ixgbe->param_en_2500fdx_cap == 1) 3625 advertised |= IXGBE_LINK_SPEED_2_5GB_FULL; 3626 3627 if (ixgbe->param_en_1000fdx_cap == 1) 3628 advertised |= IXGBE_LINK_SPEED_1GB_FULL; 3629 3630 if (ixgbe->param_en_100fdx_cap == 1) 3631 advertised |= IXGBE_LINK_SPEED_100_FULL; 3632 3633 /* 3634 * As a last resort, autoneg with a default list of speeds. 3635 */ 3636 if (ixgbe->param_adv_autoneg_cap == 1 && advertised == 0) { 3637 ixgbe_notice(ixgbe, "Invalid link settings. Setting link " 3638 "to autonegotiate with full capabilities."); 3639 3640 if (hw->mac.type == ixgbe_mac_82598EB) 3641 advertised = IXGBE_LINK_SPEED_82598_AUTONEG; 3642 else 3643 advertised = IXGBE_LINK_SPEED_82599_AUTONEG; 3644 } 3645 3646 if (setup_hw) { 3647 if (ixgbe_setup_link(&ixgbe->hw, advertised, 3648 ixgbe->param_adv_autoneg_cap) != IXGBE_SUCCESS) { 3649 ixgbe_notice(ixgbe, "Setup link failed on this " 3650 "device."); 3651 return (IXGBE_FAILURE); 3652 } 3653 } 3654 3655 return (IXGBE_SUCCESS); 3656 } 3657 3658 /* 3659 * ixgbe_driver_link_check - Link status processing. 3660 * 3661 * This function can be called in both kernel context and interrupt context. 3662 */ 3663 static void 3664 ixgbe_driver_link_check(ixgbe_t *ixgbe) 3665 { 3666 struct ixgbe_hw *hw = &ixgbe->hw; 3667 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN; 3668 boolean_t link_up = B_FALSE; 3669 boolean_t link_changed = B_FALSE; 3670 3671 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3672 3673 (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE); 3674 if (link_up) { 3675 ixgbe->link_check_complete = B_TRUE; 3676 3677 /* Link is up, enable flow control settings */ 3678 (void) ixgbe_fc_enable(hw); 3679 3680 /* 3681 * The Link is up, check whether it was marked as down earlier 3682 */ 3683 if (ixgbe->link_state != LINK_STATE_UP) { 3684 switch (speed) { 3685 case IXGBE_LINK_SPEED_10GB_FULL: 3686 ixgbe->link_speed = SPEED_10GB; 3687 break; 3688 case IXGBE_LINK_SPEED_5GB_FULL: 3689 ixgbe->link_speed = SPEED_5GB; 3690 break; 3691 case IXGBE_LINK_SPEED_2_5GB_FULL: 3692 ixgbe->link_speed = SPEED_2_5GB; 3693 break; 3694 case IXGBE_LINK_SPEED_1GB_FULL: 3695 ixgbe->link_speed = SPEED_1GB; 3696 break; 3697 case IXGBE_LINK_SPEED_100_FULL: 3698 ixgbe->link_speed = SPEED_100; 3699 } 3700 ixgbe->link_duplex = LINK_DUPLEX_FULL; 3701 ixgbe->link_state = LINK_STATE_UP; 3702 link_changed = B_TRUE; 3703 } 3704 } else { 3705 if (ixgbe->link_check_complete == B_TRUE || 3706 (ixgbe->link_check_complete == B_FALSE && 3707 gethrtime() >= ixgbe->link_check_hrtime)) { 3708 /* 3709 * The link is really down 3710 */ 3711 ixgbe->link_check_complete = B_TRUE; 3712 3713 if (ixgbe->link_state != LINK_STATE_DOWN) { 3714 ixgbe->link_speed = 0; 3715 ixgbe->link_duplex = LINK_DUPLEX_UNKNOWN; 3716 ixgbe->link_state = LINK_STATE_DOWN; 3717 link_changed = B_TRUE; 3718 } 3719 } 3720 } 3721 3722 /* 3723 * If we are in an interrupt context, we need to re-enable the 3724 * interrupt, which was automasked. 3725 */ 3726 if (servicing_interrupt() != 0) { 3727 ixgbe->eims |= IXGBE_EICR_LSC; 3728 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 3729 } 3730 3731 if (link_changed) { 3732 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state); 3733 } 3734 } 3735 3736 /* 3737 * ixgbe_sfp_check - sfp module processing done in taskq only for 82599.
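 * SDP1 indicates a link event on a multispeed fiber port and SDP2 indicates SFP module insertion; in both cases the interrupt is cleared, the link is set up again and the link state is re-checked.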
3738 */ 3739 static void 3740 ixgbe_sfp_check(void *arg) 3741 { 3742 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3743 uint32_t eicr = ixgbe->eicr; 3744 struct ixgbe_hw *hw = &ixgbe->hw; 3745 3746 mutex_enter(&ixgbe->gen_lock); 3747 if (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) { 3748 /* clear the interrupt */ 3749 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 3750 3751 /* if link up, do multispeed fiber setup */ 3752 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG, 3753 B_TRUE); 3754 ixgbe_driver_link_check(ixgbe); 3755 ixgbe_get_hw_state(ixgbe); 3756 } else if (eicr & IXGBE_EICR_GPI_SDP2_BY_MAC(hw)) { 3757 /* clear the interrupt */ 3758 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2_BY_MAC(hw)); 3759 3760 /* if link up, do sfp module setup */ 3761 (void) hw->mac.ops.setup_sfp(hw); 3762 3763 /* do multispeed fiber setup */ 3764 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG, 3765 B_TRUE); 3766 ixgbe_driver_link_check(ixgbe); 3767 ixgbe_get_hw_state(ixgbe); 3768 } 3769 mutex_exit(&ixgbe->gen_lock); 3770 3771 /* 3772 * We need to fully re-check the link later. 3773 */ 3774 ixgbe->link_check_complete = B_FALSE; 3775 ixgbe->link_check_hrtime = gethrtime() + 3776 (IXGBE_LINK_UP_TIME * 100000000ULL); 3777 } 3778 3779 /* 3780 * ixgbe_overtemp_check - overtemp module processing done in taskq 3781 * 3782 * This routine will only be called on adapters with a temperature sensor. 3783 * The indication of over-temperature can be either an SDP0 interrupt or the 3784 * link status change interrupt. 3785 */ 3786 static void 3787 ixgbe_overtemp_check(void *arg) 3788 { 3789 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3790 struct ixgbe_hw *hw = &ixgbe->hw; 3791 uint32_t eicr = ixgbe->eicr; 3792 ixgbe_link_speed speed; 3793 boolean_t link_up; 3794 3795 mutex_enter(&ixgbe->gen_lock); 3796 3797 /* make sure we know the current state of the link */ 3798 (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE); 3799 3800 /* check over-temp condition */ 3801 if (((eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw)) && (!link_up)) || 3802 (eicr & IXGBE_EICR_LSC)) { 3803 if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) { 3804 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP); 3805 3806 /* 3807 * Disable the adapter interrupts 3808 */ 3809 ixgbe_disable_adapter_interrupts(ixgbe); 3810 3811 /* 3812 * Disable Rx/Tx units 3813 */ 3814 (void) ixgbe_stop_adapter(hw); 3815 3816 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 3817 ixgbe_error(ixgbe, 3818 "Problem: Network adapter has been stopped " 3819 "because it has overheated"); 3820 ixgbe_error(ixgbe, 3821 "Action: Restart the computer. " 3822 "If the problem persists, power off the system " 3823 "and replace the adapter"); 3824 } 3825 } 3826 3827 /* write to clear the interrupt */ 3828 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); 3829 3830 mutex_exit(&ixgbe->gen_lock); 3831 } 3832 3833 /* 3834 * ixgbe_phy_check - taskq to process interrupts from an external PHY 3835 * 3836 * This routine will only be called on adapters with external PHYs 3837 * (such as X550) that may be trying to raise our attention to some event. 3838 * Currently, this is limited to claiming PHY overtemperature and link status 3839 * change (LSC) events; however, this may expand to include other things in 3840 * future adapters. 3841 */ 3842 static void 3843 ixgbe_phy_check(void *arg) 3844 { 3845 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3846 struct ixgbe_hw *hw = &ixgbe->hw; 3847 int rv; 3848 3849 mutex_enter(&ixgbe->gen_lock); 3850 3851 /* 3852 * X550 baseT PHY overtemp and LSC events are handled here.
3853 * 3854 * If an overtemp event occurs, it will be reflected in the 3855 * return value of phy.ops.handle_lasi() and the common code will 3856 * automatically power off the baseT PHY. This is our cue to trigger 3857 * an FMA event. 3858 * 3859 * If a link status change event occurs, phy.ops.handle_lasi() will 3860 * automatically initiate a link setup between the integrated KR PHY 3861 * and the external X557 PHY to ensure that the link speed between 3862 * them matches the link speed of the baseT link. 3863 */ 3864 rv = ixgbe_handle_lasi(hw); 3865 3866 if (rv == IXGBE_ERR_OVERTEMP) { 3867 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP); 3868 3869 /* 3870 * Disable the adapter interrupts 3871 */ 3872 ixgbe_disable_adapter_interrupts(ixgbe); 3873 3874 /* 3875 * Disable Rx/Tx units 3876 */ 3877 (void) ixgbe_stop_adapter(hw); 3878 3879 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 3880 ixgbe_error(ixgbe, 3881 "Problem: Network adapter has been stopped due to an " 3882 "overtemperature event being detected."); 3883 ixgbe_error(ixgbe, 3884 "Action: Shut down or restart the computer. If the issue " 3885 "persists, please take action in accordance with the " 3886 "recommendations from your system vendor."); 3887 } 3888 3889 mutex_exit(&ixgbe->gen_lock); 3890 } 3891 3892 /* 3893 * ixgbe_link_timer - timer for link status detection 3894 */ 3895 static void 3896 ixgbe_link_timer(void *arg) 3897 { 3898 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3899 3900 mutex_enter(&ixgbe->gen_lock); 3901 ixgbe_driver_link_check(ixgbe); 3902 mutex_exit(&ixgbe->gen_lock); 3903 } 3904 3905 /* 3906 * ixgbe_local_timer - Driver watchdog function. 3907 * 3908 * This function will handle the transmit stall check and other routines. 3909 */ 3910 static void 3911 ixgbe_local_timer(void *arg) 3912 { 3913 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3914 3915 if (ixgbe->ixgbe_state & IXGBE_OVERTEMP) 3916 goto out; 3917 3918 if (ixgbe->ixgbe_state & IXGBE_ERROR) { 3919 ixgbe->reset_count++; 3920 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS) 3921 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED); 3922 goto out; 3923 } 3924 3925 if (ixgbe_stall_check(ixgbe)) { 3926 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STALL); 3927 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 3928 3929 ixgbe->reset_count++; 3930 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS) 3931 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED); 3932 } 3933 3934 out: 3935 ixgbe_restart_watchdog_timer(ixgbe); 3936 } 3937 3938 /* 3939 * ixgbe_stall_check - Check for transmit stall. 3940 * 3941 * This function checks if the adapter is stalled (in transmit). 3942 * 3943 * It is called each time the watchdog timeout is invoked. 3944 * If the transmit descriptor reclaim continuously fails, 3945 * the watchdog value will increment by 1. If the watchdog 3946 * value exceeds the threshold, the ixgbe is assumed to 3947 * have stalled and needs to be reset.
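 * The watchdog is re-armed roughly once per second (see ixgbe_arm_watchdog_timer()), so the threshold corresponds to that many consecutive seconds of failed descriptor reclaims.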
3948 */ 3949 static boolean_t 3950 ixgbe_stall_check(ixgbe_t *ixgbe) 3951 { 3952 ixgbe_tx_ring_t *tx_ring; 3953 boolean_t result; 3954 int i; 3955 3956 if (ixgbe->link_state != LINK_STATE_UP) 3957 return (B_FALSE); 3958 3959 /* 3960 * If any tx ring is stalled, we'll reset the chipset 3961 */ 3962 result = B_FALSE; 3963 for (i = 0; i < ixgbe->num_tx_rings; i++) { 3964 tx_ring = &ixgbe->tx_rings[i]; 3965 if (tx_ring->tbd_free <= ixgbe->tx_recycle_thresh) { 3966 tx_ring->tx_recycle(tx_ring); 3967 } 3968 3969 if (tx_ring->recycle_fail > 0) 3970 tx_ring->stall_watchdog++; 3971 else 3972 tx_ring->stall_watchdog = 0; 3973 3974 if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) { 3975 result = B_TRUE; 3976 break; 3977 } 3978 } 3979 3980 if (result) { 3981 tx_ring->stall_watchdog = 0; 3982 tx_ring->recycle_fail = 0; 3983 } 3984 3985 return (result); 3986 } 3987 3988 3989 /* 3990 * is_valid_mac_addr - Check if the mac address is valid. 3991 */ 3992 static boolean_t 3993 is_valid_mac_addr(uint8_t *mac_addr) 3994 { 3995 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 }; 3996 const uint8_t addr_test2[6] = 3997 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 3998 3999 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) || 4000 !(bcmp(addr_test2, mac_addr, ETHERADDRL))) 4001 return (B_FALSE); 4002 4003 return (B_TRUE); 4004 } 4005 4006 static boolean_t 4007 ixgbe_find_mac_address(ixgbe_t *ixgbe) 4008 { 4009 #ifdef __sparc 4010 struct ixgbe_hw *hw = &ixgbe->hw; 4011 uchar_t *bytes; 4012 struct ether_addr sysaddr; 4013 uint_t nelts; 4014 int err; 4015 boolean_t found = B_FALSE; 4016 4017 /* 4018 * The "vendor's factory-set address" may already have 4019 * been extracted from the chip, but if the property 4020 * "local-mac-address" is set we use that instead. 4021 * 4022 * We check whether it looks like an array of 6 4023 * bytes (which it should, if OBP set it). If we can't 4024 * make sense of it this way, we'll ignore it. 4025 */ 4026 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 4027 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts); 4028 if (err == DDI_PROP_SUCCESS) { 4029 if (nelts == ETHERADDRL) { 4030 while (nelts--) 4031 hw->mac.addr[nelts] = bytes[nelts]; 4032 found = B_TRUE; 4033 } 4034 ddi_prop_free(bytes); 4035 } 4036 4037 /* 4038 * Look up the OBP property "local-mac-address?". If the user has set 4039 * 'local-mac-address? = false', use "the system address" instead. 4040 */ 4041 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0, 4042 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) { 4043 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) { 4044 if (localetheraddr(NULL, &sysaddr) != 0) { 4045 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL); 4046 found = B_TRUE; 4047 } 4048 } 4049 ddi_prop_free(bytes); 4050 } 4051 4052 /* 4053 * Finally(!), if there's a valid "mac-address" property (created 4054 * if we netbooted from this interface), we must use this instead 4055 * of any of the above to ensure that the NFS/install server doesn't 4056 * get confused by the address changing as illumos takes over! 
4057 */ 4058 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 4059 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts); 4060 if (err == DDI_PROP_SUCCESS) { 4061 if (nelts == ETHERADDRL) { 4062 while (nelts--) 4063 hw->mac.addr[nelts] = bytes[nelts]; 4064 found = B_TRUE; 4065 } 4066 ddi_prop_free(bytes); 4067 } 4068 4069 if (found) { 4070 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL); 4071 return (B_TRUE); 4072 } 4073 #else 4074 _NOTE(ARGUNUSED(ixgbe)); 4075 #endif 4076 4077 return (B_TRUE); 4078 } 4079 4080 #pragma inline(ixgbe_arm_watchdog_timer) 4081 static void 4082 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe) 4083 { 4084 /* 4085 * Fire a watchdog timer 4086 */ 4087 ixgbe->watchdog_tid = 4088 timeout(ixgbe_local_timer, 4089 (void *)ixgbe, 1 * drv_usectohz(1000000)); 4090 4091 } 4092 4093 /* 4094 * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer. 4095 */ 4096 void 4097 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe) 4098 { 4099 mutex_enter(&ixgbe->watchdog_lock); 4100 4101 if (!ixgbe->watchdog_enable) { 4102 ixgbe->watchdog_enable = B_TRUE; 4103 ixgbe->watchdog_start = B_TRUE; 4104 ixgbe_arm_watchdog_timer(ixgbe); 4105 } 4106 4107 mutex_exit(&ixgbe->watchdog_lock); 4108 } 4109 4110 /* 4111 * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer. 4112 */ 4113 void 4114 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe) 4115 { 4116 timeout_id_t tid; 4117 4118 mutex_enter(&ixgbe->watchdog_lock); 4119 4120 ixgbe->watchdog_enable = B_FALSE; 4121 ixgbe->watchdog_start = B_FALSE; 4122 tid = ixgbe->watchdog_tid; 4123 ixgbe->watchdog_tid = 0; 4124 4125 mutex_exit(&ixgbe->watchdog_lock); 4126 4127 if (tid != 0) 4128 (void) untimeout(tid); 4129 } 4130 4131 /* 4132 * ixgbe_start_watchdog_timer - Start the driver watchdog timer. 4133 */ 4134 void 4135 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe) 4136 { 4137 mutex_enter(&ixgbe->watchdog_lock); 4138 4139 if (ixgbe->watchdog_enable) { 4140 if (!ixgbe->watchdog_start) { 4141 ixgbe->watchdog_start = B_TRUE; 4142 ixgbe_arm_watchdog_timer(ixgbe); 4143 } 4144 } 4145 4146 mutex_exit(&ixgbe->watchdog_lock); 4147 } 4148 4149 /* 4150 * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer. 4151 */ 4152 static void 4153 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe) 4154 { 4155 mutex_enter(&ixgbe->watchdog_lock); 4156 4157 if (ixgbe->watchdog_start) 4158 ixgbe_arm_watchdog_timer(ixgbe); 4159 4160 mutex_exit(&ixgbe->watchdog_lock); 4161 } 4162 4163 /* 4164 * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer. 4165 */ 4166 void 4167 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe) 4168 { 4169 timeout_id_t tid; 4170 4171 mutex_enter(&ixgbe->watchdog_lock); 4172 4173 ixgbe->watchdog_start = B_FALSE; 4174 tid = ixgbe->watchdog_tid; 4175 ixgbe->watchdog_tid = 0; 4176 4177 mutex_exit(&ixgbe->watchdog_lock); 4178 4179 if (tid != 0) 4180 (void) untimeout(tid); 4181 } 4182 4183 /* 4184 * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts. 4185 */ 4186 static void 4187 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe) 4188 { 4189 struct ixgbe_hw *hw = &ixgbe->hw; 4190 4191 /* 4192 * mask all interrupts off 4193 */ 4194 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff); 4195 4196 /* 4197 * for MSI-X, also disable autoclear 4198 */ 4199 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) { 4200 IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0); 4201 } 4202 4203 IXGBE_WRITE_FLUSH(hw); 4204 } 4205 4206 /* 4207 * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts. 
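 *
 * A rough sketch of the register roles relied on below: EIMS
 * unmasks interrupt causes, EIAC selects causes that auto-clear
 * on an EICR read, EIAM selects causes that are auto-masked while
 * being serviced, and GPIE holds general-purpose mode bits
 * (MSI-X mode, EIAME, etc.). In MSI-X mode, for example, the
 * queue causes are placed in EIAC so ring interrupts clear
 * themselves, while the "other" causes (bits 29:20) are left for
 * software to clear explicitly.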
4208 */ 4209 static void 4210 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe) 4211 { 4212 struct ixgbe_hw *hw = &ixgbe->hw; 4213 uint32_t eiac, eiam; 4214 uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 4215 4216 /* interrupt types to enable */ 4217 ixgbe->eims = IXGBE_EIMS_ENABLE_MASK; /* shared code default */ 4218 ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER; /* minus tcp timer */ 4219 ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */ 4220 4221 /* enable automask on "other" causes that this adapter can generate */ 4222 eiam = ixgbe->capab->other_intr; 4223 4224 /* 4225 * msi-x mode 4226 */ 4227 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) { 4228 /* enable autoclear but not on bits 29:20 */ 4229 eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR); 4230 4231 /* general purpose interrupt enable */ 4232 gpie |= (IXGBE_GPIE_MSIX_MODE 4233 | IXGBE_GPIE_PBA_SUPPORT 4234 | IXGBE_GPIE_OCD 4235 | IXGBE_GPIE_EIAME); 4236 /* 4237 * non-msi-x mode 4238 */ 4239 } else { 4240 4241 /* disable autoclear, leave gpie at default */ 4242 eiac = 0; 4243 4244 /* 4245 * General purpose interrupt enable. 4246 * For 82599, X540 and X550, extended interrupt 4247 * automask is enabled only in MSI or MSI-X mode 4248 */ 4249 if ((hw->mac.type == ixgbe_mac_82598EB) || 4250 (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) { 4251 gpie |= IXGBE_GPIE_EIAME; 4252 } 4253 } 4254 4255 /* Enable specific "other" interrupt types */ 4256 switch (hw->mac.type) { 4257 case ixgbe_mac_82598EB: 4258 gpie |= ixgbe->capab->other_gpie; 4259 break; 4260 4261 case ixgbe_mac_82599EB: 4262 case ixgbe_mac_X540: 4263 case ixgbe_mac_X550: 4264 case ixgbe_mac_X550EM_x: 4265 gpie |= ixgbe->capab->other_gpie; 4266 4267 /* Enable RSC Delay 8us when LRO enabled */ 4268 if (ixgbe->lro_enable) { 4269 gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT); 4270 } 4271 break; 4272 4273 default: 4274 break; 4275 } 4276 4277 /* write to interrupt control registers */ 4278 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 4279 IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac); 4280 IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam); 4281 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 4282 IXGBE_WRITE_FLUSH(hw); 4283 } 4284 4285 /* 4286 * ixgbe_loopback_ioctl - Loopback support.
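 *
 * Worked example of the buffer sizing below: the driver exports
 * three lb_property_t modes (lb_normal, lb_mac, lb_external), so
 * LB_GET_INFO_SIZE reports 3 * sizeof (lb_property_t) and a
 * subsequent LB_GET_INFO must arrive with ioc_count equal to
 * exactly that many bytes, or the request is rejected with
 * IOC_INVAL.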
4287 */ 4288 enum ioc_reply 4289 ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp) 4290 { 4291 lb_info_sz_t *lbsp; 4292 lb_property_t *lbpp; 4293 uint32_t *lbmp; 4294 uint32_t size; 4295 uint32_t value; 4296 4297 if (mp->b_cont == NULL) 4298 return (IOC_INVAL); 4299 4300 switch (iocp->ioc_cmd) { 4301 default: 4302 return (IOC_INVAL); 4303 4304 case LB_GET_INFO_SIZE: 4305 size = sizeof (lb_info_sz_t); 4306 if (iocp->ioc_count != size) 4307 return (IOC_INVAL); 4308 4309 value = sizeof (lb_normal); 4310 value += sizeof (lb_mac); 4311 value += sizeof (lb_external); 4312 4313 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr; 4314 *lbsp = value; 4315 break; 4316 4317 case LB_GET_INFO: 4318 value = sizeof (lb_normal); 4319 value += sizeof (lb_mac); 4320 value += sizeof (lb_external); 4321 4322 size = value; 4323 if (iocp->ioc_count != size) 4324 return (IOC_INVAL); 4325 4326 value = 0; 4327 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr; 4328 4329 lbpp[value++] = lb_normal; 4330 lbpp[value++] = lb_mac; 4331 lbpp[value++] = lb_external; 4332 break; 4333 4334 case LB_GET_MODE: 4335 size = sizeof (uint32_t); 4336 if (iocp->ioc_count != size) 4337 return (IOC_INVAL); 4338 4339 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 4340 *lbmp = ixgbe->loopback_mode; 4341 break; 4342 4343 case LB_SET_MODE: 4344 size = 0; 4345 if (iocp->ioc_count != sizeof (uint32_t)) 4346 return (IOC_INVAL); 4347 4348 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 4349 if (!ixgbe_set_loopback_mode(ixgbe, *lbmp)) 4350 return (IOC_INVAL); 4351 break; 4352 } 4353 4354 iocp->ioc_count = size; 4355 iocp->ioc_error = 0; 4356 4357 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 4358 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 4359 return (IOC_INVAL); 4360 } 4361 4362 return (IOC_REPLY); 4363 } 4364 4365 /* 4366 * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode. 4367 */ 4368 static boolean_t 4369 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode) 4370 { 4371 if (mode == ixgbe->loopback_mode) 4372 return (B_TRUE); 4373 4374 ixgbe->loopback_mode = mode; 4375 4376 if (mode == IXGBE_LB_NONE) { 4377 /* 4378 * Reset the chip 4379 */ 4380 (void) ixgbe_reset(ixgbe); 4381 return (B_TRUE); 4382 } 4383 4384 mutex_enter(&ixgbe->gen_lock); 4385 4386 switch (mode) { 4387 default: 4388 mutex_exit(&ixgbe->gen_lock); 4389 return (B_FALSE); 4390 4391 case IXGBE_LB_EXTERNAL: 4392 break; 4393 4394 case IXGBE_LB_INTERNAL_MAC: 4395 ixgbe_set_internal_mac_loopback(ixgbe); 4396 break; 4397 } 4398 4399 mutex_exit(&ixgbe->gen_lock); 4400 4401 return (B_TRUE); 4402 } 4403 4404 /* 4405 * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode. 
4406 */ 4407 static void 4408 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe) 4409 { 4410 struct ixgbe_hw *hw; 4411 uint32_t reg; 4412 uint8_t atlas; 4413 4414 hw = &ixgbe->hw; 4415 4416 /* 4417 * Setup MAC loopback 4418 */ 4419 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0); 4420 reg |= IXGBE_HLREG0_LPBK; 4421 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg); 4422 4423 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC); 4424 reg &= ~IXGBE_AUTOC_LMS_MASK; 4425 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg); 4426 4427 /* 4428 * Disable Atlas Tx lanes to keep packets in loopback and not on wire 4429 */ 4430 switch (hw->mac.type) { 4431 case ixgbe_mac_82598EB: 4432 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK, 4433 &atlas); 4434 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN; 4435 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK, 4436 atlas); 4437 4438 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G, 4439 &atlas); 4440 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL; 4441 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G, 4442 atlas); 4443 4444 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G, 4445 &atlas); 4446 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL; 4447 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G, 4448 atlas); 4449 4450 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN, 4451 &atlas); 4452 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL; 4453 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN, 4454 atlas); 4455 break; 4456 4457 case ixgbe_mac_82599EB: 4458 case ixgbe_mac_X540: 4459 case ixgbe_mac_X550: 4460 case ixgbe_mac_X550EM_x: 4461 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC); 4462 reg |= (IXGBE_AUTOC_FLU | 4463 IXGBE_AUTOC_10G_KX4); 4464 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg); 4465 4466 (void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL, 4467 B_FALSE); 4468 break; 4469 4470 default: 4471 break; 4472 } 4473 } 4474 4475 #pragma inline(ixgbe_intr_rx_work) 4476 /* 4477 * ixgbe_intr_rx_work - RX processing of ISR. 4478 */ 4479 static void 4480 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring) 4481 { 4482 mblk_t *mp; 4483 4484 mutex_enter(&rx_ring->rx_lock); 4485 4486 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL); 4487 mutex_exit(&rx_ring->rx_lock); 4488 4489 if (mp != NULL) 4490 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp, 4491 rx_ring->ring_gen_num); 4492 } 4493 4494 #pragma inline(ixgbe_intr_tx_work) 4495 /* 4496 * ixgbe_intr_tx_work - TX processing of ISR. 
4497 */ 4498 static void 4499 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring) 4500 { 4501 ixgbe_t *ixgbe = tx_ring->ixgbe; 4502 4503 /* 4504 * Recycle the tx descriptors 4505 */ 4506 tx_ring->tx_recycle(tx_ring); 4507 4508 /* 4509 * Schedule the re-transmit 4510 */ 4511 if (tx_ring->reschedule && 4512 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) { 4513 tx_ring->reschedule = B_FALSE; 4514 mac_tx_ring_update(tx_ring->ixgbe->mac_hdl, 4515 tx_ring->ring_handle); 4516 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule); 4517 } 4518 } 4519 4520 #pragma inline(ixgbe_intr_other_work) 4521 /* 4522 * ixgbe_intr_other_work - Process interrupt types other than tx/rx 4523 */ 4524 static void 4525 ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr) 4526 { 4527 struct ixgbe_hw *hw = &ixgbe->hw; 4528 4529 ASSERT(mutex_owned(&ixgbe->gen_lock)); 4530 4531 /* 4532 * handle link status change 4533 */ 4534 if (eicr & IXGBE_EICR_LSC) { 4535 ixgbe_driver_link_check(ixgbe); 4536 ixgbe_get_hw_state(ixgbe); 4537 } 4538 4539 /* 4540 * check for fan failure on adapters with fans 4541 */ 4542 if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && 4543 (eicr & IXGBE_EICR_GPI_SDP1)) { 4544 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP); 4545 4546 /* 4547 * Disable the adapter interrupts 4548 */ 4549 ixgbe_disable_adapter_interrupts(ixgbe); 4550 4551 /* 4552 * Disable Rx/Tx units 4553 */ 4554 (void) ixgbe_stop_adapter(&ixgbe->hw); 4555 4556 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 4557 ixgbe_error(ixgbe, 4558 "Problem: Network adapter has been stopped " 4559 "because the fan has stopped.\n"); 4560 ixgbe_error(ixgbe, 4561 "Action: Replace the adapter.\n"); 4562 4563 /* re-enable the interrupt, which was automasked */ 4564 ixgbe->eims |= IXGBE_EICR_GPI_SDP1; 4565 } 4566 4567 /* 4568 * Do SFP check for adapters with hot-plug capability 4569 */ 4570 if ((ixgbe->capab->flags & IXGBE_FLAG_SFP_PLUG_CAPABLE) && 4571 ((eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) || 4572 (eicr & IXGBE_EICR_GPI_SDP2_BY_MAC(hw)))) { 4573 ixgbe->eicr = eicr; 4574 if ((ddi_taskq_dispatch(ixgbe->sfp_taskq, 4575 ixgbe_sfp_check, (void *)ixgbe, 4576 DDI_NOSLEEP)) != DDI_SUCCESS) { 4577 ixgbe_log(ixgbe, "No memory available to dispatch " 4578 "taskq for SFP check"); 4579 } 4580 } 4581 4582 /* 4583 * Do over-temperature check for adapters with temp sensor 4584 */ 4585 if ((ixgbe->capab->flags & IXGBE_FLAG_TEMP_SENSOR_CAPABLE) && 4586 ((eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw)) || 4587 (eicr & IXGBE_EICR_LSC))) { 4588 ixgbe->eicr = eicr; 4589 if ((ddi_taskq_dispatch(ixgbe->overtemp_taskq, 4590 ixgbe_overtemp_check, (void *)ixgbe, 4591 DDI_NOSLEEP)) != DDI_SUCCESS) { 4592 ixgbe_log(ixgbe, "No memory available to dispatch " 4593 "taskq for overtemp check"); 4594 } 4595 } 4596 4597 /* 4598 * Process an external PHY interrupt 4599 */ 4600 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && 4601 (eicr & IXGBE_EICR_GPI_SDP0_X540)) { 4602 ixgbe->eicr = eicr; 4603 if ((ddi_taskq_dispatch(ixgbe->phy_taskq, 4604 ixgbe_phy_check, (void *)ixgbe, 4605 DDI_NOSLEEP)) != DDI_SUCCESS) { 4606 ixgbe_log(ixgbe, "No memory available to dispatch " 4607 "taskq for PHY check"); 4608 } 4609 } 4610 } 4611 4612 /* 4613 * ixgbe_intr_legacy - Interrupt handler for legacy interrupts. 
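 *
 * A note on the claim protocol assumed below: a legacy INTx line
 * may be shared with other devices, so the handler reads EICR and
 * claims the interrupt only if some cause bit is set; a zero EICR
 * means the interrupt belongs to another device and
 * DDI_INTR_UNCLAIMED is returned. Since the EICR read also clears
 * the latched causes, every set bit must be handled in this pass.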
4614 */ 4615 static uint_t 4616 ixgbe_intr_legacy(void *arg1, void *arg2) 4617 { 4618 ixgbe_t *ixgbe = (ixgbe_t *)arg1; 4619 struct ixgbe_hw *hw = &ixgbe->hw; 4620 ixgbe_tx_ring_t *tx_ring; 4621 ixgbe_rx_ring_t *rx_ring; 4622 uint32_t eicr; 4623 mblk_t *mp; 4624 boolean_t tx_reschedule; 4625 uint_t result; 4626 4627 _NOTE(ARGUNUSED(arg2)); 4628 4629 mutex_enter(&ixgbe->gen_lock); 4630 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) { 4631 mutex_exit(&ixgbe->gen_lock); 4632 return (DDI_INTR_UNCLAIMED); 4633 } 4634 4635 mp = NULL; 4636 tx_reschedule = B_FALSE; 4637 4638 /* 4639 * Any bit set in eicr: claim this interrupt 4640 */ 4641 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4642 4643 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 4644 mutex_exit(&ixgbe->gen_lock); 4645 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 4646 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR); 4647 return (DDI_INTR_CLAIMED); 4648 } 4649 4650 if (eicr) { 4651 /* 4652 * For legacy interrupt, we have only one interrupt, 4653 * so we have only one rx ring and one tx ring enabled. 4654 */ 4655 ASSERT(ixgbe->num_rx_rings == 1); 4656 ASSERT(ixgbe->num_tx_rings == 1); 4657 4658 /* 4659 * For legacy interrupt, rx rings[0] will use RTxQ[0]. 4660 */ 4661 if (eicr & 0x1) { 4662 ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE; 4663 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc); 4664 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE; 4665 /* 4666 * Clean the rx descriptors 4667 */ 4668 rx_ring = &ixgbe->rx_rings[0]; 4669 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL); 4670 } 4671 4672 /* 4673 * For legacy interrupt, tx rings[0] will use RTxQ[1]. 4674 */ 4675 if (eicr & 0x2) { 4676 /* 4677 * Recycle the tx descriptors 4678 */ 4679 tx_ring = &ixgbe->tx_rings[0]; 4680 tx_ring->tx_recycle(tx_ring); 4681 4682 /* 4683 * Schedule the re-transmit 4684 */ 4685 tx_reschedule = (tx_ring->reschedule && 4686 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)); 4687 } 4688 4689 /* any interrupt type other than tx/rx */ 4690 if (eicr & ixgbe->capab->other_intr) { 4691 switch (hw->mac.type) { 4692 case ixgbe_mac_82598EB: 4693 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4694 break; 4695 4696 case ixgbe_mac_82599EB: 4697 case ixgbe_mac_X540: 4698 case ixgbe_mac_X550: 4699 case ixgbe_mac_X550EM_x: 4700 ixgbe->eimc = IXGBE_82599_OTHER_INTR; 4701 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc); 4702 break; 4703 4704 default: 4705 break; 4706 } 4707 ixgbe_intr_other_work(ixgbe, eicr); 4708 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4709 } 4710 4711 mutex_exit(&ixgbe->gen_lock); 4712 4713 result = DDI_INTR_CLAIMED; 4714 } else { 4715 mutex_exit(&ixgbe->gen_lock); 4716 4717 /* 4718 * No interrupt cause bits set: don't claim this interrupt. 4719 */ 4720 result = DDI_INTR_UNCLAIMED; 4721 } 4722 4723 /* re-enable the interrupts which were automasked */ 4724 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 4725 4726 /* 4727 * Do the following work outside of the gen_lock 4728 */ 4729 if (mp != NULL) { 4730 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp, 4731 rx_ring->ring_gen_num); 4732 } 4733 4734 if (tx_reschedule) { 4735 tx_ring->reschedule = B_FALSE; 4736 mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle); 4737 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule); 4738 } 4739 4740 return (result); 4741 } 4742 4743 /* 4744 * ixgbe_intr_msi - Interrupt handler for MSI. 
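 *
 * With a single MSI vector, the IVAR programming done in
 * ixgbe_setup_adapter_vector() steers rx ring 0 to EICR bit 0 and
 * tx ring 0 to EICR bit 1, which is why the tests below are
 * against 0x1 and 0x2. An eicr value of 0x3, for example, means
 * both rx and tx work is pending in the same interrupt.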
4745 */ 4746 static uint_t 4747 ixgbe_intr_msi(void *arg1, void *arg2) 4748 { 4749 ixgbe_t *ixgbe = (ixgbe_t *)arg1; 4750 struct ixgbe_hw *hw = &ixgbe->hw; 4751 uint32_t eicr; 4752 4753 _NOTE(ARGUNUSED(arg2)); 4754 4755 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4756 4757 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 4758 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 4759 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR); 4760 return (DDI_INTR_CLAIMED); 4761 } 4762 4763 /* 4764 * For MSI interrupt, we have only one vector, 4765 * so we have only one rx ring and one tx ring enabled. 4766 */ 4767 ASSERT(ixgbe->num_rx_rings == 1); 4768 ASSERT(ixgbe->num_tx_rings == 1); 4769 4770 /* 4771 * For MSI interrupt, rx rings[0] will use RTxQ[0]. 4772 */ 4773 if (eicr & 0x1) { 4774 ixgbe_intr_rx_work(&ixgbe->rx_rings[0]); 4775 } 4776 4777 /* 4778 * For MSI interrupt, tx rings[0] will use RTxQ[1]. 4779 */ 4780 if (eicr & 0x2) { 4781 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]); 4782 } 4783 4784 /* any interrupt type other than tx/rx */ 4785 if (eicr & ixgbe->capab->other_intr) { 4786 mutex_enter(&ixgbe->gen_lock); 4787 switch (hw->mac.type) { 4788 case ixgbe_mac_82598EB: 4789 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4790 break; 4791 4792 case ixgbe_mac_82599EB: 4793 case ixgbe_mac_X540: 4794 case ixgbe_mac_X550: 4795 case ixgbe_mac_X550EM_x: 4796 ixgbe->eimc = IXGBE_82599_OTHER_INTR; 4797 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc); 4798 break; 4799 4800 default: 4801 break; 4802 } 4803 ixgbe_intr_other_work(ixgbe, eicr); 4804 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4805 mutex_exit(&ixgbe->gen_lock); 4806 } 4807 4808 /* re-enable the interrupts which were automasked */ 4809 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 4810 4811 return (DDI_INTR_CLAIMED); 4812 } 4813 4814 /* 4815 * ixgbe_intr_msix - Interrupt handler for MSI-X. 
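 *
 * Sketch of the per-vector dispatch (ring numbers hypothetical):
 * each vector carries bitmaps (rx_map, tx_map, other_map) built
 * by ixgbe_map_intrs_to_vectors(). If vector 2 owns rx rings 2
 * and 5, bt_getlowbit() walks rx_map returning 2, then 5, then
 * -1, and ixgbe_intr_rx_work() runs for each; the association is
 * driver bookkeeping, not something re-read from hardware here.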
4816 */ 4817 static uint_t 4818 ixgbe_intr_msix(void *arg1, void *arg2) 4819 { 4820 ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1; 4821 ixgbe_t *ixgbe = vect->ixgbe; 4822 struct ixgbe_hw *hw = &ixgbe->hw; 4823 uint32_t eicr; 4824 int r_idx = 0; 4825 4826 _NOTE(ARGUNUSED(arg2)); 4827 4828 /* 4829 * Clean each rx ring that has its bit set in the map 4830 */ 4831 r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1)); 4832 while (r_idx >= 0) { 4833 ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]); 4834 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1), 4835 (ixgbe->num_rx_rings - 1)); 4836 } 4837 4838 /* 4839 * Clean each tx ring that has its bit set in the map 4840 */ 4841 r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1)); 4842 while (r_idx >= 0) { 4843 ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]); 4844 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1), 4845 (ixgbe->num_tx_rings - 1)); 4846 } 4847 4848 4849 /* 4850 * Clean other interrupt (link change) that has its bit set in the map 4851 */ 4852 if (BT_TEST(vect->other_map, 0) == 1) { 4853 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4854 4855 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != 4856 DDI_FM_OK) { 4857 ddi_fm_service_impact(ixgbe->dip, 4858 DDI_SERVICE_DEGRADED); 4859 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR); 4860 return (DDI_INTR_CLAIMED); 4861 } 4862 4863 /* 4864 * Check "other" cause bits: any interrupt type other than tx/rx 4865 */ 4866 if (eicr & ixgbe->capab->other_intr) { 4867 mutex_enter(&ixgbe->gen_lock); 4868 switch (hw->mac.type) { 4869 case ixgbe_mac_82598EB: 4870 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4871 ixgbe_intr_other_work(ixgbe, eicr); 4872 break; 4873 4874 case ixgbe_mac_82599EB: 4875 case ixgbe_mac_X540: 4876 case ixgbe_mac_X550: 4877 case ixgbe_mac_X550EM_x: 4878 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE; 4879 ixgbe_intr_other_work(ixgbe, eicr); 4880 break; 4881 4882 default: 4883 break; 4884 } 4885 mutex_exit(&ixgbe->gen_lock); 4886 } 4887 4888 /* re-enable the interrupts which were automasked */ 4889 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 4890 } 4891 4892 return (DDI_INTR_CLAIMED); 4893 } 4894 4895 /* 4896 * ixgbe_alloc_intrs - Allocate interrupts for the driver. 4897 * 4898 * Normal sequence is to try MSI-X; if not successful, try MSI; 4899 * if not successful, try Legacy. 4900 * ixgbe->intr_force can be used to force sequence to start with 4901 * any of the 3 types. 4902 * If MSI-X is not used, number of tx/rx rings is forced to 1.
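 *
 * For example: with ixgbe->intr_force at its default, the order
 * tried is MSI-X, then MSI, then Legacy. Forcing it to
 * IXGBE_INTR_MSI makes the MSI-X attempt fail its
 * (intr_force <= IXGBE_INTR_MSIX) check, so allocation starts
 * directly at MSI and can still fall back to Legacy.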
4903 */ 4904 static int 4905 ixgbe_alloc_intrs(ixgbe_t *ixgbe) 4906 { 4907 dev_info_t *devinfo; 4908 int intr_types; 4909 int rc; 4910 4911 devinfo = ixgbe->dip; 4912 4913 /* 4914 * Get supported interrupt types 4915 */ 4916 rc = ddi_intr_get_supported_types(devinfo, &intr_types); 4917 4918 if (rc != DDI_SUCCESS) { 4919 ixgbe_log(ixgbe, 4920 "Get supported interrupt types failed: %d", rc); 4921 return (IXGBE_FAILURE); 4922 } 4923 IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types); 4924 4925 ixgbe->intr_type = 0; 4926 4927 /* 4928 * Install MSI-X interrupts 4929 */ 4930 if ((intr_types & DDI_INTR_TYPE_MSIX) && 4931 (ixgbe->intr_force <= IXGBE_INTR_MSIX)) { 4932 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX); 4933 if (rc == IXGBE_SUCCESS) 4934 return (IXGBE_SUCCESS); 4935 4936 ixgbe_log(ixgbe, 4937 "Allocate MSI-X failed, trying MSI interrupts..."); 4938 } 4939 4940 /* 4941 * MSI-X not used, force rings and groups to 1 4942 */ 4943 ixgbe->num_rx_rings = 1; 4944 ixgbe->num_rx_groups = 1; 4945 ixgbe->num_tx_rings = 1; 4946 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE; 4947 ixgbe_log(ixgbe, 4948 "MSI-X not used, force rings and groups number to 1"); 4949 4950 /* 4951 * Install MSI interrupts 4952 */ 4953 if ((intr_types & DDI_INTR_TYPE_MSI) && 4954 (ixgbe->intr_force <= IXGBE_INTR_MSI)) { 4955 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI); 4956 if (rc == IXGBE_SUCCESS) 4957 return (IXGBE_SUCCESS); 4958 4959 ixgbe_log(ixgbe, 4960 "Allocate MSI failed, trying Legacy interrupts..."); 4961 } 4962 4963 /* 4964 * Install legacy interrupts 4965 */ 4966 if (intr_types & DDI_INTR_TYPE_FIXED) { 4967 /* 4968 * Disallow legacy interrupts for X550. X550 has a silicon 4969 * bug which prevents Shared Legacy interrupts from working. 4970 * For details, please reference: 4971 * 4972 * Intel Ethernet Controller X550 Specification Update rev. 2.1 4973 * May 2016, erratum 22: PCIe Interrupt Status Bit 4974 */ 4975 if (ixgbe->hw.mac.type == ixgbe_mac_X550 || 4976 ixgbe->hw.mac.type == ixgbe_mac_X550EM_x || 4977 ixgbe->hw.mac.type == ixgbe_mac_X550_vf || 4978 ixgbe->hw.mac.type == ixgbe_mac_X550EM_x_vf) { 4979 ixgbe_log(ixgbe, 4980 "Legacy interrupts are not supported on this " 4981 "adapter. Please use MSI or MSI-X instead."); 4982 return (IXGBE_FAILURE); 4983 } 4984 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED); 4985 if (rc == IXGBE_SUCCESS) 4986 return (IXGBE_SUCCESS); 4987 4988 ixgbe_log(ixgbe, 4989 "Allocate Legacy interrupts failed"); 4990 } 4991 4992 /* 4993 * If none of the 3 types succeeded, return failure 4994 */ 4995 return (IXGBE_FAILURE); 4996 } 4997 4998 /* 4999 * ixgbe_alloc_intr_handles - Allocate interrupt handles. 5000 * 5001 * For legacy and MSI, only 1 handle is needed. For MSI-X, 5002 * if fewer than 2 handles are available, return failure. 5003 * Upon success, this maps the vectors to rx and tx rings for 5004 * interrupts. 
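 *
 * Worked example of the MSI-X request sizing below (ring counts
 * hypothetical): with 8 rx rings and 8 tx rings the ideal count
 * is 8 + 8 = 16, so request = min(16, 16) = 16; if the adapter's
 * capab->max_ring_vect were 8, request would then be clamped to
 * 8. minimum stays 1, so even a single granted vector is usable.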
5005 */ 5006 static int 5007 ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type) 5008 { 5009 dev_info_t *devinfo; 5010 int request, count, actual; 5011 int minimum; 5012 int rc; 5013 uint32_t ring_per_group; 5014 5015 devinfo = ixgbe->dip; 5016 5017 switch (intr_type) { 5018 case DDI_INTR_TYPE_FIXED: 5019 request = 1; /* Request 1 legacy interrupt handle */ 5020 minimum = 1; 5021 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy"); 5022 break; 5023 5024 case DDI_INTR_TYPE_MSI: 5025 request = 1; /* Request 1 MSI interrupt handle */ 5026 minimum = 1; 5027 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI"); 5028 break; 5029 5030 case DDI_INTR_TYPE_MSIX: 5031 /* 5032 * Best number of vectors for the adapter is 5033 * (# rx rings + # tx rings), however we will 5034 * limit the number requested. 5035 */ 5036 request = min(16, ixgbe->num_rx_rings + ixgbe->num_tx_rings); 5037 if (request > ixgbe->capab->max_ring_vect) 5038 request = ixgbe->capab->max_ring_vect; 5039 minimum = 1; 5040 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X"); 5041 break; 5042 5043 default: 5044 ixgbe_log(ixgbe, 5045 "invalid call to ixgbe_alloc_intr_handles(): %d\n", 5046 intr_type); 5047 return (IXGBE_FAILURE); 5048 } 5049 IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d minimum: %d", 5050 request, minimum); 5051 5052 /* 5053 * Get number of supported interrupts 5054 */ 5055 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count); 5056 if ((rc != DDI_SUCCESS) || (count < minimum)) { 5057 ixgbe_log(ixgbe, 5058 "Get interrupt number failed. Return: %d, count: %d", 5059 rc, count); 5060 return (IXGBE_FAILURE); 5061 } 5062 IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count); 5063 5064 actual = 0; 5065 ixgbe->intr_cnt = 0; 5066 ixgbe->intr_cnt_max = 0; 5067 ixgbe->intr_cnt_min = 0; 5068 5069 /* 5070 * Allocate an array of interrupt handles 5071 */ 5072 ixgbe->intr_size = request * sizeof (ddi_intr_handle_t); 5073 ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP); 5074 5075 rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0, 5076 request, &actual, DDI_INTR_ALLOC_NORMAL); 5077 if (rc != DDI_SUCCESS) { 5078 ixgbe_log(ixgbe, "Allocate interrupts failed. " 5079 "return: %d, request: %d, actual: %d", 5080 rc, request, actual); 5081 goto alloc_handle_fail; 5082 } 5083 IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual); 5084 5085 /* 5086 * upper/lower limit of interrupts 5087 */ 5088 ixgbe->intr_cnt = actual; 5089 ixgbe->intr_cnt_max = request; 5090 ixgbe->intr_cnt_min = minimum; 5091 5092 /* 5093 * The number of RSS rings per group should not exceed the number of 5094 * rx interrupt vectors; otherwise the rx ring count must be adjusted. 5095 */ 5096 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 5097 ASSERT((ixgbe->num_rx_rings % ixgbe->num_rx_groups) == 0); 5098 if (actual < ring_per_group) { 5099 ixgbe->num_rx_rings = ixgbe->num_rx_groups * actual; 5100 ixgbe_setup_vmdq_rss_conf(ixgbe); 5101 } 5102 5103 /* 5104 * Now we know the actual number of vectors. Here we map the vectors 5105 * to the other interrupt and to the rx and tx rings.
5106 */ 5107 if (actual < minimum) { 5108 ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d", 5109 actual); 5110 goto alloc_handle_fail; 5111 } 5112 5113 /* 5114 * Get priority for first vector, assume remaining are all the same 5115 */ 5116 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri); 5117 if (rc != DDI_SUCCESS) { 5118 ixgbe_log(ixgbe, 5119 "Get interrupt priority failed: %d", rc); 5120 goto alloc_handle_fail; 5121 } 5122 5123 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap); 5124 if (rc != DDI_SUCCESS) { 5125 ixgbe_log(ixgbe, 5126 "Get interrupt cap failed: %d", rc); 5127 goto alloc_handle_fail; 5128 } 5129 5130 ixgbe->intr_type = intr_type; 5131 5132 return (IXGBE_SUCCESS); 5133 5134 alloc_handle_fail: 5135 ixgbe_rem_intrs(ixgbe); 5136 5137 return (IXGBE_FAILURE); 5138 } 5139 5140 /* 5141 * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type. 5142 * 5143 * Before adding the interrupt handlers, the interrupt vectors have 5144 * been allocated, and the rx/tx rings have also been allocated. 5145 */ 5146 static int 5147 ixgbe_add_intr_handlers(ixgbe_t *ixgbe) 5148 { 5149 int vector = 0; 5150 int rc; 5151 5152 switch (ixgbe->intr_type) { 5153 case DDI_INTR_TYPE_MSIX: 5154 /* 5155 * Add interrupt handler for all vectors 5156 */ 5157 for (vector = 0; vector < ixgbe->intr_cnt; vector++) { 5158 /* 5159 * install pointer to vect_map[vector] 5160 */ 5161 rc = ddi_intr_add_handler(ixgbe->htable[vector], 5162 (ddi_intr_handler_t *)ixgbe_intr_msix, 5163 (void *)&ixgbe->vect_map[vector], NULL); 5164 5165 if (rc != DDI_SUCCESS) { 5166 ixgbe_log(ixgbe, 5167 "Add interrupt handler failed. " 5168 "return: %d, vector: %d", rc, vector); 5169 for (vector--; vector >= 0; vector--) { 5170 (void) ddi_intr_remove_handler( 5171 ixgbe->htable[vector]); 5172 } 5173 return (IXGBE_FAILURE); 5174 } 5175 } 5176 5177 break; 5178 5179 case DDI_INTR_TYPE_MSI: 5180 /* 5181 * Add interrupt handlers for the only vector 5182 */ 5183 rc = ddi_intr_add_handler(ixgbe->htable[vector], 5184 (ddi_intr_handler_t *)ixgbe_intr_msi, 5185 (void *)ixgbe, NULL); 5186 5187 if (rc != DDI_SUCCESS) { 5188 ixgbe_log(ixgbe, 5189 "Add MSI interrupt handler failed: %d", rc); 5190 return (IXGBE_FAILURE); 5191 } 5192 5193 break; 5194 5195 case DDI_INTR_TYPE_FIXED: 5196 /* 5197 * Add interrupt handlers for the only vector 5198 */ 5199 rc = ddi_intr_add_handler(ixgbe->htable[vector], 5200 (ddi_intr_handler_t *)ixgbe_intr_legacy, 5201 (void *)ixgbe, NULL); 5202 5203 if (rc != DDI_SUCCESS) { 5204 ixgbe_log(ixgbe, 5205 "Add legacy interrupt handler failed: %d", rc); 5206 return (IXGBE_FAILURE); 5207 } 5208 5209 break; 5210 5211 default: 5212 return (IXGBE_FAILURE); 5213 } 5214 5215 return (IXGBE_SUCCESS); 5216 } 5217 5218 #pragma inline(ixgbe_map_rxring_to_vector) 5219 /* 5220 * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector. 5221 */ 5222 static void 5223 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx) 5224 { 5225 /* 5226 * Set bit in map 5227 */ 5228 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx); 5229 5230 /* 5231 * Count bits set 5232 */ 5233 ixgbe->vect_map[v_idx].rxr_cnt++; 5234 5235 /* 5236 * Remember bit position 5237 */ 5238 ixgbe->rx_rings[r_idx].intr_vector = v_idx; 5239 ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx; 5240 } 5241 5242 #pragma inline(ixgbe_map_txring_to_vector) 5243 /* 5244 * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector. 
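 *
 * For instance (indices hypothetical): mapping tx ring 3 to
 * vector 1 sets bit 3 in vect_map[1].tx_map, bumps txr_cnt, and
 * records intr_vector = 1 and vect_bit = (1 << 1) = 0x2 in the
 * ring itself, so the per-vector ISR and the per-ring code can
 * find each other without scanning.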
5245 */ 5246 static void 5247 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx) 5248 { 5249 /* 5250 * Set bit in map 5251 */ 5252 BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx); 5253 5254 /* 5255 * Count bits set 5256 */ 5257 ixgbe->vect_map[v_idx].txr_cnt++; 5258 5259 /* 5260 * Remember bit position 5261 */ 5262 ixgbe->tx_rings[t_idx].intr_vector = v_idx; 5263 ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx; 5264 } 5265 5266 /* 5267 * ixgbe_setup_ivar - Set the given entry in the given interrupt vector 5268 * allocation register (IVAR). 5269 * cause: 5270 * -1 : other cause 5271 * 0 : rx 5272 * 1 : tx 5273 */ 5274 static void 5275 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector, 5276 int8_t cause) 5277 { 5278 struct ixgbe_hw *hw = &ixgbe->hw; 5279 u32 ivar, index; 5280 5281 switch (hw->mac.type) { 5282 case ixgbe_mac_82598EB: 5283 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5284 if (cause == -1) { 5285 cause = 0; 5286 } 5287 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F; 5288 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 5289 ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3))); 5290 ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3))); 5291 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 5292 break; 5293 5294 case ixgbe_mac_82599EB: 5295 case ixgbe_mac_X540: 5296 case ixgbe_mac_X550: 5297 case ixgbe_mac_X550EM_x: 5298 if (cause == -1) { 5299 /* other causes */ 5300 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5301 index = (intr_alloc_entry & 1) * 8; 5302 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 5303 ivar &= ~(0xFF << index); 5304 ivar |= (msix_vector << index); 5305 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 5306 } else { 5307 /* tx or rx causes */ 5308 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5309 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause)); 5310 ivar = IXGBE_READ_REG(hw, 5311 IXGBE_IVAR(intr_alloc_entry >> 1)); 5312 ivar &= ~(0xFF << index); 5313 ivar |= (msix_vector << index); 5314 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1), 5315 ivar); 5316 } 5317 break; 5318 5319 default: 5320 break; 5321 } 5322 } 5323 5324 /* 5325 * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of 5326 * given interrupt vector allocation register (IVAR). 
5327 * cause: 5328 * -1 : other cause 5329 * 0 : rx 5330 * 1 : tx 5331 */ 5332 static void 5333 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause) 5334 { 5335 struct ixgbe_hw *hw = &ixgbe->hw; 5336 u32 ivar, index; 5337 5338 switch (hw->mac.type) { 5339 case ixgbe_mac_82598EB: 5340 if (cause == -1) { 5341 cause = 0; 5342 } 5343 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F; 5344 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 5345 ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 * 5346 (intr_alloc_entry & 0x3))); 5347 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 5348 break; 5349 5350 case ixgbe_mac_82599EB: 5351 case ixgbe_mac_X540: 5352 case ixgbe_mac_X550: 5353 case ixgbe_mac_X550EM_x: 5354 if (cause == -1) { 5355 /* other causes */ 5356 index = (intr_alloc_entry & 1) * 8; 5357 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 5358 ivar |= (IXGBE_IVAR_ALLOC_VAL << index); 5359 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 5360 } else { 5361 /* tx or rx causes */ 5362 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause)); 5363 ivar = IXGBE_READ_REG(hw, 5364 IXGBE_IVAR(intr_alloc_entry >> 1)); 5365 ivar |= (IXGBE_IVAR_ALLOC_VAL << index); 5366 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1), 5367 ivar); 5368 } 5369 break; 5370 5371 default: 5372 break; 5373 } 5374 } 5375 5376 /* 5377 * ixgbe_disable_ivar - Disable the given entry by clearing the VAL bit of 5378 * given interrupt vector allocation register (IVAR). 5379 * cause: 5380 * -1 : other cause 5381 * 0 : rx 5382 * 1 : tx 5383 */ 5384 static void 5385 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause) 5386 { 5387 struct ixgbe_hw *hw = &ixgbe->hw; 5388 u32 ivar, index; 5389 5390 switch (hw->mac.type) { 5391 case ixgbe_mac_82598EB: 5392 if (cause == -1) { 5393 cause = 0; 5394 } 5395 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F; 5396 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 5397 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << (8 * 5398 (intr_alloc_entry & 0x3))); 5399 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 5400 break; 5401 5402 case ixgbe_mac_82599EB: 5403 case ixgbe_mac_X540: 5404 case ixgbe_mac_X550: 5405 case ixgbe_mac_X550EM_x: 5406 if (cause == -1) { 5407 /* other causes */ 5408 index = (intr_alloc_entry & 1) * 8; 5409 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 5410 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index); 5411 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 5412 } else { 5413 /* tx or rx causes */ 5414 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause)); 5415 ivar = IXGBE_READ_REG(hw, 5416 IXGBE_IVAR(intr_alloc_entry >> 1)); 5417 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index); 5418 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1), 5419 ivar); 5420 } 5421 break; 5422 5423 default: 5424 break; 5425 } 5426 } 5427 5428 /* 5429 * Convert the rx ring index maintained by the driver to the rx ring index 5430 * used in h/w.
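 *
 * Worked examples of the translations below (ring counts
 * hypothetical): in plain VMDQ mode on 82599-class hardware,
 * sw index 3 maps to hw queue 3 * 2 = 6. In VMDQ+RSS mode with
 * 2 rings per group and 32 or fewer groups, sw index 3 (group 1,
 * ring 1) maps to 1 * 4 + 1 = 5, since each group then owns a
 * stride of 4 hardware queues.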
5431 */ 5432 static uint32_t 5433 ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index) 5434 { 5435 5436 struct ixgbe_hw *hw = &ixgbe->hw; 5437 uint32_t rx_ring_per_group, hw_rx_index; 5438 5439 if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS || 5440 ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) { 5441 return (sw_rx_index); 5442 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) { 5443 switch (hw->mac.type) { 5444 case ixgbe_mac_82598EB: 5445 return (sw_rx_index); 5446 5447 case ixgbe_mac_82599EB: 5448 case ixgbe_mac_X540: 5449 case ixgbe_mac_X550: 5450 case ixgbe_mac_X550EM_x: 5451 return (sw_rx_index * 2); 5452 5453 default: 5454 break; 5455 } 5456 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) { 5457 rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 5458 5459 switch (hw->mac.type) { 5460 case ixgbe_mac_82598EB: 5461 hw_rx_index = (sw_rx_index / rx_ring_per_group) * 5462 16 + (sw_rx_index % rx_ring_per_group); 5463 return (hw_rx_index); 5464 5465 case ixgbe_mac_82599EB: 5466 case ixgbe_mac_X540: 5467 case ixgbe_mac_X550: 5468 case ixgbe_mac_X550EM_x: 5469 if (ixgbe->num_rx_groups > 32) { 5470 hw_rx_index = (sw_rx_index / 5471 rx_ring_per_group) * 2 + 5472 (sw_rx_index % rx_ring_per_group); 5473 } else { 5474 hw_rx_index = (sw_rx_index / 5475 rx_ring_per_group) * 4 + 5476 (sw_rx_index % rx_ring_per_group); 5477 } 5478 return (hw_rx_index); 5479 5480 default: 5481 break; 5482 } 5483 } 5484 5485 /* 5486 * Should never reach. Just to make compiler happy. 5487 */ 5488 return (sw_rx_index); 5489 } 5490 5491 /* 5492 * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors. 5493 * 5494 * For MSI-X, this maps the rx, tx and other interrupts to 5495 * vectors [0 .. (intr_cnt - 1)]. 5496 */ 5497 static int 5498 ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe) 5499 { 5500 int i, vector = 0; 5501 5502 /* initialize vector map */ 5503 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map)); 5504 for (i = 0; i < ixgbe->intr_cnt; i++) { 5505 ixgbe->vect_map[i].ixgbe = ixgbe; 5506 } 5507 5508 /* 5509 * non-MSI-X case is very simple: rx rings[0] on RTxQ[0], 5510 * tx rings[0] on RTxQ[1]. 5511 */ 5512 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) { 5513 ixgbe_map_rxring_to_vector(ixgbe, 0, 0); 5514 ixgbe_map_txring_to_vector(ixgbe, 0, 1); 5515 return (IXGBE_SUCCESS); 5516 } 5517 5518 /* 5519 * Interrupts/vectors mapping for MSI-X 5520 */ 5521 5522 /* 5523 * Map other interrupt to vector 0, 5524 * Set bit in map and count the bits set. 5525 */ 5526 BT_SET(ixgbe->vect_map[vector].other_map, 0); 5527 ixgbe->vect_map[vector].other_cnt++; 5528 5529 /* 5530 * Map rx ring interrupts to vectors 5531 */ 5532 for (i = 0; i < ixgbe->num_rx_rings; i++) { 5533 ixgbe_map_rxring_to_vector(ixgbe, i, vector); 5534 vector = (vector + 1) % ixgbe->intr_cnt; 5535 } 5536 5537 /* 5538 * Map tx ring interrupts to vectors 5539 */ 5540 for (i = 0; i < ixgbe->num_tx_rings; i++) { 5541 ixgbe_map_txring_to_vector(ixgbe, i, vector); 5542 vector = (vector + 1) % ixgbe->intr_cnt; 5543 } 5544 5545 return (IXGBE_SUCCESS); 5546 } 5547 5548 /* 5549 * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
5550 * 5551 * This relies on ring/vector mapping already set up in the 5552 * vect_map[] structures 5553 */ 5554 static void 5555 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe) 5556 { 5557 struct ixgbe_hw *hw = &ixgbe->hw; 5558 ixgbe_intr_vector_t *vect; /* vector bitmap */ 5559 int r_idx; /* ring index */ 5560 int v_idx; /* vector index */ 5561 uint32_t hw_index; 5562 5563 /* 5564 * Clear any previous entries 5565 */ 5566 switch (hw->mac.type) { 5567 case ixgbe_mac_82598EB: 5568 for (v_idx = 0; v_idx < 25; v_idx++) 5569 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0); 5570 break; 5571 5572 case ixgbe_mac_82599EB: 5573 case ixgbe_mac_X540: 5574 case ixgbe_mac_X550: 5575 case ixgbe_mac_X550EM_x: 5576 for (v_idx = 0; v_idx < 64; v_idx++) 5577 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0); 5578 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0); 5579 break; 5580 5581 default: 5582 break; 5583 } 5584 5585 /* 5586 * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and 5587 * tx rings[0] will use RTxQ[1]. 5588 */ 5589 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) { 5590 ixgbe_setup_ivar(ixgbe, 0, 0, 0); 5591 ixgbe_setup_ivar(ixgbe, 0, 1, 1); 5592 return; 5593 } 5594 5595 /* 5596 * For MSI-X interrupt, "Other" is always on vector[0]. 5597 */ 5598 ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1); 5599 5600 /* 5601 * For each interrupt vector, populate the IVAR table 5602 */ 5603 for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) { 5604 vect = &ixgbe->vect_map[v_idx]; 5605 5606 /* 5607 * For each rx ring bit set 5608 */ 5609 r_idx = bt_getlowbit(vect->rx_map, 0, 5610 (ixgbe->num_rx_rings - 1)); 5611 5612 while (r_idx >= 0) { 5613 hw_index = ixgbe->rx_rings[r_idx].hw_index; 5614 ixgbe_setup_ivar(ixgbe, hw_index, v_idx, 0); 5615 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1), 5616 (ixgbe->num_rx_rings - 1)); 5617 } 5618 5619 /* 5620 * For each tx ring bit set 5621 */ 5622 r_idx = bt_getlowbit(vect->tx_map, 0, 5623 (ixgbe->num_tx_rings - 1)); 5624 5625 while (r_idx >= 0) { 5626 ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1); 5627 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1), 5628 (ixgbe->num_tx_rings - 1)); 5629 } 5630 } 5631 } 5632 5633 /* 5634 * ixgbe_rem_intr_handlers - Remove the interrupt handlers. 5635 */ 5636 static void 5637 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe) 5638 { 5639 int i; 5640 int rc; 5641 5642 for (i = 0; i < ixgbe->intr_cnt; i++) { 5643 rc = ddi_intr_remove_handler(ixgbe->htable[i]); 5644 if (rc != DDI_SUCCESS) { 5645 IXGBE_DEBUGLOG_1(ixgbe, 5646 "Remove intr handler failed: %d", rc); 5647 } 5648 } 5649 } 5650 5651 /* 5652 * ixgbe_rem_intrs - Remove the allocated interrupts. 5653 */ 5654 static void 5655 ixgbe_rem_intrs(ixgbe_t *ixgbe) 5656 { 5657 int i; 5658 int rc; 5659 5660 for (i = 0; i < ixgbe->intr_cnt; i++) { 5661 rc = ddi_intr_free(ixgbe->htable[i]); 5662 if (rc != DDI_SUCCESS) { 5663 IXGBE_DEBUGLOG_1(ixgbe, 5664 "Free intr failed: %d", rc); 5665 } 5666 } 5667 5668 kmem_free(ixgbe->htable, ixgbe->intr_size); 5669 ixgbe->htable = NULL; 5670 } 5671 5672 /* 5673 * ixgbe_enable_intrs - Enable all the ddi interrupts. 
5674 */ 5675 static int 5676 ixgbe_enable_intrs(ixgbe_t *ixgbe) 5677 { 5678 int i; 5679 int rc; 5680 5681 /* 5682 * Enable interrupts 5683 */ 5684 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) { 5685 /* 5686 * Call ddi_intr_block_enable() for MSI 5687 */ 5688 rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt); 5689 if (rc != DDI_SUCCESS) { 5690 ixgbe_log(ixgbe, 5691 "Enable block intr failed: %d", rc); 5692 return (IXGBE_FAILURE); 5693 } 5694 } else { 5695 /* 5696 * Call ddi_intr_enable() for Legacy/MSI non block enable 5697 */ 5698 for (i = 0; i < ixgbe->intr_cnt; i++) { 5699 rc = ddi_intr_enable(ixgbe->htable[i]); 5700 if (rc != DDI_SUCCESS) { 5701 ixgbe_log(ixgbe, 5702 "Enable intr failed: %d", rc); 5703 return (IXGBE_FAILURE); 5704 } 5705 } 5706 } 5707 5708 return (IXGBE_SUCCESS); 5709 } 5710 5711 /* 5712 * ixgbe_disable_intrs - Disable all the interrupts. 5713 */ 5714 static int 5715 ixgbe_disable_intrs(ixgbe_t *ixgbe) 5716 { 5717 int i; 5718 int rc; 5719 5720 /* 5721 * Disable all interrupts 5722 */ 5723 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) { 5724 rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt); 5725 if (rc != DDI_SUCCESS) { 5726 ixgbe_log(ixgbe, 5727 "Disable block intr failed: %d", rc); 5728 return (IXGBE_FAILURE); 5729 } 5730 } else { 5731 for (i = 0; i < ixgbe->intr_cnt; i++) { 5732 rc = ddi_intr_disable(ixgbe->htable[i]); 5733 if (rc != DDI_SUCCESS) { 5734 ixgbe_log(ixgbe, 5735 "Disable intr failed: %d", rc); 5736 return (IXGBE_FAILURE); 5737 } 5738 } 5739 } 5740 5741 return (IXGBE_SUCCESS); 5742 } 5743 5744 /* 5745 * ixgbe_get_hw_state - Get and save parameters related to adapter hardware. 5746 */ 5747 static void 5748 ixgbe_get_hw_state(ixgbe_t *ixgbe) 5749 { 5750 struct ixgbe_hw *hw = &ixgbe->hw; 5751 ixgbe_link_speed speed = 0; 5752 boolean_t link_up = B_FALSE; 5753 uint32_t pcs1g_anlp = 0; 5754 5755 ASSERT(mutex_owned(&ixgbe->gen_lock)); 5756 ixgbe->param_lp_1000fdx_cap = 0; 5757 ixgbe->param_lp_100fdx_cap = 0; 5758 5759 /* check for link, don't wait */ 5760 (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE); 5761 5762 /* 5763 * Update the observed Link Partner's capabilities. Not all adapters 5764 * can provide full information on the LP's capable speeds, so we 5765 * provide what we can. 5766 */ 5767 if (link_up) { 5768 pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 5769 5770 ixgbe->param_lp_1000fdx_cap = 5771 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0; 5772 ixgbe->param_lp_100fdx_cap = 5773 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0; 5774 } 5775 5776 /* 5777 * Update GLD's notion of the adapter's currently advertised speeds. 5778 * Since the common code doesn't always record the current autonegotiate 5779 * settings in the phy struct for all parts (specifically, adapters with 5780 * SFPs) we first test to see if it is 0, and if so, we fall back to 5781 * using the adapter's speed capabilities which we saved during instance 5782 * init in ixgbe_init_params(). 5783 * 5784 * Adapters with SFPs will always be shown as advertising all of their 5785 * supported speeds, and adapters with baseT PHYs (where the phy struct 5786 * is maintained by the common code) will always have a factual view of 5787 * their currently-advertised speeds. In the case of SFPs, this is 5788 * acceptable as we default to advertising all speeds that the adapter 5789 * claims to support, and those properties are immutable; unlike on 5790 * baseT (copper) PHYs, where speeds can be enabled or disabled at will. 
5791 */ 5792 speed = hw->phy.autoneg_advertised; 5793 if (speed == 0) 5794 speed = ixgbe->speeds_supported; 5795 5796 ixgbe->param_adv_10000fdx_cap = 5797 (speed & IXGBE_LINK_SPEED_10GB_FULL) ? 1 : 0; 5798 ixgbe->param_adv_5000fdx_cap = 5799 (speed & IXGBE_LINK_SPEED_5GB_FULL) ? 1 : 0; 5800 ixgbe->param_adv_2500fdx_cap = 5801 (speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 1 : 0; 5802 ixgbe->param_adv_1000fdx_cap = 5803 (speed & IXGBE_LINK_SPEED_1GB_FULL) ? 1 : 0; 5804 ixgbe->param_adv_100fdx_cap = 5805 (speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0; 5806 } 5807 5808 /* 5809 * ixgbe_get_driver_control - Notify that driver is in control of device. 5810 */ 5811 static void 5812 ixgbe_get_driver_control(struct ixgbe_hw *hw) 5813 { 5814 uint32_t ctrl_ext; 5815 5816 /* 5817 * Notify firmware that driver is in control of device 5818 */ 5819 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 5820 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 5821 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 5822 } 5823 5824 /* 5825 * ixgbe_release_driver_control - Notify that driver is no longer in control 5826 * of device. 5827 */ 5828 static void 5829 ixgbe_release_driver_control(struct ixgbe_hw *hw) 5830 { 5831 uint32_t ctrl_ext; 5832 5833 /* 5834 * Notify firmware that driver is no longer in control of device 5835 */ 5836 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 5837 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 5838 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 5839 } 5840 5841 /* 5842 * ixgbe_atomic_reserve - Atomic decrease operation. 5843 */ 5844 int 5845 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n) 5846 { 5847 uint32_t oldval; 5848 uint32_t newval; 5849 5850 /* 5851 * ATOMICALLY 5852 */ 5853 do { 5854 oldval = *count_p; 5855 if (oldval < n) 5856 return (-1); 5857 newval = oldval - n; 5858 } while (atomic_cas_32(count_p, oldval, newval) != oldval); 5859 5860 return (newval); 5861 } 5862 5863 /* 5864 * ixgbe_mc_table_itr - Traverse the entries in the multicast table. 5865 */ 5866 static uint8_t * 5867 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq) 5868 { 5869 uint8_t *addr = *upd_ptr; 5870 uint8_t *new_ptr; 5871 5872 _NOTE(ARGUNUSED(hw)); 5873 _NOTE(ARGUNUSED(vmdq)); 5874 5875 new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS; 5876 *upd_ptr = new_ptr; 5877 return (addr); 5878 } 5879 5880 /* 5881 * FMA support 5882 */ 5883 int 5884 ixgbe_check_acc_handle(ddi_acc_handle_t handle) 5885 { 5886 ddi_fm_error_t de; 5887 5888 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 5889 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 5890 return (de.fme_status); 5891 } 5892 5893 int 5894 ixgbe_check_dma_handle(ddi_dma_handle_t handle) 5895 { 5896 ddi_fm_error_t de; 5897 5898 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 5899 return (de.fme_status); 5900 } 5901 5902 /* 5903 * ixgbe_fm_error_cb - The IO fault service error handling callback function. 5904 */ 5905 static int 5906 ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 5907 { 5908 _NOTE(ARGUNUSED(impl_data)); 5909 /* 5910 * as the driver can always deal with an error in any dma or 5911 * access handle, we can just return the fme_status value. 
5912 */ 5913 pci_ereport_post(dip, err, NULL); 5914 return (err->fme_status); 5915 } 5916 5917 static void 5918 ixgbe_fm_init(ixgbe_t *ixgbe) 5919 { 5920 ddi_iblock_cookie_t iblk; 5921 int fma_dma_flag; 5922 5923 /* 5924 * Only register with IO Fault Services if we have some capability 5925 */ 5926 if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) { 5927 ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 5928 } else { 5929 ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC; 5930 } 5931 5932 if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) { 5933 fma_dma_flag = 1; 5934 } else { 5935 fma_dma_flag = 0; 5936 } 5937 5938 ixgbe_set_fma_flags(fma_dma_flag); 5939 5940 if (ixgbe->fm_capabilities) { 5941 5942 /* 5943 * Register capabilities with IO Fault Services 5944 */ 5945 ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk); 5946 5947 /* 5948 * Initialize pci ereport capabilities if ereport capable 5949 */ 5950 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) || 5951 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 5952 pci_ereport_setup(ixgbe->dip); 5953 5954 /* 5955 * Register error callback if error callback capable 5956 */ 5957 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 5958 ddi_fm_handler_register(ixgbe->dip, 5959 ixgbe_fm_error_cb, (void*) ixgbe); 5960 } 5961 } 5962 5963 static void 5964 ixgbe_fm_fini(ixgbe_t *ixgbe) 5965 { 5966 /* 5967 * Only unregister FMA capabilities if they are registered 5968 */ 5969 if (ixgbe->fm_capabilities) { 5970 5971 /* 5972 * Release any resources allocated by pci_ereport_setup() 5973 */ 5974 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) || 5975 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 5976 pci_ereport_teardown(ixgbe->dip); 5977 5978 /* 5979 * Un-register error callback if error callback capable 5980 */ 5981 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 5982 ddi_fm_handler_unregister(ixgbe->dip); 5983 5984 /* 5985 * Unregister from IO Fault Service 5986 */ 5987 ddi_fm_fini(ixgbe->dip); 5988 } 5989 } 5990 5991 void 5992 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail) 5993 { 5994 uint64_t ena; 5995 char buf[FM_MAX_CLASS]; 5996 5997 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 5998 ena = fm_ena_generate(0, FM_ENA_FMT1); 5999 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) { 6000 ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP, 6001 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL); 6002 } 6003 } 6004 6005 static int 6006 ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num) 6007 { 6008 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh; 6009 6010 mutex_enter(&rx_ring->rx_lock); 6011 rx_ring->ring_gen_num = mr_gen_num; 6012 mutex_exit(&rx_ring->rx_lock); 6013 return (0); 6014 } 6015 6016 /* 6017 * Get the global ring index by a ring index within a group. 6018 */ 6019 static int 6020 ixgbe_get_rx_ring_index(ixgbe_t *ixgbe, int gindex, int rindex) 6021 { 6022 ixgbe_rx_ring_t *rx_ring; 6023 int i; 6024 6025 for (i = 0; i < ixgbe->num_rx_rings; i++) { 6026 rx_ring = &ixgbe->rx_rings[i]; 6027 if (rx_ring->group_index == gindex) 6028 rindex--; 6029 if (rindex < 0) 6030 return (i); 6031 } 6032 6033 return (-1); 6034 } 6035 6036 /* 6037 * Callback function for MAC layer to register all rings.
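 *
 * Example of the rx index translation this callback relies on
 * (counts hypothetical): with 2 groups of 2 rings, MAC may ask
 * for (group_index 1, ring_index 0); ixgbe_get_rx_ring_index()
 * walks rx_rings[] counting members of group 1 and returns the
 * global index of that group's first ring, which selects the
 * ixgbe_rx_ring_t bound to the supplied ring handle.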
6038 */ 6039 /* ARGSUSED */ 6040 void 6041 ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index, 6042 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh) 6043 { 6044 ixgbe_t *ixgbe = (ixgbe_t *)arg; 6045 mac_intr_t *mintr = &infop->mri_intr; 6046 6047 switch (rtype) { 6048 case MAC_RING_TYPE_RX: { 6049 /* 6050 * 'index' is the ring index within the group. 6051 * Need to get the global ring index by searching in groups. 6052 */ 6053 int global_ring_index = ixgbe_get_rx_ring_index( 6054 ixgbe, group_index, ring_index); 6055 6056 ASSERT(global_ring_index >= 0); 6057 6058 ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[global_ring_index]; 6059 rx_ring->ring_handle = rh; 6060 6061 infop->mri_driver = (mac_ring_driver_t)rx_ring; 6062 infop->mri_start = ixgbe_ring_start; 6063 infop->mri_stop = NULL; 6064 infop->mri_poll = ixgbe_ring_rx_poll; 6065 infop->mri_stat = ixgbe_rx_ring_stat; 6066 6067 mintr->mi_handle = (mac_intr_handle_t)rx_ring; 6068 mintr->mi_enable = ixgbe_rx_ring_intr_enable; 6069 mintr->mi_disable = ixgbe_rx_ring_intr_disable; 6070 if (ixgbe->intr_type & 6071 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) { 6072 mintr->mi_ddi_handle = 6073 ixgbe->htable[rx_ring->intr_vector]; 6074 } 6075 6076 break; 6077 } 6078 case MAC_RING_TYPE_TX: { 6079 ASSERT(group_index == -1); 6080 ASSERT(ring_index < ixgbe->num_tx_rings); 6081 6082 ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index]; 6083 tx_ring->ring_handle = rh; 6084 6085 infop->mri_driver = (mac_ring_driver_t)tx_ring; 6086 infop->mri_start = NULL; 6087 infop->mri_stop = NULL; 6088 infop->mri_tx = ixgbe_ring_tx; 6089 infop->mri_stat = ixgbe_tx_ring_stat; 6090 if (ixgbe->intr_type & 6091 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) { 6092 mintr->mi_ddi_handle = 6093 ixgbe->htable[tx_ring->intr_vector]; 6094 } 6095 break; 6096 } 6097 default: 6098 break; 6099 } 6100 } 6101 6102 /* 6103 * Callback function for MAC layer to register all groups. 6104 */ 6105 void 6106 ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index, 6107 mac_group_info_t *infop, mac_group_handle_t gh) 6108 { 6109 ixgbe_t *ixgbe = (ixgbe_t *)arg; 6110 6111 switch (rtype) { 6112 case MAC_RING_TYPE_RX: { 6113 ixgbe_rx_group_t *rx_group; 6114 6115 rx_group = &ixgbe->rx_groups[index]; 6116 rx_group->group_handle = gh; 6117 6118 infop->mgi_driver = (mac_group_driver_t)rx_group; 6119 infop->mgi_start = NULL; 6120 infop->mgi_stop = NULL; 6121 infop->mgi_addmac = ixgbe_addmac; 6122 infop->mgi_remmac = ixgbe_remmac; 6123 infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups); 6124 6125 break; 6126 } 6127 case MAC_RING_TYPE_TX: 6128 break; 6129 default: 6130 break; 6131 } 6132 } 6133 6134 /* 6135 * Enable interrupt on the specified rx ring. 6136 */ 6137 int 6138 ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh) 6139 { 6140 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh; 6141 ixgbe_t *ixgbe = rx_ring->ixgbe; 6142 int r_idx = rx_ring->index; 6143 int hw_r_idx = rx_ring->hw_index; 6144 int v_idx = rx_ring->intr_vector; 6145 6146 mutex_enter(&ixgbe->gen_lock); 6147 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) { 6148 mutex_exit(&ixgbe->gen_lock); 6149 /* 6150 * Simply return 0. 6151 * Interrupts are being adjusted. ixgbe_intr_adjust() 6152 * will eventually re-enable the interrupt when it's 6153 * done with the adjustment. 6154 */ 6155 return (0); 6156 } 6157 6158 /* 6159 * Enable the interrupt by setting the VAL bit of the given 6160 * interrupt vector allocation register (IVAR).
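 *
 * Note: the EICS write that follows kicks the ring's vector once
 * from software; the intent (a design-rationale note, not from
 * hardware docs) is that any packets which arrived while the
 * interrupt was masked during polling get picked up promptly
 * rather than waiting for the next hardware event.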
6161 */ 6162 ixgbe_enable_ivar(ixgbe, hw_r_idx, 0); 6163 6164 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx); 6165 6166 /* 6167 * Trigger a Rx interrupt on this ring 6168 */ 6169 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx)); 6170 IXGBE_WRITE_FLUSH(&ixgbe->hw); 6171 6172 mutex_exit(&ixgbe->gen_lock); 6173 6174 return (0); 6175 } 6176 6177 /* 6178 * Disable interrupt on the specified rx ring. 6179 */ 6180 int 6181 ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh) 6182 { 6183 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh; 6184 ixgbe_t *ixgbe = rx_ring->ixgbe; 6185 int r_idx = rx_ring->index; 6186 int hw_r_idx = rx_ring->hw_index; 6187 int v_idx = rx_ring->intr_vector; 6188 6189 mutex_enter(&ixgbe->gen_lock); 6190 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) { 6191 mutex_exit(&ixgbe->gen_lock); 6192 /* 6193 * Simply return 0. 6194 * In the rare case where an interrupt is being 6195 * disabled while interrupts are being adjusted, 6196 * we don't fail the operation. No interrupts will 6197 * be generated while they are adjusted, and 6198 * ixgbe_intr_adjust() will cause the interrupts 6199 * to be re-enabled once it completes. Note that 6200 * in this case, packets may be delivered to the 6201 * stack via interrupts before ixgbe_rx_ring_intr_enable() 6202 * is called again. This is acceptable since interrupt 6203 * adjustment is infrequent, and the stack will be 6204 * able to handle these packets. 6205 */ 6206 return (0); 6207 } 6208 6209 /* 6210 * Disable the interrupt by clearing the VAL bit of the given 6211 * interrupt vector allocation register (IVAR). 6212 */ 6213 ixgbe_disable_ivar(ixgbe, hw_r_idx, 0); 6214 6215 BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx); 6216 6217 mutex_exit(&ixgbe->gen_lock); 6218 6219 return (0); 6220 } 6221 6222 /* 6223 * Add a mac address. 6224 */ 6225 static int 6226 ixgbe_addmac(void *arg, const uint8_t *mac_addr) 6227 { 6228 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg; 6229 ixgbe_t *ixgbe = rx_group->ixgbe; 6230 struct ixgbe_hw *hw = &ixgbe->hw; 6231 int slot, i; 6232 6233 mutex_enter(&ixgbe->gen_lock); 6234 6235 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) { 6236 mutex_exit(&ixgbe->gen_lock); 6237 return (ECANCELED); 6238 } 6239 6240 if (ixgbe->unicst_avail == 0) { 6241 /* no slots available */ 6242 mutex_exit(&ixgbe->gen_lock); 6243 return (ENOSPC); 6244 } 6245 6246 /* 6247 * The first ixgbe->num_rx_groups slots are reserved, one for each 6248 * group. The remaining slots are shared by all groups. When adding 6249 * a MAC address, the group's reserved slot is checked first, then 6250 * the shared slots are searched. 6251 */ 6252 slot = -1; 6253 if (ixgbe->unicst_addr[rx_group->index].mac.set == 1) { 6254 for (i = ixgbe->num_rx_groups; i < ixgbe->unicst_total; i++) { 6255 if (ixgbe->unicst_addr[i].mac.set == 0) { 6256 slot = i; 6257 break; 6258 } 6259 } 6260 } else { 6261 slot = rx_group->index; 6262 } 6263 6264 if (slot == -1) { 6265 /* no slots available */ 6266 mutex_exit(&ixgbe->gen_lock); 6267 return (ENOSPC); 6268 } 6269 6270 bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL); 6271 (void) ixgbe_set_rar(hw, slot, ixgbe->unicst_addr[slot].mac.addr, 6272 rx_group->index, IXGBE_RAH_AV); 6273 ixgbe->unicst_addr[slot].mac.set = 1; 6274 ixgbe->unicst_addr[slot].mac.group_index = rx_group->index; 6275 ixgbe->unicst_avail--; 6276 6277 mutex_exit(&ixgbe->gen_lock); 6278 6279 return (0); 6280 } 6281 6282 /* 6283 * Remove a mac address.
6284 */ 6285 static int 6286 ixgbe_remmac(void *arg, const uint8_t *mac_addr) 6287 { 6288 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg; 6289 ixgbe_t *ixgbe = rx_group->ixgbe; 6290 struct ixgbe_hw *hw = &ixgbe->hw; 6291 int slot; 6292 6293 mutex_enter(&ixgbe->gen_lock); 6294 6295 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) { 6296 mutex_exit(&ixgbe->gen_lock); 6297 return (ECANCELED); 6298 } 6299 6300 slot = ixgbe_unicst_find(ixgbe, mac_addr); 6301 if (slot == -1) { 6302 mutex_exit(&ixgbe->gen_lock); 6303 return (EINVAL); 6304 } 6305 6306 if (ixgbe->unicst_addr[slot].mac.set == 0) { 6307 mutex_exit(&ixgbe->gen_lock); 6308 return (EINVAL); 6309 } 6310 6311 bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL); 6312 (void) ixgbe_clear_rar(hw, slot); 6313 ixgbe->unicst_addr[slot].mac.set = 0; 6314 ixgbe->unicst_avail++; 6315 6316 mutex_exit(&ixgbe->gen_lock); 6317 6318 return (0); 6319 }