Print this page
6601 Various GLD drivers return EINVAL instead of ENOTSUP for unused mac_prop_id_t's
Reviewed by: Garrett D'Amore <garrett@damore.org>
Reviewed by: Dan McDonald <danmcd@omniti.com>
Reviewed by: Igor Kozhukhov <ikozhukhov@gmail.com>
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/igb/igb_gld.c
+++ new/usr/src/uts/common/io/igb/igb_gld.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
↓ open down ↓ |
19 lines elided |
↑ open up ↑ |
20 20 */
21 21
22 22 /*
23 23 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 28 * Copyright 2013, Nexenta Systems, Inc. All rights reserved.
29 29 * Copyright 2014 Pluribus Networks Inc.
30 + * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
30 31 */
31 32
32 33 #include "igb_sw.h"
33 34
/*
 * igb_m_stat - GLD mc_getstat(9E) entry point.
 *
 * Report the value of the statistic 'stat' through *val.  The hardware
 * statistics registers on this device family are clear-on-read, so each
 * register read below is accumulated into a running soft counter kept in
 * the igb_t (or, for RLEC, in the kstat image) instead of being returned
 * directly; *val is then taken from the accumulated value.
 *
 * Returns 0 on success, ECANCELED if the device is suspended, ENOTSUP
 * for an unrecognized statistic, or EIO if the FMA access-handle check
 * reports a register fault.
 */
int
igb_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	igb_t *igb = (igb_t *)arg;
	struct e1000_hw *hw = &igb->hw;
	igb_stat_t *igb_ks;
	uint32_t low_val, high_val;

	igb_ks = (igb_stat_t *)igb->igb_ks->ks_data;

	/* gen_lock serializes register access with suspend/resume */
	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	switch (stat) {
	case MAC_STAT_IFSPEED:
		/* link_speed is in Mbit/s; MAC layer wants bit/s */
		*val = igb->link_speed * 1000000ull;
		break;

	case MAC_STAT_MULTIRCV:
		igb->stat_mprc += E1000_READ_REG(hw, E1000_MPRC);
		*val = igb->stat_mprc;
		break;

	case MAC_STAT_BRDCSTRCV:
		igb->stat_bprc += E1000_READ_REG(hw, E1000_BPRC);
		*val = igb->stat_bprc;
		break;

	case MAC_STAT_MULTIXMT:
		igb->stat_mptc += E1000_READ_REG(hw, E1000_MPTC);
		*val = igb->stat_mptc;
		break;

	case MAC_STAT_BRDCSTXMT:
		igb->stat_bptc += E1000_READ_REG(hw, E1000_BPTC);
		*val = igb->stat_bptc;
		break;

	case MAC_STAT_NORCVBUF:
		igb->stat_rnbc += E1000_READ_REG(hw, E1000_RNBC);
		*val = igb->stat_rnbc;
		break;

	case MAC_STAT_IERRORS:
		/*
		 * Input errors are the sum of several hardware error
		 * counters; each is accumulated so the individual
		 * ETHER_STAT_* cases below stay consistent with this one.
		 */
		igb->stat_rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
		igb->stat_algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
		igb_ks->rlec.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RLEC);
		igb->stat_crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
		igb->stat_cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
		*val = igb->stat_rxerrc +
		    igb->stat_algnerrc +
		    igb_ks->rlec.value.ui64 +
		    igb->stat_crcerrs +
		    igb->stat_cexterr;
		break;

	case MAC_STAT_NOXMTBUF:
		*val = 0;
		break;

	case MAC_STAT_OERRORS:
		igb->stat_ecol += E1000_READ_REG(hw, E1000_ECOL);
		*val = igb->stat_ecol;
		break;

	case MAC_STAT_COLLISIONS:
		igb->stat_colc += E1000_READ_REG(hw, E1000_COLC);
		*val = igb->stat_colc;
		break;

	case MAC_STAT_RBYTES:
		/*
		 * The 64-bit register will reset whenever the upper
		 * 32 bits are read. So we need to read the lower
		 * 32 bits first, then read the upper 32 bits.
		 */
		low_val = E1000_READ_REG(hw, E1000_TORL);
		high_val = E1000_READ_REG(hw, E1000_TORH);
		igb->stat_tor += (uint64_t)high_val << 32 | (uint64_t)low_val;
		*val = igb->stat_tor;
		break;

	case MAC_STAT_IPACKETS:
		igb->stat_tpr += E1000_READ_REG(hw, E1000_TPR);
		*val = igb->stat_tpr;
		break;

	case MAC_STAT_OBYTES:
		/*
		 * The 64-bit register will reset whenever the upper
		 * 32 bits are read. So we need to read the lower
		 * 32 bits first, then read the upper 32 bits.
		 */
		low_val = E1000_READ_REG(hw, E1000_TOTL);
		high_val = E1000_READ_REG(hw, E1000_TOTH);
		igb->stat_tot += (uint64_t)high_val << 32 | (uint64_t)low_val;
		*val = igb->stat_tot;
		break;

	case MAC_STAT_OPACKETS:
		igb->stat_tpt += E1000_READ_REG(hw, E1000_TPT);
		*val = igb->stat_tpt;
		break;

	/* RFC 1643 stats */
	case ETHER_STAT_ALIGN_ERRORS:
		igb->stat_algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
		*val = igb->stat_algnerrc;
		break;

	case ETHER_STAT_FCS_ERRORS:
		igb->stat_crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
		*val = igb->stat_crcerrs;
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		igb->stat_scc += E1000_READ_REG(hw, E1000_SCC);
		*val = igb->stat_scc;
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		igb->stat_mcc += E1000_READ_REG(hw, E1000_MCC);
		*val = igb->stat_mcc;
		break;

	case ETHER_STAT_SQE_ERRORS:
		igb->stat_sec += E1000_READ_REG(hw, E1000_SEC);
		*val = igb->stat_sec;
		break;

	case ETHER_STAT_DEFER_XMTS:
		igb->stat_dc += E1000_READ_REG(hw, E1000_DC);
		*val = igb->stat_dc;
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		igb->stat_latecol += E1000_READ_REG(hw, E1000_LATECOL);
		*val = igb->stat_latecol;
		break;

	case ETHER_STAT_EX_COLLISIONS:
		igb->stat_ecol += E1000_READ_REG(hw, E1000_ECOL);
		*val = igb->stat_ecol;
		break;

	case ETHER_STAT_MACXMT_ERRORS:
		/* transmit MAC errors are reported as excessive collisions */
		igb->stat_ecol += E1000_READ_REG(hw, E1000_ECOL);
		*val = igb->stat_ecol;
		break;

	case ETHER_STAT_CARRIER_ERRORS:
		igb->stat_cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
		*val = igb->stat_cexterr;
		break;

	case ETHER_STAT_TOOLONG_ERRORS:
		igb->stat_roc += E1000_READ_REG(hw, E1000_ROC);
		*val = igb->stat_roc;
		break;

	case ETHER_STAT_MACRCV_ERRORS:
		igb->stat_rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
		*val = igb->stat_rxerrc;
		break;

	/* MII/GMII stats */
	case ETHER_STAT_XCVR_ADDR:
		/* The Internal PHY's MDI address for each MAC is 1 */
		*val = 1;
		break;

	case ETHER_STAT_XCVR_ID:
		*val = hw->phy.id | hw->phy.revision;
		break;

	case ETHER_STAT_XCVR_INUSE:
		/* map the current link speed/media to an XCVR_* code */
		switch (igb->link_speed) {
		case SPEED_1000:
			*val =
			    (hw->phy.media_type == e1000_media_type_copper) ?
			    XCVR_1000T : XCVR_1000X;
			break;
		case SPEED_100:
			*val =
			    (hw->phy.media_type == e1000_media_type_copper) ?
			    (igb->param_100t4_cap == 1) ?
			    XCVR_100T4 : XCVR_100T2 : XCVR_100X;
			break;
		case SPEED_10:
			*val = XCVR_10;
			break;
		default:
			*val = XCVR_NONE;
			break;
		}
		break;

	/* device capabilities (what the hardware can do) */
	case ETHER_STAT_CAP_1000FDX:
		*val = igb->param_1000fdx_cap;
		break;

	case ETHER_STAT_CAP_1000HDX:
		*val = igb->param_1000hdx_cap;
		break;

	case ETHER_STAT_CAP_100FDX:
		*val = igb->param_100fdx_cap;
		break;

	case ETHER_STAT_CAP_100HDX:
		*val = igb->param_100hdx_cap;
		break;

	case ETHER_STAT_CAP_10FDX:
		*val = igb->param_10fdx_cap;
		break;

	case ETHER_STAT_CAP_10HDX:
		*val = igb->param_10hdx_cap;
		break;

	case ETHER_STAT_CAP_ASMPAUSE:
		*val = igb->param_asym_pause_cap;
		break;

	case ETHER_STAT_CAP_PAUSE:
		*val = igb->param_pause_cap;
		break;

	case ETHER_STAT_CAP_AUTONEG:
		*val = igb->param_autoneg_cap;
		break;

	/* advertised capabilities (what we announce to the peer) */
	case ETHER_STAT_ADV_CAP_1000FDX:
		*val = igb->param_adv_1000fdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_1000HDX:
		*val = igb->param_adv_1000hdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_100FDX:
		*val = igb->param_adv_100fdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_100HDX:
		*val = igb->param_adv_100hdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_10FDX:
		*val = igb->param_adv_10fdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_10HDX:
		*val = igb->param_adv_10hdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_ASMPAUSE:
		*val = igb->param_adv_asym_pause_cap;
		break;

	case ETHER_STAT_ADV_CAP_PAUSE:
		*val = igb->param_adv_pause_cap;
		break;

	case ETHER_STAT_ADV_CAP_AUTONEG:
		*val = hw->mac.autoneg;
		break;

	/* link-partner capabilities (learned via autonegotiation) */
	case ETHER_STAT_LP_CAP_1000FDX:
		*val = igb->param_lp_1000fdx_cap;
		break;

	case ETHER_STAT_LP_CAP_1000HDX:
		*val = igb->param_lp_1000hdx_cap;
		break;

	case ETHER_STAT_LP_CAP_100FDX:
		*val = igb->param_lp_100fdx_cap;
		break;

	case ETHER_STAT_LP_CAP_100HDX:
		*val = igb->param_lp_100hdx_cap;
		break;

	case ETHER_STAT_LP_CAP_10FDX:
		*val = igb->param_lp_10fdx_cap;
		break;

	case ETHER_STAT_LP_CAP_10HDX:
		*val = igb->param_lp_10hdx_cap;
		break;

	case ETHER_STAT_LP_CAP_ASMPAUSE:
		*val = igb->param_lp_asym_pause_cap;
		break;

	case ETHER_STAT_LP_CAP_PAUSE:
		*val = igb->param_lp_pause_cap;
		break;

	case ETHER_STAT_LP_CAP_AUTONEG:
		*val = igb->param_lp_autoneg_cap;
		break;

	/* negotiated link parameters */
	case ETHER_STAT_LINK_ASMPAUSE:
		*val = igb->param_asym_pause_cap;
		break;

	case ETHER_STAT_LINK_PAUSE:
		*val = igb->param_pause_cap;
		break;

	case ETHER_STAT_LINK_AUTONEG:
		*val = hw->mac.autoneg;
		break;

	case ETHER_STAT_LINK_DUPLEX:
		*val = (igb->link_duplex == FULL_DUPLEX) ?
		    LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
		break;

	case ETHER_STAT_TOOSHORT_ERRORS:
		igb->stat_ruc += E1000_READ_REG(hw, E1000_RUC);
		*val = igb->stat_ruc;
		break;

	case ETHER_STAT_CAP_REMFAULT:
		*val = igb->param_rem_fault;
		break;

	case ETHER_STAT_ADV_REMFAULT:
		*val = igb->param_adv_rem_fault;
		break;

	case ETHER_STAT_LP_REMFAULT:
		*val = igb->param_lp_rem_fault;
		break;

	case ETHER_STAT_JABBER_ERRORS:
		igb->stat_rjc += E1000_READ_REG(hw, E1000_RJC);
		*val = igb->stat_rjc;
		break;

	case ETHER_STAT_CAP_100T4:
		*val = igb->param_100t4_cap;
		break;

	case ETHER_STAT_ADV_CAP_100T4:
		*val = igb->param_adv_100t4_cap;
		break;

	case ETHER_STAT_LP_CAP_100T4:
		*val = igb->param_lp_100t4_cap;
		break;

	default:
		mutex_exit(&igb->gen_lock);
		return (ENOTSUP);
	}

	mutex_exit(&igb->gen_lock);

	/* report any register access fault discovered during the reads */
	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (0);
}
409 410
410 411 /*
411 412 * Bring the device out of the reset/quiesced state that it
412 413 * was in when the interface was registered.
413 414 */
414 415 int
415 416 igb_m_start(void *arg)
416 417 {
417 418 igb_t *igb = (igb_t *)arg;
418 419
419 420 mutex_enter(&igb->gen_lock);
420 421
421 422 if (igb->igb_state & IGB_SUSPENDED) {
422 423 mutex_exit(&igb->gen_lock);
423 424 return (ECANCELED);
424 425 }
425 426
426 427 if (igb_start(igb, B_TRUE) != IGB_SUCCESS) {
427 428 mutex_exit(&igb->gen_lock);
428 429 return (EIO);
429 430 }
430 431
431 432 atomic_or_32(&igb->igb_state, IGB_STARTED);
432 433
433 434 mutex_exit(&igb->gen_lock);
434 435
435 436 /*
436 437 * Enable and start the watchdog timer
437 438 */
438 439 igb_enable_watchdog_timer(igb);
439 440
440 441 return (0);
441 442 }
442 443
443 444 /*
444 445 * Stop the device and put it in a reset/quiesced state such
445 446 * that the interface can be unregistered.
446 447 */
447 448 void
448 449 igb_m_stop(void *arg)
449 450 {
450 451 igb_t *igb = (igb_t *)arg;
451 452
452 453 mutex_enter(&igb->gen_lock);
453 454
454 455 if (igb->igb_state & IGB_SUSPENDED) {
455 456 mutex_exit(&igb->gen_lock);
456 457 return;
457 458 }
458 459
459 460 atomic_and_32(&igb->igb_state, ~IGB_STARTED);
460 461
461 462 igb_stop(igb, B_TRUE);
462 463
463 464 mutex_exit(&igb->gen_lock);
464 465
465 466 /*
466 467 * Disable and stop the watchdog timer
467 468 */
468 469 igb_disable_watchdog_timer(igb);
469 470 }
470 471
471 472 /*
472 473 * Set the promiscuity of the device.
473 474 */
474 475 int
475 476 igb_m_promisc(void *arg, boolean_t on)
476 477 {
477 478 igb_t *igb = (igb_t *)arg;
478 479 uint32_t reg_val;
479 480
480 481 mutex_enter(&igb->gen_lock);
481 482
482 483 if (igb->igb_state & IGB_SUSPENDED) {
483 484 mutex_exit(&igb->gen_lock);
484 485 return (ECANCELED);
485 486 }
486 487
487 488 reg_val = E1000_READ_REG(&igb->hw, E1000_RCTL);
488 489
489 490 if (on)
490 491 reg_val |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
491 492 else
492 493 reg_val &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE));
493 494
494 495 E1000_WRITE_REG(&igb->hw, E1000_RCTL, reg_val);
495 496
496 497 mutex_exit(&igb->gen_lock);
497 498
498 499 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
499 500 ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
500 501 return (EIO);
501 502 }
502 503
503 504 return (0);
504 505 }
505 506
506 507 /*
507 508 * Add/remove the addresses to/from the set of multicast
508 509 * addresses for which the device will receive packets.
509 510 */
510 511 int
511 512 igb_m_multicst(void *arg, boolean_t add, const uint8_t *mcst_addr)
512 513 {
513 514 igb_t *igb = (igb_t *)arg;
514 515 int result;
515 516
516 517 mutex_enter(&igb->gen_lock);
517 518
518 519 if (igb->igb_state & IGB_SUSPENDED) {
519 520 mutex_exit(&igb->gen_lock);
520 521 return (ECANCELED);
521 522 }
522 523
523 524 result = (add) ? igb_multicst_add(igb, mcst_addr)
524 525 : igb_multicst_remove(igb, mcst_addr);
525 526
526 527 mutex_exit(&igb->gen_lock);
527 528
528 529 return (result);
529 530 }
530 531
531 532 /*
532 533 * Pass on M_IOCTL messages passed to the DLD, and support
533 534 * private IOCTLs for debugging and ndd.
534 535 */
535 536 void
536 537 igb_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
537 538 {
538 539 igb_t *igb = (igb_t *)arg;
539 540 struct iocblk *iocp;
540 541 enum ioc_reply status;
541 542
542 543 iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
543 544 iocp->ioc_error = 0;
544 545
545 546 mutex_enter(&igb->gen_lock);
546 547 if (igb->igb_state & IGB_SUSPENDED) {
547 548 mutex_exit(&igb->gen_lock);
548 549 miocnak(q, mp, 0, EINVAL);
549 550 return;
550 551 }
551 552 mutex_exit(&igb->gen_lock);
552 553
553 554 switch (iocp->ioc_cmd) {
554 555 case LB_GET_INFO_SIZE:
555 556 case LB_GET_INFO:
556 557 case LB_GET_MODE:
557 558 case LB_SET_MODE:
558 559 status = igb_loopback_ioctl(igb, iocp, mp);
559 560 break;
560 561
561 562 default:
562 563 status = IOC_INVAL;
563 564 break;
564 565 }
565 566
566 567 /*
567 568 * Decide how to reply
568 569 */
569 570 switch (status) {
570 571 default:
571 572 case IOC_INVAL:
572 573 /*
573 574 * Error, reply with a NAK and EINVAL or the specified error
574 575 */
575 576 miocnak(q, mp, 0, iocp->ioc_error == 0 ?
576 577 EINVAL : iocp->ioc_error);
577 578 break;
578 579
579 580 case IOC_DONE:
580 581 /*
581 582 * OK, reply already sent
582 583 */
583 584 break;
584 585
585 586 case IOC_ACK:
586 587 /*
587 588 * OK, reply with an ACK
588 589 */
589 590 miocack(q, mp, 0, 0);
590 591 break;
591 592
592 593 case IOC_REPLY:
593 594 /*
594 595 * OK, send prepared reply as ACK or NAK
595 596 */
596 597 mp->b_datap->db_type = iocp->ioc_error == 0 ?
597 598 M_IOCACK : M_IOCNAK;
598 599 qreply(q, mp);
599 600 break;
600 601 }
601 602 }
602 603
603 604 /*
604 605 * Add a MAC address to the target RX group.
605 606 */
606 607 static int
607 608 igb_addmac(void *arg, const uint8_t *mac_addr)
608 609 {
609 610 igb_rx_group_t *rx_group = (igb_rx_group_t *)arg;
610 611 igb_t *igb = rx_group->igb;
611 612 struct e1000_hw *hw = &igb->hw;
612 613 int i, slot;
613 614
614 615 mutex_enter(&igb->gen_lock);
615 616
616 617 if (igb->igb_state & IGB_SUSPENDED) {
617 618 mutex_exit(&igb->gen_lock);
618 619 return (ECANCELED);
619 620 }
620 621
621 622 if (igb->unicst_avail == 0) {
622 623 /* no slots available */
623 624 mutex_exit(&igb->gen_lock);
624 625 return (ENOSPC);
625 626 }
626 627
627 628 /*
628 629 * The slots from 0 to igb->num_rx_groups are reserved slots which
629 630 * are 1 to 1 mapped with group index directly. The other slots are
630 631 * shared between the all of groups. While adding a MAC address,
631 632 * it will try to set the reserved slots first, then the shared slots.
632 633 */
633 634 slot = -1;
634 635 if (igb->unicst_addr[rx_group->index].mac.set == 1) {
635 636 /*
636 637 * The reserved slot for current group is used, find the free
637 638 * slots in the shared slots.
638 639 */
639 640 for (i = igb->num_rx_groups; i < igb->unicst_total; i++) {
640 641 if (igb->unicst_addr[i].mac.set == 0) {
641 642 slot = i;
642 643 break;
643 644 }
644 645 }
645 646 } else
646 647 slot = rx_group->index;
647 648
648 649 if (slot == -1) {
649 650 /* no slots available in the shared slots */
650 651 mutex_exit(&igb->gen_lock);
651 652 return (ENOSPC);
652 653 }
653 654
654 655 /* Set VMDq according to the mode supported by hardware. */
655 656 e1000_rar_set_vmdq(hw, mac_addr, slot, igb->vmdq_mode, rx_group->index);
656 657
657 658 bcopy(mac_addr, igb->unicst_addr[slot].mac.addr, ETHERADDRL);
658 659 igb->unicst_addr[slot].mac.group_index = rx_group->index;
659 660 igb->unicst_addr[slot].mac.set = 1;
660 661 igb->unicst_avail--;
661 662
662 663 mutex_exit(&igb->gen_lock);
663 664
664 665 return (0);
665 666 }
666 667
667 668 /*
668 669 * Remove a MAC address from the specified RX group.
669 670 */
670 671 static int
671 672 igb_remmac(void *arg, const uint8_t *mac_addr)
672 673 {
673 674 igb_rx_group_t *rx_group = (igb_rx_group_t *)arg;
674 675 igb_t *igb = rx_group->igb;
675 676 struct e1000_hw *hw = &igb->hw;
676 677 int slot;
677 678
678 679 mutex_enter(&igb->gen_lock);
679 680
680 681 if (igb->igb_state & IGB_SUSPENDED) {
681 682 mutex_exit(&igb->gen_lock);
682 683 return (ECANCELED);
683 684 }
684 685
685 686 slot = igb_unicst_find(igb, mac_addr);
686 687 if (slot == -1) {
687 688 mutex_exit(&igb->gen_lock);
688 689 return (EINVAL);
689 690 }
690 691
691 692 if (igb->unicst_addr[slot].mac.set == 0) {
692 693 mutex_exit(&igb->gen_lock);
693 694 return (EINVAL);
694 695 }
695 696
696 697 /* Clear the MAC ddress in the slot */
697 698 e1000_rar_clear(hw, slot);
698 699 igb->unicst_addr[slot].mac.set = 0;
699 700 igb->unicst_avail++;
700 701
701 702 mutex_exit(&igb->gen_lock);
702 703
703 704 return (0);
704 705 }
705 706
706 707 /*
707 708 * Enable interrupt on the specificed rx ring.
708 709 */
709 710 int
710 711 igb_rx_ring_intr_enable(mac_intr_handle_t intrh)
711 712 {
712 713 igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)intrh;
713 714 igb_t *igb = rx_ring->igb;
714 715 struct e1000_hw *hw = &igb->hw;
715 716 uint32_t index = rx_ring->index;
716 717
717 718 if (igb->intr_type == DDI_INTR_TYPE_MSIX) {
718 719 /* Interrupt enabling for MSI-X */
719 720 igb->eims_mask |= (E1000_EICR_RX_QUEUE0 << index);
720 721 E1000_WRITE_REG(hw, E1000_EIMS, igb->eims_mask);
721 722 E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask);
722 723 } else {
723 724 ASSERT(index == 0);
724 725 /* Interrupt enabling for MSI and legacy */
725 726 igb->ims_mask |= E1000_IMS_RXT0;
726 727 E1000_WRITE_REG(hw, E1000_IMS, igb->ims_mask);
727 728 }
728 729
729 730 E1000_WRITE_FLUSH(hw);
730 731
731 732 return (0);
732 733 }
733 734
734 735 /*
735 736 * Disable interrupt on the specificed rx ring.
736 737 */
737 738 int
738 739 igb_rx_ring_intr_disable(mac_intr_handle_t intrh)
739 740 {
740 741 igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)intrh;
741 742 igb_t *igb = rx_ring->igb;
742 743 struct e1000_hw *hw = &igb->hw;
743 744 uint32_t index = rx_ring->index;
744 745
745 746 if (igb->intr_type == DDI_INTR_TYPE_MSIX) {
746 747 /* Interrupt disabling for MSI-X */
747 748 igb->eims_mask &= ~(E1000_EICR_RX_QUEUE0 << index);
748 749 E1000_WRITE_REG(hw, E1000_EIMC,
749 750 (E1000_EICR_RX_QUEUE0 << index));
750 751 E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask);
751 752 } else {
752 753 ASSERT(index == 0);
753 754 /* Interrupt disabling for MSI and legacy */
754 755 igb->ims_mask &= ~E1000_IMS_RXT0;
755 756 E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
756 757 }
757 758
758 759 E1000_WRITE_FLUSH(hw);
759 760
760 761 return (0);
761 762 }
762 763
763 764 /*
764 765 * Get the global ring index by a ring index within a group.
765 766 */
766 767 int
767 768 igb_get_rx_ring_index(igb_t *igb, int gindex, int rindex)
768 769 {
769 770 igb_rx_ring_t *rx_ring;
770 771 int i;
771 772
772 773 for (i = 0; i < igb->num_rx_rings; i++) {
773 774 rx_ring = &igb->rx_rings[i];
774 775 if (rx_ring->group_index == gindex)
775 776 rindex--;
776 777 if (rindex < 0)
777 778 return (i);
778 779 }
779 780
780 781 return (-1);
781 782 }
782 783
783 784 static int
784 785 igb_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
785 786 {
786 787 igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)rh;
787 788
788 789 mutex_enter(&rx_ring->rx_lock);
789 790 rx_ring->ring_gen_num = mr_gen_num;
790 791 mutex_exit(&rx_ring->rx_lock);
791 792 return (0);
792 793 }
793 794
794 795 /*
795 796 * Callback funtion for MAC layer to register all rings.
796 797 */
797 798 /* ARGSUSED */
798 799 void
799 800 igb_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
800 801 const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
801 802 {
802 803 igb_t *igb = (igb_t *)arg;
803 804 mac_intr_t *mintr = &infop->mri_intr;
804 805
805 806 switch (rtype) {
806 807 case MAC_RING_TYPE_RX: {
807 808 igb_rx_ring_t *rx_ring;
808 809 int global_index;
809 810
810 811 /*
811 812 * 'index' is the ring index within the group.
812 813 * We need the global ring index by searching in group.
813 814 */
814 815 global_index = igb_get_rx_ring_index(igb, rg_index, index);
815 816
816 817 ASSERT(global_index >= 0);
817 818
818 819 rx_ring = &igb->rx_rings[global_index];
819 820 rx_ring->ring_handle = rh;
820 821
821 822 infop->mri_driver = (mac_ring_driver_t)rx_ring;
822 823 infop->mri_start = igb_ring_start;
823 824 infop->mri_stop = NULL;
824 825 infop->mri_poll = (mac_ring_poll_t)igb_rx_ring_poll;
825 826 infop->mri_stat = igb_rx_ring_stat;
826 827
827 828 mintr->mi_handle = (mac_intr_handle_t)rx_ring;
828 829 mintr->mi_enable = igb_rx_ring_intr_enable;
829 830 mintr->mi_disable = igb_rx_ring_intr_disable;
830 831 if (igb->intr_type & (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
831 832 mintr->mi_ddi_handle =
832 833 igb->htable[rx_ring->intr_vector];
833 834 }
834 835 break;
835 836 }
836 837 case MAC_RING_TYPE_TX: {
837 838 ASSERT(index < igb->num_tx_rings);
838 839
839 840 igb_tx_ring_t *tx_ring = &igb->tx_rings[index];
840 841 tx_ring->ring_handle = rh;
841 842
842 843 infop->mri_driver = (mac_ring_driver_t)tx_ring;
843 844 infop->mri_start = NULL;
844 845 infop->mri_stop = NULL;
845 846 infop->mri_tx = igb_tx_ring_send;
846 847 infop->mri_stat = igb_tx_ring_stat;
847 848 if (igb->intr_type & (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
848 849 mintr->mi_ddi_handle =
849 850 igb->htable[tx_ring->intr_vector];
850 851 }
851 852 break;
852 853 }
853 854 default:
854 855 break;
855 856 }
856 857 }
857 858
858 859 void
859 860 igb_fill_group(void *arg, mac_ring_type_t rtype, const int index,
860 861 mac_group_info_t *infop, mac_group_handle_t gh)
861 862 {
862 863 igb_t *igb = (igb_t *)arg;
863 864
864 865 switch (rtype) {
865 866 case MAC_RING_TYPE_RX: {
866 867 igb_rx_group_t *rx_group;
867 868
868 869 ASSERT((index >= 0) && (index < igb->num_rx_groups));
869 870
870 871 rx_group = &igb->rx_groups[index];
871 872 rx_group->group_handle = gh;
872 873
873 874 infop->mgi_driver = (mac_group_driver_t)rx_group;
874 875 infop->mgi_start = NULL;
875 876 infop->mgi_stop = NULL;
876 877 infop->mgi_addmac = igb_addmac;
877 878 infop->mgi_remmac = igb_remmac;
878 879 infop->mgi_count = (igb->num_rx_rings / igb->num_rx_groups);
879 880
880 881 break;
881 882 }
882 883 case MAC_RING_TYPE_TX:
883 884 break;
884 885 default:
885 886 break;
886 887 }
887 888 }
888 889
889 890 /*
890 891 * Obtain the MAC's capabilities and associated data from
891 892 * the driver.
892 893 */
893 894 boolean_t
894 895 igb_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
895 896 {
896 897 igb_t *igb = (igb_t *)arg;
897 898
898 899 switch (cap) {
899 900 case MAC_CAPAB_HCKSUM: {
900 901 uint32_t *tx_hcksum_flags = cap_data;
901 902
902 903 /*
903 904 * We advertise our capabilities only if tx hcksum offload is
904 905 * enabled. On receive, the stack will accept checksummed
905 906 * packets anyway, even if we haven't said we can deliver
906 907 * them.
907 908 */
908 909 if (!igb->tx_hcksum_enable)
909 910 return (B_FALSE);
910 911
911 912 *tx_hcksum_flags = HCKSUM_INET_PARTIAL | HCKSUM_IPHDRCKSUM;
912 913 break;
913 914 }
914 915 case MAC_CAPAB_LSO: {
915 916 mac_capab_lso_t *cap_lso = cap_data;
916 917
917 918 if (igb->lso_enable) {
918 919 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
919 920 cap_lso->lso_basic_tcp_ipv4.lso_max = IGB_LSO_MAXLEN;
920 921 break;
921 922 } else {
922 923 return (B_FALSE);
923 924 }
924 925 }
925 926 case MAC_CAPAB_RINGS: {
926 927 mac_capab_rings_t *cap_rings = cap_data;
927 928
928 929 switch (cap_rings->mr_type) {
929 930 case MAC_RING_TYPE_RX:
930 931 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
931 932 cap_rings->mr_rnum = igb->num_rx_rings;
932 933 cap_rings->mr_gnum = igb->num_rx_groups;
933 934 cap_rings->mr_rget = igb_fill_ring;
934 935 cap_rings->mr_gget = igb_fill_group;
935 936 cap_rings->mr_gaddring = NULL;
936 937 cap_rings->mr_gremring = NULL;
937 938
938 939 break;
939 940 case MAC_RING_TYPE_TX:
940 941 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
941 942 cap_rings->mr_rnum = igb->num_tx_rings;
942 943 cap_rings->mr_gnum = 0;
943 944 cap_rings->mr_rget = igb_fill_ring;
944 945 cap_rings->mr_gget = NULL;
945 946
946 947 break;
947 948 default:
948 949 break;
949 950 }
950 951 break;
951 952 }
952 953
953 954 default:
954 955 return (B_FALSE);
955 956 }
956 957 return (B_TRUE);
957 958 }
958 959
/*
 * igb_m_setprop - GLD mc_setprop(9E) entry point.
 *
 * Set the value of the named link property.  The en_* autonegotiation
 * advertisement properties and AUTONEG are writable only on copper
 * media and funnel into a common link re-setup (the setup_link label
 * inside the FLOWCTRL case).  MTU changes require the adapter to be
 * stopped and also resize the rx/tx DMA buffers.
 *
 * Returns 0 on success, ECANCELED if suspended, EBUSY while in
 * loopback mode (for locked properties) or when the MTU is changed
 * while started, ENOTSUP for read-only/unknown properties, EINVAL for
 * bad values, or EIO on a register access fault.
 */
int
igb_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	igb_t *igb = (igb_t *)arg;
	struct e1000_hw *hw = &igb->hw;
	int err = 0;
	uint32_t flow_control;
	uint32_t cur_mtu, new_mtu;
	uint32_t rx_size;
	uint32_t tx_size;

	mutex_enter(&igb->gen_lock);
	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	if (igb->loopback_mode != IGB_LB_NONE && igb_param_locked(pr_num)) {
		/*
		 * All en_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		mutex_exit(&igb->gen_lock);
		return (EBUSY);
	}

	switch (pr_num) {
	case MAC_PROP_EN_1000FDX_CAP:
		/* read/write on copper, read-only on serdes */
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_1000fdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_1000fdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_EN_100FDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_100fdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_100fdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_EN_100HDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_100hdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_100hdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_EN_10FDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_10fdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_10fdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_EN_10HDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_10hdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_10hdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_AUTONEG:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_adv_autoneg_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_FLOWCTRL:
		bcopy(pr_val, &flow_control, sizeof (flow_control));

		switch (flow_control) {
		default:
			err = EINVAL;
			break;
		case LINK_FLOWCTRL_NONE:
			hw->fc.requested_mode = e1000_fc_none;
			break;
		case LINK_FLOWCTRL_RX:
			hw->fc.requested_mode = e1000_fc_rx_pause;
			break;
		case LINK_FLOWCTRL_TX:
			hw->fc.requested_mode = e1000_fc_tx_pause;
			break;
		case LINK_FLOWCTRL_BI:
			hw->fc.requested_mode = e1000_fc_full;
			break;
		}
		/*
		 * Common exit for all link-parameter cases above: apply
		 * the new advertisement/flow-control settings to the
		 * hardware unless a validation error was recorded.
		 */
setup_link:
		if (err == 0) {
			if (igb_setup_link(igb, B_TRUE) != IGB_SUCCESS)
				err = EINVAL;
		}
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_ADV_100T4_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_EN_100T4_CAP:
	case MAC_PROP_STATUS:
	case MAC_PROP_SPEED:
	case MAC_PROP_DUPLEX:
		err = ENOTSUP; /* read-only prop. Can't set this. */
		break;
	case MAC_PROP_MTU:
		/* adapter must be stopped for an MTU change */
		if (igb->igb_state & IGB_STARTED) {
			err = EBUSY;
			break;
		}

		cur_mtu = igb->default_mtu;
		bcopy(pr_val, &new_mtu, sizeof (new_mtu));
		if (new_mtu == cur_mtu) {
			err = 0;
			break;
		}

		if (new_mtu < MIN_MTU || new_mtu > MAX_MTU) {
			err = EINVAL;
			break;
		}

		err = mac_maxsdu_update(igb->mac_hdl, new_mtu);
		if (err == 0) {
			igb->default_mtu = new_mtu;
			/* frame = MTU + VLAN-tagged ether header + FCS */
			igb->max_frame_size = igb->default_mtu +
			    sizeof (struct ether_vlan_header) + ETHERFCSL;

			/*
			 * Set rx buffer size (rounded up to a 1 KB multiple)
			 */
			rx_size = igb->max_frame_size + IPHDR_ALIGN_ROOM;
			igb->rx_buf_size = ((rx_size >> 10) + ((rx_size &
			    (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;

			/*
			 * Set tx buffer size (rounded up to a 1 KB multiple)
			 */
			tx_size = igb->max_frame_size;
			igb->tx_buf_size = ((tx_size >> 10) + ((tx_size &
			    (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
		}
		break;
	case MAC_PROP_PRIVATE:
		err = igb_set_priv_prop(igb, pr_name, pr_valsize, pr_val);
		break;
	default:
		/* unknown/unused property ids are not supported */
		err = ENOTSUP;
		break;
	}

	mutex_exit(&igb->gen_lock);

	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (err);
}
1132 1133
1133 1134 int
1134 1135 igb_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
1135 1136 uint_t pr_valsize, void *pr_val)
1136 1137 {
1137 1138 igb_t *igb = (igb_t *)arg;
1138 1139 struct e1000_hw *hw = &igb->hw;
1139 1140 int err = 0;
1140 1141 uint32_t flow_control;
1141 1142 uint64_t tmp = 0;
1142 1143
1143 1144 switch (pr_num) {
1144 1145 case MAC_PROP_DUPLEX:
1145 1146 ASSERT(pr_valsize >= sizeof (link_duplex_t));
1146 1147 bcopy(&igb->link_duplex, pr_val, sizeof (link_duplex_t));
1147 1148 break;
1148 1149 case MAC_PROP_SPEED:
1149 1150 ASSERT(pr_valsize >= sizeof (uint64_t));
1150 1151 tmp = igb->link_speed * 1000000ull;
1151 1152 bcopy(&tmp, pr_val, sizeof (tmp));
1152 1153 break;
1153 1154 case MAC_PROP_AUTONEG:
1154 1155 ASSERT(pr_valsize >= sizeof (uint8_t));
1155 1156 *(uint8_t *)pr_val = igb->param_adv_autoneg_cap;
1156 1157 break;
1157 1158 case MAC_PROP_FLOWCTRL:
1158 1159 ASSERT(pr_valsize >= sizeof (uint32_t));
1159 1160 switch (hw->fc.requested_mode) {
1160 1161 case e1000_fc_none:
1161 1162 flow_control = LINK_FLOWCTRL_NONE;
1162 1163 break;
1163 1164 case e1000_fc_rx_pause:
1164 1165 flow_control = LINK_FLOWCTRL_RX;
1165 1166 break;
1166 1167 case e1000_fc_tx_pause:
1167 1168 flow_control = LINK_FLOWCTRL_TX;
1168 1169 break;
1169 1170 case e1000_fc_full:
1170 1171 flow_control = LINK_FLOWCTRL_BI;
1171 1172 break;
1172 1173 }
1173 1174 bcopy(&flow_control, pr_val, sizeof (flow_control));
1174 1175 break;
1175 1176 case MAC_PROP_ADV_1000FDX_CAP:
1176 1177 *(uint8_t *)pr_val = igb->param_adv_1000fdx_cap;
1177 1178 break;
1178 1179 case MAC_PROP_EN_1000FDX_CAP:
1179 1180 *(uint8_t *)pr_val = igb->param_en_1000fdx_cap;
1180 1181 break;
1181 1182 case MAC_PROP_ADV_1000HDX_CAP:
1182 1183 *(uint8_t *)pr_val = igb->param_adv_1000hdx_cap;
1183 1184 break;
1184 1185 case MAC_PROP_EN_1000HDX_CAP:
1185 1186 *(uint8_t *)pr_val = igb->param_en_1000hdx_cap;
1186 1187 break;
1187 1188 case MAC_PROP_ADV_100T4_CAP:
1188 1189 *(uint8_t *)pr_val = igb->param_adv_100t4_cap;
1189 1190 break;
1190 1191 case MAC_PROP_EN_100T4_CAP:
1191 1192 *(uint8_t *)pr_val = igb->param_en_100t4_cap;
1192 1193 break;
1193 1194 case MAC_PROP_ADV_100FDX_CAP:
1194 1195 *(uint8_t *)pr_val = igb->param_adv_100fdx_cap;
1195 1196 break;
1196 1197 case MAC_PROP_EN_100FDX_CAP:
1197 1198 *(uint8_t *)pr_val = igb->param_en_100fdx_cap;
1198 1199 break;
1199 1200 case MAC_PROP_ADV_100HDX_CAP:
1200 1201 *(uint8_t *)pr_val = igb->param_adv_100hdx_cap;
1201 1202 break;
1202 1203 case MAC_PROP_EN_100HDX_CAP:
1203 1204 *(uint8_t *)pr_val = igb->param_en_100hdx_cap;
1204 1205 break;
1205 1206 case MAC_PROP_ADV_10FDX_CAP:
1206 1207 *(uint8_t *)pr_val = igb->param_adv_10fdx_cap;
1207 1208 break;
1208 1209 case MAC_PROP_EN_10FDX_CAP:
1209 1210 *(uint8_t *)pr_val = igb->param_en_10fdx_cap;
1210 1211 break;
↓ open down ↓ |
81 lines elided |
↑ open up ↑ |
1211 1212 case MAC_PROP_ADV_10HDX_CAP:
1212 1213 *(uint8_t *)pr_val = igb->param_adv_10hdx_cap;
1213 1214 break;
1214 1215 case MAC_PROP_EN_10HDX_CAP:
1215 1216 *(uint8_t *)pr_val = igb->param_en_10hdx_cap;
1216 1217 break;
1217 1218 case MAC_PROP_PRIVATE:
1218 1219 err = igb_get_priv_prop(igb, pr_name, pr_valsize, pr_val);
1219 1220 break;
1220 1221 default:
1221 - err = EINVAL;
1222 + err = ENOTSUP;
1222 1223 break;
1223 1224 }
1224 1225 return (err);
1225 1226 }
1226 1227
1227 1228 void
1228 1229 igb_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
1229 1230 mac_prop_info_handle_t prh)
1230 1231 {
1231 1232 igb_t *igb = (igb_t *)arg;
1232 1233 struct e1000_hw *hw = &igb->hw;
1233 1234 uint16_t phy_status, phy_ext_status;
1234 1235
1235 1236 switch (pr_num) {
1236 1237 case MAC_PROP_DUPLEX:
1237 1238 case MAC_PROP_SPEED:
1238 1239 case MAC_PROP_ADV_1000FDX_CAP:
1239 1240 case MAC_PROP_ADV_1000HDX_CAP:
1240 1241 case MAC_PROP_EN_1000HDX_CAP:
1241 1242 case MAC_PROP_ADV_100T4_CAP:
1242 1243 case MAC_PROP_EN_100T4_CAP:
1243 1244 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1244 1245 break;
1245 1246
1246 1247 case MAC_PROP_EN_1000FDX_CAP:
1247 1248 if (hw->phy.media_type != e1000_media_type_copper) {
1248 1249 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1249 1250 } else {
1250 1251 (void) e1000_read_phy_reg(hw, PHY_EXT_STATUS,
1251 1252 &phy_ext_status);
1252 1253 mac_prop_info_set_default_uint8(prh,
1253 1254 ((phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
1254 1255 (phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0);
1255 1256 }
1256 1257 break;
1257 1258
1258 1259 case MAC_PROP_ADV_100FDX_CAP:
1259 1260 case MAC_PROP_EN_100FDX_CAP:
1260 1261 if (hw->phy.media_type != e1000_media_type_copper) {
1261 1262 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1262 1263 } else {
1263 1264 (void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1264 1265 mac_prop_info_set_default_uint8(prh,
1265 1266 ((phy_status & MII_SR_100X_FD_CAPS) ||
1266 1267 (phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0);
1267 1268 }
1268 1269 break;
1269 1270
1270 1271 case MAC_PROP_ADV_100HDX_CAP:
1271 1272 case MAC_PROP_EN_100HDX_CAP:
1272 1273 if (hw->phy.media_type != e1000_media_type_copper) {
1273 1274 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1274 1275 } else {
1275 1276 (void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1276 1277 mac_prop_info_set_default_uint8(prh,
1277 1278 ((phy_status & MII_SR_100X_HD_CAPS) ||
1278 1279 (phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0);
1279 1280 }
1280 1281 break;
1281 1282
1282 1283 case MAC_PROP_ADV_10FDX_CAP:
1283 1284 case MAC_PROP_EN_10FDX_CAP:
1284 1285 if (hw->phy.media_type != e1000_media_type_copper) {
1285 1286 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1286 1287 } else {
1287 1288 (void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1288 1289 mac_prop_info_set_default_uint8(prh,
1289 1290 (phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0);
1290 1291 }
1291 1292 break;
1292 1293
1293 1294 case MAC_PROP_ADV_10HDX_CAP:
1294 1295 case MAC_PROP_EN_10HDX_CAP:
1295 1296 if (hw->phy.media_type != e1000_media_type_copper) {
1296 1297 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1297 1298 } else {
1298 1299 (void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1299 1300 mac_prop_info_set_default_uint8(prh,
1300 1301 (phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0);
1301 1302 }
1302 1303 break;
1303 1304
1304 1305 case MAC_PROP_AUTONEG:
1305 1306 if (hw->phy.media_type != e1000_media_type_copper) {
1306 1307 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1307 1308 } else {
1308 1309 (void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1309 1310 mac_prop_info_set_default_uint8(prh,
1310 1311 (phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0);
1311 1312 }
1312 1313 break;
1313 1314
1314 1315 case MAC_PROP_FLOWCTRL:
1315 1316 mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_BI);
1316 1317 break;
1317 1318
1318 1319 case MAC_PROP_MTU:
1319 1320 mac_prop_info_set_range_uint32(prh, MIN_MTU, MAX_MTU);
1320 1321 break;
1321 1322
1322 1323 case MAC_PROP_PRIVATE:
1323 1324 igb_priv_prop_info(igb, pr_name, prh);
1324 1325 break;
1325 1326 }
1326 1327
1327 1328 }
1328 1329
1329 1330 boolean_t
1330 1331 igb_param_locked(mac_prop_id_t pr_num)
1331 1332 {
1332 1333 /*
1333 1334 * All en_* parameters are locked (read-only) while
1334 1335 * the device is in any sort of loopback mode ...
1335 1336 */
1336 1337 switch (pr_num) {
1337 1338 case MAC_PROP_EN_1000FDX_CAP:
1338 1339 case MAC_PROP_EN_1000HDX_CAP:
1339 1340 case MAC_PROP_EN_100T4_CAP:
1340 1341 case MAC_PROP_EN_100FDX_CAP:
1341 1342 case MAC_PROP_EN_100HDX_CAP:
1342 1343 case MAC_PROP_EN_10FDX_CAP:
1343 1344 case MAC_PROP_EN_10HDX_CAP:
1344 1345 case MAC_PROP_AUTONEG:
1345 1346 case MAC_PROP_FLOWCTRL:
1346 1347 return (B_TRUE);
1347 1348 }
1348 1349 return (B_FALSE);
1349 1350 }
1350 1351
1351 1352 /* ARGSUSED */
1352 1353 int
1353 1354 igb_set_priv_prop(igb_t *igb, const char *pr_name,
1354 1355 uint_t pr_valsize, const void *pr_val)
1355 1356 {
1356 1357 int err = 0;
1357 1358 long result;
1358 1359 struct e1000_hw *hw = &igb->hw;
1359 1360 int i;
1360 1361
1361 1362 if (strcmp(pr_name, "_eee_support") == 0) {
1362 1363 if (pr_val == NULL)
1363 1364 return (EINVAL);
1364 1365 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1365 1366 switch (result) {
1366 1367 case 0:
1367 1368 case 1:
1368 1369 /*
1369 1370 * For now, only supported on I350/I354.
1370 1371 * Add new mac.type values (or use < instead)
1371 1372 * as new cards offer up EEE.
1372 1373 */
1373 1374 switch (hw->mac.type) {
1374 1375 case e1000_i350:
1375 1376 /* Must set this prior to the set call. */
1376 1377 hw->dev_spec._82575.eee_disable = !result;
1377 1378 if (e1000_set_eee_i350(hw) != E1000_SUCCESS)
1378 1379 err = EIO;
1379 1380 break;
1380 1381 case e1000_i354:
1381 1382 /* Must set this prior to the set call. */
1382 1383 hw->dev_spec._82575.eee_disable = !result;
1383 1384 if (e1000_set_eee_i354(hw) != E1000_SUCCESS)
1384 1385 err = EIO;
1385 1386 break;
1386 1387 default:
1387 1388 return (ENXIO);
1388 1389 }
1389 1390 break;
1390 1391 default:
1391 1392 err = EINVAL;
1392 1393 /* FALLTHRU */
1393 1394 }
1394 1395 return (err);
1395 1396 }
1396 1397 if (strcmp(pr_name, "_tx_copy_thresh") == 0) {
1397 1398 if (pr_val == NULL) {
1398 1399 err = EINVAL;
1399 1400 return (err);
1400 1401 }
1401 1402 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1402 1403 if (result < MIN_TX_COPY_THRESHOLD ||
1403 1404 result > MAX_TX_COPY_THRESHOLD)
1404 1405 err = EINVAL;
1405 1406 else {
1406 1407 igb->tx_copy_thresh = (uint32_t)result;
1407 1408 }
1408 1409 return (err);
1409 1410 }
1410 1411 if (strcmp(pr_name, "_tx_recycle_thresh") == 0) {
1411 1412 if (pr_val == NULL) {
1412 1413 err = EINVAL;
1413 1414 return (err);
1414 1415 }
1415 1416 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1416 1417 if (result < MIN_TX_RECYCLE_THRESHOLD ||
1417 1418 result > MAX_TX_RECYCLE_THRESHOLD)
1418 1419 err = EINVAL;
1419 1420 else {
1420 1421 igb->tx_recycle_thresh = (uint32_t)result;
1421 1422 }
1422 1423 return (err);
1423 1424 }
1424 1425 if (strcmp(pr_name, "_tx_overload_thresh") == 0) {
1425 1426 if (pr_val == NULL) {
1426 1427 err = EINVAL;
1427 1428 return (err);
1428 1429 }
1429 1430 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1430 1431 if (result < MIN_TX_OVERLOAD_THRESHOLD ||
1431 1432 result > MAX_TX_OVERLOAD_THRESHOLD)
1432 1433 err = EINVAL;
1433 1434 else {
1434 1435 igb->tx_overload_thresh = (uint32_t)result;
1435 1436 }
1436 1437 return (err);
1437 1438 }
1438 1439 if (strcmp(pr_name, "_tx_resched_thresh") == 0) {
1439 1440 if (pr_val == NULL) {
1440 1441 err = EINVAL;
1441 1442 return (err);
1442 1443 }
1443 1444 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1444 1445 if (result < MIN_TX_RESCHED_THRESHOLD ||
1445 1446 result > MAX_TX_RESCHED_THRESHOLD ||
1446 1447 result > igb->tx_ring_size)
1447 1448 err = EINVAL;
1448 1449 else {
1449 1450 igb->tx_resched_thresh = (uint32_t)result;
1450 1451 }
1451 1452 return (err);
1452 1453 }
1453 1454 if (strcmp(pr_name, "_rx_copy_thresh") == 0) {
1454 1455 if (pr_val == NULL) {
1455 1456 err = EINVAL;
1456 1457 return (err);
1457 1458 }
1458 1459 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1459 1460 if (result < MIN_RX_COPY_THRESHOLD ||
1460 1461 result > MAX_RX_COPY_THRESHOLD)
1461 1462 err = EINVAL;
1462 1463 else {
1463 1464 igb->rx_copy_thresh = (uint32_t)result;
1464 1465 }
1465 1466 return (err);
1466 1467 }
1467 1468 if (strcmp(pr_name, "_rx_limit_per_intr") == 0) {
1468 1469 if (pr_val == NULL) {
1469 1470 err = EINVAL;
1470 1471 return (err);
1471 1472 }
1472 1473 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1473 1474 if (result < MIN_RX_LIMIT_PER_INTR ||
1474 1475 result > MAX_RX_LIMIT_PER_INTR)
1475 1476 err = EINVAL;
1476 1477 else {
1477 1478 igb->rx_limit_per_intr = (uint32_t)result;
1478 1479 }
1479 1480 return (err);
1480 1481 }
1481 1482 if (strcmp(pr_name, "_intr_throttling") == 0) {
1482 1483 if (pr_val == NULL) {
1483 1484 err = EINVAL;
1484 1485 return (err);
1485 1486 }
1486 1487 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1487 1488
1488 1489 if (result < igb->capab->min_intr_throttle ||
1489 1490 result > igb->capab->max_intr_throttle)
1490 1491 err = EINVAL;
1491 1492 else {
1492 1493 igb->intr_throttling[0] = (uint32_t)result;
1493 1494
1494 1495 for (i = 0; i < MAX_NUM_EITR; i++)
1495 1496 igb->intr_throttling[i] =
1496 1497 igb->intr_throttling[0];
1497 1498
1498 1499 /* Set interrupt throttling rate */
1499 1500 for (i = 0; i < igb->intr_cnt; i++)
1500 1501 E1000_WRITE_REG(hw, E1000_EITR(i),
1501 1502 igb->intr_throttling[i]);
1502 1503 }
1503 1504 return (err);
1504 1505 }
1505 1506 return (ENOTSUP);
1506 1507 }
1507 1508
1508 1509 int
1509 1510 igb_get_priv_prop(igb_t *igb, const char *pr_name, uint_t pr_valsize,
1510 1511 void *pr_val)
1511 1512 {
1512 1513 int value;
1513 1514
1514 1515 if (strcmp(pr_name, "_adv_pause_cap") == 0) {
1515 1516 value = igb->param_adv_pause_cap;
1516 1517 } else if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
1517 1518 value = igb->param_adv_asym_pause_cap;
1518 1519 } else if (strcmp(pr_name, "_eee_support") == 0) {
1519 1520 /*
1520 1521 * For now, only supported on I350. Add new mac.type values
1521 1522 * (or use < instead) as new cards offer up EEE.
1522 1523 */
1523 1524 switch (igb->hw.mac.type) {
1524 1525 case e1000_i350:
1525 1526 case e1000_i354:
1526 1527 value = !(igb->hw.dev_spec._82575.eee_disable);
1527 1528 break;
1528 1529 default:
1529 1530 value = 0;
1530 1531 }
1531 1532 } else if (strcmp(pr_name, "_tx_copy_thresh") == 0) {
1532 1533 value = igb->tx_copy_thresh;
1533 1534 } else if (strcmp(pr_name, "_tx_recycle_thresh") == 0) {
1534 1535 value = igb->tx_recycle_thresh;
1535 1536 } else if (strcmp(pr_name, "_tx_overload_thresh") == 0) {
1536 1537 value = igb->tx_overload_thresh;
1537 1538 } else if (strcmp(pr_name, "_tx_resched_thresh") == 0) {
1538 1539 value = igb->tx_resched_thresh;
1539 1540 } else if (strcmp(pr_name, "_rx_copy_thresh") == 0) {
1540 1541 value = igb->rx_copy_thresh;
1541 1542 } else if (strcmp(pr_name, "_rx_limit_per_intr") == 0) {
1542 1543 value = igb->rx_limit_per_intr;
1543 1544 } else if (strcmp(pr_name, "_intr_throttling") == 0) {
1544 1545 value = igb->intr_throttling[0];
1545 1546 } else {
1546 1547 return (ENOTSUP);
1547 1548 }
1548 1549
1549 1550 (void) snprintf(pr_val, pr_valsize, "%d", value);
1550 1551 return (0);
1551 1552 }
1552 1553
1553 1554 void
1554 1555 igb_priv_prop_info(igb_t *igb, const char *pr_name, mac_prop_info_handle_t prh)
1555 1556 {
1556 1557 char valstr[64];
1557 1558 int value;
1558 1559
1559 1560 if (strcmp(pr_name, "_adv_pause_cap") == 0 ||
1560 1561 strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
1561 1562 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1562 1563 return;
1563 1564 } else if (strcmp(pr_name, "_tx_copy_thresh") == 0) {
1564 1565 value = DEFAULT_TX_COPY_THRESHOLD;
1565 1566 } else if (strcmp(pr_name, "_tx_recycle_thresh") == 0) {
1566 1567 value = DEFAULT_TX_RECYCLE_THRESHOLD;
1567 1568 } else if (strcmp(pr_name, "_tx_overload_thresh") == 0) {
1568 1569 value = DEFAULT_TX_OVERLOAD_THRESHOLD;
1569 1570 } else if (strcmp(pr_name, "_tx_resched_thresh") == 0) {
1570 1571 value = DEFAULT_TX_RESCHED_THRESHOLD;
1571 1572 } else if (strcmp(pr_name, "_rx_copy_thresh") == 0) {
1572 1573 value = DEFAULT_RX_COPY_THRESHOLD;
1573 1574 } else if (strcmp(pr_name, "_rx_limit_per_intr") == 0) {
1574 1575 value = DEFAULT_RX_LIMIT_PER_INTR;
1575 1576 } else if (strcmp(pr_name, "_intr_throttling") == 0) {
1576 1577 value = igb->capab->def_intr_throttle;
1577 1578 } else {
1578 1579 return;
1579 1580 }
1580 1581
1581 1582 (void) snprintf(valstr, sizeof (valstr), "%d", value);
1582 1583 mac_prop_info_set_default_str(prh, valstr);
1583 1584 }
↓ open down ↓ |
352 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX