6601 Various GLD drivers return EINVAL instead of ENOTSUP for unused mac_prop_id_t's
Reviewed by: Garrett D'Amore <garrett@damore.org>
Reviewed by: Dan McDonald <danmcd@omniti.com>
Reviewed by: Igor Kozhukhov <ikozhukhov@gmail.com>
--- old/usr/src/uts/common/io/nxge/nxge_main.c
+++ new/usr/src/uts/common/io/nxge/nxge_main.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
23 + * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
23 24 */
24 25
25 26 /*
26 27 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
27 28 */
28 29 #include <sys/nxge/nxge_impl.h>
29 30 #include <sys/nxge/nxge_hio.h>
30 31 #include <sys/nxge/nxge_rxdma.h>
31 32 #include <sys/pcie.h>
32 33
33 34 uint32_t nxge_use_partition = 0; /* debug partition flag */
34 35 uint32_t nxge_dma_obp_props_only = 1; /* use obp published props */
35 36 uint32_t nxge_use_rdc_intr = 1; /* debug to assign rdc intr */
36 37 /*
37 38 * PSARC/2007/453 MSI-X interrupt limit override
38 39 */
39 40 uint32_t nxge_msi_enable = 2;
40 41
41 42 /*
42 43 * Software workaround for a Neptune (PCI-E)
43 44 * hardware interrupt bug: the hardware
44 45 * may generate spurious interrupts after the
45 46 * device interrupt handler has been removed. If this flag
46 47 * is enabled, the driver will reset the
47 48 * hardware when devices are being detached.
48 49 */
49 50 uint32_t nxge_peu_reset_enable = 0;
50 51
51 52 /*
52 53 * Software workaround for the hardware
53 54 * checksum bugs that affect packet transmission
54 55 * and receive:
55 56 *
56 57 * Usage of nxge_cksum_offload:
57 58 *
58 59 * (1) nxge_cksum_offload = 0 (default):
59 60 * - transmits packets:
60 61 * TCP: uses the hardware checksum feature.
61 62 * UDP: driver will compute the software checksum
62 63 * based on the partial checksum computed
63 64 * by the IP layer.
64 65 * - receives packets
65 66 * TCP: marks packets checksum flags based on hardware result.
66 67 * UDP: will not mark checksum flags.
67 68 *
68 69 * (2) nxge_cksum_offload = 1:
69 70 * - transmit packets:
70 71 * TCP/UDP: uses the hardware checksum feature.
71 72 * - receives packets
72 73 * TCP/UDP: marks packet checksum flags based on hardware result.
73 74 *
74 75 * (3) nxge_cksum_offload = 2:
75 76 * - The driver will not register its checksum capability.
76 77 * Checksum for both TCP and UDP will be computed
77 78 * by the stack.
78 79 * - The software LSO is not allowed in this case.
79 80 *
80 81 * (4) nxge_cksum_offload > 2:
81 82 * - Will be treated as if it were set to 2
82 83 * (stack will compute the checksum).
83 84 *
84 85 * (5) If the hardware bug is fixed, this workaround
85 86 * needs to be updated accordingly to reflect
86 87 * the new hardware revision.
87 88 */
88 89 uint32_t nxge_cksum_offload = 0;
89 90
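/*
 * Editor's note (illustrative, not part of this change): like the other
 * module globals below, nxge_cksum_offload is normally overridden from
 * /etc/system rather than edited here, e.g.:
 *
 *	set nxge:nxge_cksum_offload = 2
 *
 * which takes effect at the next boot.
 */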
90 91 /*
91 92 * Globals: tunable parameters (/etc/system or adb)
92 93 *
93 94 */
94 95 uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
95 96 uint32_t nxge_rbr_spare_size = 0;
96 97 uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT;
97 98 uint16_t nxge_rdc_buf_offset = SW_OFFSET_NO_OFFSET;
98 99 uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
99 100 boolean_t nxge_no_msg = B_TRUE; /* control message display */
100 101 uint32_t nxge_no_link_notify = 0; /* control DL_NOTIFY */
101 102 uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX;
102 103 uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN;
103 104 uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN;
104 105 uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU;
105 106 nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL;
106 107
107 108 /* MAX LSO size */
108 109 #define NXGE_LSO_MAXLEN 65535
109 110 uint32_t nxge_lso_max = NXGE_LSO_MAXLEN;
110 111
111 112
112 113 /*
113 114 * Add tunable to reduce the amount of time spent in the
114 115 * ISR doing Rx Processing.
115 116 */
116 117 uint32_t nxge_max_rx_pkts = 1024;
117 118
118 119 /*
119 120 * Tunables to manage the receive buffer blocks.
120 121 *
121 122 * nxge_rx_threshold_hi: copy all buffers.
122 123 * nxge_rx_bcopy_size_type: receive buffer block size type.
123 124 * nxge_rx_threshold_lo: copy only up to tunable block size type.
124 125 */
125 126 nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
126 127 nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
127 128 nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;
128 129
129 130 /* Use kmem_alloc() to allocate data buffers. */
130 131 #if defined(__sparc)
131 132 uint32_t nxge_use_kmem_alloc = 1;
132 133 #elif defined(__i386)
133 134 uint32_t nxge_use_kmem_alloc = 0;
134 135 #else
135 136 uint32_t nxge_use_kmem_alloc = 1;
136 137 #endif
137 138
138 139 rtrace_t npi_rtracebuf;
139 140
140 141 /*
141 142 * The hardware sometimes fails to allow enough time for the link partner
142 143 * to send an acknowledgement for packets that the hardware sent to it. The
143 144 * hardware resends the packets earlier than it should in those instances.
144 145 * This behavior caused some switches to acknowledge the wrong packets
145 146 * and it triggered the fatal error.
146 147 * This software workaround is to set the replay timer to a value
147 148 * suggested by the hardware team.
148 149 *
149 150 * PCI config space replay timer register:
150 151 * The following replay timeout value is 0xc
151 152 * for bit 14:18.
152 153 */
153 154 #define PCI_REPLAY_TIMEOUT_CFG_OFFSET 0xb8
154 155 #define PCI_REPLAY_TIMEOUT_SHIFT 14
155 156
156 157 uint32_t nxge_set_replay_timer = 1;
157 158 uint32_t nxge_replay_timeout = 0xc;
158 159
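/*
 * Editor's note: the sketch below is illustrative only and is not part of
 * this change; the driver's real logic lives in nxge_set_pci_replay_timeout()
 * further down in this file. It shows how the 5-bit replay-timeout field at
 * bits 18:14 of the PCI config register named above would typically be
 * updated through the DDI PCI config access routines.
 */
static void
nxge_replay_timeout_sketch(ddi_acc_handle_t cfg_handle)
{
	uint32_t val;

	val = pci_config_get32(cfg_handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET);
	val &= ~(0x1f << PCI_REPLAY_TIMEOUT_SHIFT);	/* clear bits 18:14 */
	val |= (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT);
	pci_config_put32(cfg_handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET, val);
}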
159 160 /*
160 161 * The transmit serialization sometimes causes the thread
161 162 * to sleep longer than it should before calling the
162 163 * driver transmit function.
163 164 * The performance group suggests that a time wait tunable
164 165 * can be used to set the maximum wait time when needed
165 166 * and the default is set to 1 tick.
166 167 */
167 168 uint32_t nxge_tx_serial_maxsleep = 1;
168 169
169 170 #if defined(sun4v)
170 171 /*
171 172 * Hypervisor N2/NIU services information.
172 173 */
173 174 /*
174 175 * The following is the default API supported:
175 176 * major 1 and minor 1.
176 177 *
177 178 * Please update the MAX_NIU_MAJORS,
178 179 * MAX_NIU_MINORS, and minor number supported
179 180 * when newer Hypervisor API interfaces
180 181 * are added. Also, please update nxge_hsvc_register()
181 182 * if needed.
182 183 */
183 184 static hsvc_info_t niu_hsvc = {
184 185 HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
185 186 NIU_MINOR_VER, "nxge"
186 187 };
187 188
188 189 static int nxge_hsvc_register(p_nxge_t);
189 190 #endif
190 191
191 192 /*
192 193 * Function Prototypes
193 194 */
194 195 static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
195 196 static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
196 197 static void nxge_unattach(p_nxge_t);
197 198 static int nxge_quiesce(dev_info_t *);
198 199
199 200 #if NXGE_PROPERTY
200 201 static void nxge_remove_hard_properties(p_nxge_t);
201 202 #endif
202 203
203 204 /*
204 205 * These two functions are required by nxge_hio.c
205 206 */
206 207 extern int nxge_m_mmac_remove(void *arg, int slot);
207 208 extern void nxge_grp_cleanup(p_nxge_t nxge);
208 209
209 210 static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);
210 211
211 212 static nxge_status_t nxge_setup_mutexes(p_nxge_t);
212 213 static void nxge_destroy_mutexes(p_nxge_t);
213 214
214 215 static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
215 216 static void nxge_unmap_regs(p_nxge_t nxgep);
216 217 #ifdef NXGE_DEBUG
217 218 static void nxge_test_map_regs(p_nxge_t nxgep);
218 219 #endif
219 220
220 221 static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
221 222 static void nxge_remove_intrs(p_nxge_t nxgep);
222 223
223 224 static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
224 225 static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
225 226 static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
226 227 static void nxge_intrs_enable(p_nxge_t nxgep);
227 228 static void nxge_intrs_disable(p_nxge_t nxgep);
228 229
229 230 static void nxge_suspend(p_nxge_t);
230 231 static nxge_status_t nxge_resume(p_nxge_t);
231 232
232 233 static nxge_status_t nxge_setup_dev(p_nxge_t);
233 234 static void nxge_destroy_dev(p_nxge_t);
234 235
235 236 static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
236 237 static void nxge_free_mem_pool(p_nxge_t);
237 238
238 239 nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
239 240 static void nxge_free_rx_mem_pool(p_nxge_t);
240 241
241 242 nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
242 243 static void nxge_free_tx_mem_pool(p_nxge_t);
243 244
244 245 static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
245 246 struct ddi_dma_attr *,
246 247 size_t, ddi_device_acc_attr_t *, uint_t,
247 248 p_nxge_dma_common_t);
248 249
249 250 static void nxge_dma_mem_free(p_nxge_dma_common_t);
250 251 static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t);
251 252
252 253 static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
253 254 p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
254 255 static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
255 256
256 257 static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
257 258 p_nxge_dma_common_t *, size_t);
258 259 static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);
259 260
260 261 extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
261 262 p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
262 263 static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
263 264
264 265 extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
265 266 p_nxge_dma_common_t *,
266 267 size_t);
267 268 static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);
268 269
269 270 static int nxge_init_common_dev(p_nxge_t);
270 271 static void nxge_uninit_common_dev(p_nxge_t);
271 272 extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *,
272 273 char *, caddr_t);
273 274 #if defined(sun4v)
274 275 extern nxge_status_t nxge_hio_rdc_enable(p_nxge_t nxgep);
275 276 extern nxge_status_t nxge_hio_rdc_intr_arm(p_nxge_t nxge, boolean_t arm);
276 277 #endif
277 278
278 279 /*
279 280 * The next declarations are for the GLDv3 interface.
280 281 */
281 282 static int nxge_m_start(void *);
282 283 static void nxge_m_stop(void *);
283 284 static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
284 285 static int nxge_m_promisc(void *, boolean_t);
285 286 static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
286 287 nxge_status_t nxge_mac_register(p_nxge_t);
287 288 static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
288 289 int slot, int rdctbl, boolean_t usetbl);
289 290 void nxge_mmac_kstat_update(p_nxge_t nxgep, int slot,
290 291 boolean_t factory);
291 292
292 293 static void nxge_m_getfactaddr(void *, uint_t, uint8_t *);
293 294 static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
294 295 static int nxge_m_setprop(void *, const char *, mac_prop_id_t,
295 296 uint_t, const void *);
296 297 static int nxge_m_getprop(void *, const char *, mac_prop_id_t,
297 298 uint_t, void *);
298 299 static void nxge_m_propinfo(void *, const char *, mac_prop_id_t,
299 300 mac_prop_info_handle_t);
300 301 static void nxge_priv_propinfo(const char *, mac_prop_info_handle_t);
301 302 static int nxge_set_priv_prop(nxge_t *, const char *, uint_t,
302 303 const void *);
303 304 static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, void *);
304 305 static void nxge_fill_ring(void *, mac_ring_type_t, const int, const int,
305 306 mac_ring_info_t *, mac_ring_handle_t);
306 307 static void nxge_group_add_ring(mac_group_driver_t, mac_ring_driver_t,
307 308 mac_ring_type_t);
308 309 static void nxge_group_rem_ring(mac_group_driver_t, mac_ring_driver_t,
309 310 mac_ring_type_t);
310 311
311 312 static void nxge_niu_peu_reset(p_nxge_t nxgep);
312 313 static void nxge_set_pci_replay_timeout(nxge_t *);
313 314
314 315 char *nxge_priv_props[] = {
315 316 "_adv_10gfdx_cap",
316 317 "_adv_pause_cap",
317 318 "_function_number",
318 319 "_fw_version",
319 320 "_port_mode",
320 321 "_hot_swap_phy",
321 322 "_rxdma_intr_time",
322 323 "_rxdma_intr_pkts",
323 324 "_class_opt_ipv4_tcp",
324 325 "_class_opt_ipv4_udp",
325 326 "_class_opt_ipv4_ah",
326 327 "_class_opt_ipv4_sctp",
327 328 "_class_opt_ipv6_tcp",
328 329 "_class_opt_ipv6_udp",
329 330 "_class_opt_ipv6_ah",
330 331 "_class_opt_ipv6_sctp",
331 332 "_soft_lso_enable",
332 333 NULL
333 334 };
334 335
335 336 #define NXGE_NEPTUNE_MAGIC 0x4E584745UL
336 337 #define MAX_DUMP_SZ 256
337 338
338 339 #define NXGE_M_CALLBACK_FLAGS \
339 340 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
340 341
341 342 mac_callbacks_t nxge_m_callbacks = {
342 343 NXGE_M_CALLBACK_FLAGS,
343 344 nxge_m_stat,
344 345 nxge_m_start,
345 346 nxge_m_stop,
346 347 nxge_m_promisc,
347 348 nxge_m_multicst,
348 349 NULL,
349 350 NULL,
350 351 NULL,
351 352 nxge_m_ioctl,
352 353 nxge_m_getcapab,
353 354 NULL,
354 355 NULL,
355 356 nxge_m_setprop,
356 357 nxge_m_getprop,
357 358 nxge_m_propinfo
358 359 };
359 360
360 361 void
361 362 nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);
362 363
363 364 /* PSARC/2007/453 MSI-X interrupt limit override. */
364 365 #define NXGE_MSIX_REQUEST_10G 8
365 366 #define NXGE_MSIX_REQUEST_1G 2
366 367 static int nxge_create_msi_property(p_nxge_t);
367 368 /*
368 369 * For applications that care about the
369 370 * latency, it was requested by PAE and the
370 371 * customers that the driver has tunables that
371 372 * allow the user to tune it to a higher number of
372 373 * interrupts to spread the interrupts among
373 374 * multiple channels. The DDI framework limits
374 375 * the maximum number of MSI-X resources to allocate
375 376 * to 8 (ddi_msix_alloc_limit). If more than 8
376 377 * is set, ddi_msix_alloc_limit must be set accordingly.
377 378 * The default number of MSI interrupts are set to
378 379 * 8 for 10G and 2 for 1G link.
379 380 */
380 381 #define NXGE_MSIX_MAX_ALLOWED 32
381 382 uint32_t nxge_msix_10g_intrs = NXGE_MSIX_REQUEST_10G;
382 383 uint32_t nxge_msix_1g_intrs = NXGE_MSIX_REQUEST_1G;
383 384
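/*
 * Editor's note (illustrative, not part of this change): asking for more
 * than 8 MSI-X interrupts on a 10G port means raising both the driver and
 * the DDI framework limits, e.g. in /etc/system:
 *
 *	set nxge:nxge_msix_10g_intrs = 16
 *	set ddi_msix_alloc_limit = 16
 */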
384 385 /*
385 386 * These global variables control the message
386 387 * output.
387 388 */
388 389 out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
389 390 uint64_t nxge_debug_level;
390 391
391 392 /*
392 393 * This list contains the instance structures for the Neptune
393 394 * devices present in the system. The lock exists to guarantee
394 395 * mutually exclusive access to the list.
395 396 */
396 397 void *nxge_list = NULL;
397 398 void *nxge_hw_list = NULL;
398 399 nxge_os_mutex_t nxge_common_lock;
399 400 nxge_os_mutex_t nxgedebuglock;
400 401
401 402 extern uint64_t npi_debug_level;
402 403
403 404 extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *);
404 405 extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *);
405 406 extern nxge_status_t nxge_ldgv_uninit(p_nxge_t);
406 407 extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t);
407 408 extern void nxge_fm_init(p_nxge_t,
408 409 ddi_device_acc_attr_t *,
409 410 ddi_dma_attr_t *);
410 411 extern void nxge_fm_fini(p_nxge_t);
411 412 extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);
412 413
413 414 /*
414 415 * Count used to maintain the number of buffers being used
415 416 * by Neptune instances and loaned up to the upper layers.
416 417 */
417 418 uint32_t nxge_mblks_pending = 0;
418 419
419 420 /*
420 421 * Device register access attributes for PIO.
421 422 */
422 423 static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
423 424 DDI_DEVICE_ATTR_V1,
424 425 DDI_STRUCTURE_LE_ACC,
425 426 DDI_STRICTORDER_ACC,
426 427 DDI_DEFAULT_ACC
427 428 };
428 429
429 430 /*
430 431 * Device descriptor access attributes for DMA.
431 432 */
432 433 static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
433 434 DDI_DEVICE_ATTR_V0,
434 435 DDI_STRUCTURE_LE_ACC,
435 436 DDI_STRICTORDER_ACC
436 437 };
437 438
438 439 /*
439 440 * Device buffer access attributes for DMA.
440 441 */
441 442 static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
442 443 DDI_DEVICE_ATTR_V0,
443 444 DDI_STRUCTURE_BE_ACC,
444 445 DDI_STRICTORDER_ACC
445 446 };
446 447
447 448 ddi_dma_attr_t nxge_desc_dma_attr = {
448 449 DMA_ATTR_V0, /* version number. */
449 450 0, /* low address */
450 451 0xffffffffffffffff, /* high address */
451 452 0xffffffffffffffff, /* address counter max */
452 453 #ifndef NIU_PA_WORKAROUND
453 454 0x100000, /* alignment */
454 455 #else
455 456 0x2000,
456 457 #endif
457 458 0xfc00fc, /* dlim_burstsizes */
458 459 0x1, /* minimum transfer size */
459 460 0xffffffffffffffff, /* maximum transfer size */
460 461 0xffffffffffffffff, /* maximum segment size */
461 462 1, /* scatter/gather list length */
462 463 (unsigned int) 1, /* granularity */
463 464 0 /* attribute flags */
464 465 };
465 466
466 467 ddi_dma_attr_t nxge_tx_dma_attr = {
467 468 DMA_ATTR_V0, /* version number. */
468 469 0, /* low address */
469 470 0xffffffffffffffff, /* high address */
470 471 0xffffffffffffffff, /* address counter max */
471 472 #if defined(_BIG_ENDIAN)
472 473 0x2000, /* alignment */
473 474 #else
474 475 0x1000, /* alignment */
475 476 #endif
476 477 0xfc00fc, /* dlim_burstsizes */
477 478 0x1, /* minimum transfer size */
478 479 0xffffffffffffffff, /* maximum transfer size */
479 480 0xffffffffffffffff, /* maximum segment size */
480 481 5, /* scatter/gather list length */
481 482 (unsigned int) 1, /* granularity */
482 483 0 /* attribute flags */
483 484 };
484 485
485 486 ddi_dma_attr_t nxge_rx_dma_attr = {
486 487 DMA_ATTR_V0, /* version number. */
487 488 0, /* low address */
488 489 0xffffffffffffffff, /* high address */
489 490 0xffffffffffffffff, /* address counter max */
490 491 0x2000, /* alignment */
491 492 0xfc00fc, /* dlim_burstsizes */
492 493 0x1, /* minimum transfer size */
493 494 0xffffffffffffffff, /* maximum transfer size */
494 495 0xffffffffffffffff, /* maximum segment size */
495 496 1, /* scatter/gather list length */
496 497 (unsigned int) 1, /* granularity */
497 498 DDI_DMA_RELAXED_ORDERING /* attribute flags */
498 499 };
499 500
500 501 ddi_dma_lim_t nxge_dma_limits = {
501 502 (uint_t)0, /* dlim_addr_lo */
502 503 (uint_t)0xffffffff, /* dlim_addr_hi */
503 504 (uint_t)0xffffffff, /* dlim_cntr_max */
504 505 (uint_t)0xfc00fc, /* dlim_burstsizes for 32 and 64 bit xfers */
505 506 0x1, /* dlim_minxfer */
506 507 1024 /* dlim_speed */
507 508 };
508 509
509 510 dma_method_t nxge_force_dma = DVMA;
510 511
511 512 /*
512 513 * dma chunk sizes.
513 514 *
514 515 * Try to allocate the largest possible size
515 516 * so that fewer dma chunks need to be managed
516 517 */
517 518 #ifdef NIU_PA_WORKAROUND
518 519 size_t alloc_sizes [] = {0x2000};
519 520 #else
520 521 size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000,
521 522 0x10000, 0x20000, 0x40000, 0x80000,
522 523 0x100000, 0x200000, 0x400000, 0x800000,
523 524 0x1000000, 0x2000000, 0x4000000};
524 525 #endif
525 526
526 527 /*
527 528 * Translate "dev_t" to a pointer to the associated "dev_info_t".
528 529 */
529 530
530 531 extern void nxge_get_environs(nxge_t *);
531 532
532 533 static int
533 534 nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
534 535 {
535 536 p_nxge_t nxgep = NULL;
536 537 int instance;
537 538 int status = DDI_SUCCESS;
538 539 uint8_t portn;
539 540 nxge_mmac_t *mmac_info;
540 541
541 542 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));
542 543
543 544 /*
544 545 * Get the device instance since we'll need to setup
545 546 * or retrieve a soft state for this instance.
546 547 */
547 548 instance = ddi_get_instance(dip);
548 549
549 550 switch (cmd) {
550 551 case DDI_ATTACH:
551 552 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
552 553 break;
553 554
554 555 case DDI_RESUME:
555 556 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
556 557 nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
557 558 if (nxgep == NULL) {
558 559 status = DDI_FAILURE;
559 560 break;
560 561 }
561 562 if (nxgep->dip != dip) {
562 563 status = DDI_FAILURE;
563 564 break;
564 565 }
565 566 if (nxgep->suspended == DDI_PM_SUSPEND) {
566 567 status = ddi_dev_is_needed(nxgep->dip, 0, 1);
567 568 } else {
568 569 status = nxge_resume(nxgep);
569 570 }
570 571 goto nxge_attach_exit;
571 572
572 573 case DDI_PM_RESUME:
573 574 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
574 575 nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
575 576 if (nxgep == NULL) {
576 577 status = DDI_FAILURE;
577 578 break;
578 579 }
579 580 if (nxgep->dip != dip) {
580 581 status = DDI_FAILURE;
581 582 break;
582 583 }
583 584 status = nxge_resume(nxgep);
584 585 goto nxge_attach_exit;
585 586
586 587 default:
587 588 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
588 589 status = DDI_FAILURE;
589 590 goto nxge_attach_exit;
590 591 }
591 592
592 593
593 594 if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
594 595 status = DDI_FAILURE;
595 596 goto nxge_attach_exit;
596 597 }
597 598
598 599 nxgep = ddi_get_soft_state(nxge_list, instance);
599 600 if (nxgep == NULL) {
600 601 status = NXGE_ERROR;
601 602 goto nxge_attach_fail2;
602 603 }
603 604
604 605 nxgep->nxge_magic = NXGE_MAGIC;
605 606
606 607 nxgep->drv_state = 0;
607 608 nxgep->dip = dip;
608 609 nxgep->instance = instance;
609 610 nxgep->p_dip = ddi_get_parent(dip);
610 611 nxgep->nxge_debug_level = nxge_debug_level;
611 612 npi_debug_level = nxge_debug_level;
612 613
613 614 /* Are we a guest running in a Hybrid I/O environment? */
614 615 nxge_get_environs(nxgep);
615 616
616 617 status = nxge_map_regs(nxgep);
617 618
618 619 if (status != NXGE_OK) {
619 620 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
620 621 goto nxge_attach_fail3;
621 622 }
622 623
623 624 nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_rx_dma_attr);
624 625
625 626 /* Create & initialize the per-Neptune data structure */
626 627 /* (even if we're a guest). */
627 628 status = nxge_init_common_dev(nxgep);
628 629 if (status != NXGE_OK) {
629 630 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
630 631 "nxge_init_common_dev failed"));
631 632 goto nxge_attach_fail4;
632 633 }
633 634
634 635 /*
635 636 * Software workaround: set the replay timer.
636 637 */
637 638 if (nxgep->niu_type != N2_NIU) {
638 639 nxge_set_pci_replay_timeout(nxgep);
639 640 }
640 641
641 642 #if defined(sun4v)
642 643 /* This is required by nxge_hio_init(), which follows. */
643 644 if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS)
644 645 goto nxge_attach_fail4;
645 646 #endif
646 647
647 648 if ((status = nxge_hio_init(nxgep)) != NXGE_OK) {
648 649 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
649 650 "nxge_hio_init failed"));
650 651 goto nxge_attach_fail4;
651 652 }
652 653
653 654 if (nxgep->niu_type == NEPTUNE_2_10GF) {
654 655 if (nxgep->function_num > 1) {
655 656 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported"
656 657 " function %d. Only functions 0 and 1 are "
657 658 "supported for this card.", nxgep->function_num));
658 659 status = NXGE_ERROR;
659 660 goto nxge_attach_fail4;
660 661 }
661 662 }
662 663
663 664 if (isLDOMguest(nxgep)) {
664 665 /*
665 666 * Use the function number here.
666 667 */
667 668 nxgep->mac.portnum = nxgep->function_num;
668 669 nxgep->mac.porttype = PORT_TYPE_LOGICAL;
669 670
670 671 /* XXX We'll set the MAC address counts to 1 for now. */
671 672 mmac_info = &nxgep->nxge_mmac_info;
672 673 mmac_info->num_mmac = 1;
673 674 mmac_info->naddrfree = 1;
674 675 } else {
675 676 portn = NXGE_GET_PORT_NUM(nxgep->function_num);
676 677 nxgep->mac.portnum = portn;
677 678 if ((portn == 0) || (portn == 1))
678 679 nxgep->mac.porttype = PORT_TYPE_XMAC;
679 680 else
680 681 nxgep->mac.porttype = PORT_TYPE_BMAC;
681 682 /*
682 683 * Neptune has 4 ports, the first 2 ports use XMAC (10G MAC)
683 684 * internally; the other 2 ports use BMAC (1G "Big" MAC).
684 685 * The two types of MACs have different characteristics.
685 686 */
686 687 mmac_info = &nxgep->nxge_mmac_info;
687 688 if (nxgep->function_num < 2) {
688 689 mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
689 690 mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
690 691 } else {
691 692 mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
692 693 mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
693 694 }
694 695 }
695 696 /*
696 697 * Set up the Ndd parameters for this instance.
697 698 */
698 699 nxge_init_param(nxgep);
699 700
700 701 /*
701 702 * Setup Register Tracing Buffer.
702 703 */
703 704 npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);
704 705
705 706 /* init stats ptr */
706 707 nxge_init_statsp(nxgep);
707 708
708 709 /*
709 710 * Copy the vpd info from eeprom to a local data
710 711 * structure, and then check its validity.
711 712 */
712 713 if (!isLDOMguest(nxgep)) {
713 714 int *regp;
714 715 uint_t reglen;
715 716 int rv;
716 717
717 718 nxge_vpd_info_get(nxgep);
718 719
719 720 /* Find the NIU config handle. */
720 721 rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
721 722 ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS,
722 723 "reg", ®p, ®len);
723 724
724 725 if (rv != DDI_PROP_SUCCESS) {
725 726 goto nxge_attach_fail5;
726 727 }
727 728 /*
728 729 * The address_hi, that is the first int in the reg
729 730 * property, contains the config handle, but bits 28-31,
730 731 * which are OBP-specific info, need to be removed.
731 732 */
732 733 nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF;
733 734 ddi_prop_free(regp);
734 735 }
735 736
736 737 /*
737 738 * Set the defaults for the MTU size.
738 739 */
739 740 nxge_hw_id_init(nxgep);
740 741
741 742 if (isLDOMguest(nxgep)) {
742 743 uchar_t *prop_val;
743 744 uint_t prop_len;
744 745 uint32_t max_frame_size;
745 746
746 747 extern void nxge_get_logical_props(p_nxge_t);
747 748
748 749 nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR;
749 750 nxgep->mac.portmode = PORT_LOGICAL;
750 751 (void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip,
751 752 "phy-type", "virtual transceiver");
752 753
753 754 nxgep->nports = 1;
754 755 nxgep->board_ver = 0; /* XXX What? */
755 756
756 757 /*
757 758 * local-mac-address property gives us info on which
758 759 * specific MAC address the Hybrid resource is associated
759 760 * with.
760 761 */
761 762 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
762 763 "local-mac-address", &prop_val,
763 764 &prop_len) != DDI_PROP_SUCCESS) {
764 765 goto nxge_attach_fail5;
765 766 }
766 767 if (prop_len != ETHERADDRL) {
767 768 ddi_prop_free(prop_val);
768 769 goto nxge_attach_fail5;
769 770 }
770 771 ether_copy(prop_val, nxgep->hio_mac_addr);
771 772 ddi_prop_free(prop_val);
772 773 nxge_get_logical_props(nxgep);
773 774
774 775 /*
775 776 * Enable Jumbo property based on the "max-frame-size"
776 777 * property value.
777 778 */
778 779 max_frame_size = ddi_prop_get_int(DDI_DEV_T_ANY,
779 780 nxgep->dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
780 781 "max-frame-size", NXGE_MTU_DEFAULT_MAX);
781 782 if ((max_frame_size > NXGE_MTU_DEFAULT_MAX) &&
782 783 (max_frame_size <= TX_JUMBO_MTU)) {
783 784 nxgep->mac.is_jumbo = B_TRUE;
784 785 nxgep->mac.maxframesize = (uint16_t)max_frame_size;
785 786 nxgep->mac.default_mtu = nxgep->mac.maxframesize -
786 787 NXGE_EHEADER_VLAN_CRC;
787 788 }
788 789 } else {
789 790 status = nxge_xcvr_find(nxgep);
790 791
791 792 if (status != NXGE_OK) {
792 793 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
793 794 " Couldn't determine card type"
794 795 " .... exit "));
795 796 goto nxge_attach_fail5;
796 797 }
797 798
798 799 status = nxge_get_config_properties(nxgep);
799 800
800 801 if (status != NXGE_OK) {
801 802 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
802 803 "get_hw create failed"));
803 804 goto nxge_attach_fail;
804 805 }
805 806 }
806 807
807 808 /*
808 809 * Setup the Kstats for the driver.
809 810 */
810 811 nxge_setup_kstats(nxgep);
811 812
812 813 if (!isLDOMguest(nxgep))
813 814 nxge_setup_param(nxgep);
814 815
815 816 status = nxge_setup_system_dma_pages(nxgep);
816 817 if (status != NXGE_OK) {
817 818 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
818 819 goto nxge_attach_fail;
819 820 }
820 821
821 822
822 823 if (!isLDOMguest(nxgep))
823 824 nxge_hw_init_niu_common(nxgep);
824 825
825 826 status = nxge_setup_mutexes(nxgep);
826 827 if (status != NXGE_OK) {
827 828 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
828 829 goto nxge_attach_fail;
829 830 }
830 831
831 832 #if defined(sun4v)
832 833 if (isLDOMguest(nxgep)) {
833 834 /* Find our VR & channel sets. */
834 835 status = nxge_hio_vr_add(nxgep);
835 836 if (status != DDI_SUCCESS) {
836 837 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
837 838 "nxge_hio_vr_add failed"));
838 839 (void) hsvc_unregister(&nxgep->niu_hsvc);
839 840 nxgep->niu_hsvc_available = B_FALSE;
840 841 goto nxge_attach_fail;
841 842 }
842 843 goto nxge_attach_exit;
843 844 }
844 845 #endif
845 846
846 847 status = nxge_setup_dev(nxgep);
847 848 if (status != DDI_SUCCESS) {
848 849 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
849 850 goto nxge_attach_fail;
850 851 }
851 852
852 853 status = nxge_add_intrs(nxgep);
853 854 if (status != DDI_SUCCESS) {
854 855 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
855 856 goto nxge_attach_fail;
856 857 }
857 858
858 859 /* If a guest, register with vio_net instead. */
859 860 if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
860 861 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
861 862 "unable to register to mac layer (%d)", status));
862 863 goto nxge_attach_fail;
863 864 }
864 865
865 866 mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);
866 867
867 868 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
868 869 "registered to mac (instance %d)", instance));
869 870
870 871 /* nxge_link_monitor calls xcvr.check_link recursively */
871 872 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
872 873
873 874 goto nxge_attach_exit;
874 875
875 876 nxge_attach_fail:
876 877 nxge_unattach(nxgep);
877 878 goto nxge_attach_fail1;
878 879
879 880 nxge_attach_fail5:
880 881 /*
881 882 * Tear down the ndd parameters setup.
882 883 */
883 884 nxge_destroy_param(nxgep);
884 885
885 886 /*
886 887 * Tear down the kstat setup.
887 888 */
888 889 nxge_destroy_kstats(nxgep);
889 890
890 891 nxge_attach_fail4:
891 892 if (nxgep->nxge_hw_p) {
892 893 nxge_uninit_common_dev(nxgep);
893 894 nxgep->nxge_hw_p = NULL;
894 895 }
895 896
896 897 nxge_attach_fail3:
897 898 /*
898 899 * Unmap the register setup.
899 900 */
900 901 nxge_unmap_regs(nxgep);
901 902
902 903 nxge_fm_fini(nxgep);
903 904
904 905 nxge_attach_fail2:
905 906 ddi_soft_state_free(nxge_list, nxgep->instance);
906 907
907 908 nxge_attach_fail1:
908 909 if (status != NXGE_OK)
909 910 status = (NXGE_ERROR | NXGE_DDI_FAILED);
910 911 nxgep = NULL;
911 912
912 913 nxge_attach_exit:
913 914 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
914 915 status));
915 916
916 917 return (status);
917 918 }
918 919
919 920 static int
920 921 nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
921 922 {
922 923 int status = DDI_SUCCESS;
923 924 int instance;
924 925 p_nxge_t nxgep = NULL;
925 926
926 927 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
927 928 instance = ddi_get_instance(dip);
928 929 nxgep = ddi_get_soft_state(nxge_list, instance);
929 930 if (nxgep == NULL) {
930 931 status = DDI_FAILURE;
931 932 goto nxge_detach_exit;
932 933 }
933 934
934 935 switch (cmd) {
935 936 case DDI_DETACH:
936 937 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
937 938 break;
938 939
939 940 case DDI_PM_SUSPEND:
940 941 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
941 942 nxgep->suspended = DDI_PM_SUSPEND;
942 943 nxge_suspend(nxgep);
943 944 break;
944 945
945 946 case DDI_SUSPEND:
946 947 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
947 948 if (nxgep->suspended != DDI_PM_SUSPEND) {
948 949 nxgep->suspended = DDI_SUSPEND;
949 950 nxge_suspend(nxgep);
950 951 }
951 952 break;
952 953
953 954 default:
954 955 status = DDI_FAILURE;
955 956 }
956 957
957 958 if (cmd != DDI_DETACH)
958 959 goto nxge_detach_exit;
959 960
960 961 /*
961 962 * Stop the xcvr polling.
962 963 */
963 964 nxgep->suspended = cmd;
964 965
965 966 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
966 967
967 968 if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
968 969 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
969 970 "<== nxge_detach status = 0x%08X", status));
970 971 return (DDI_FAILURE);
971 972 }
972 973
973 974 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
974 975 "<== nxge_detach (mac_unregister) status = 0x%08X", status));
975 976
976 977 nxge_unattach(nxgep);
977 978 nxgep = NULL;
978 979
979 980 nxge_detach_exit:
980 981 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
981 982 status));
982 983
983 984 return (status);
984 985 }
985 986
986 987 static void
987 988 nxge_unattach(p_nxge_t nxgep)
988 989 {
989 990 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));
990 991
991 992 if (nxgep == NULL || nxgep->dev_regs == NULL) {
992 993 return;
993 994 }
994 995
995 996 nxgep->nxge_magic = 0;
996 997
997 998 if (nxgep->nxge_timerid) {
998 999 nxge_stop_timer(nxgep, nxgep->nxge_timerid);
999 1000 nxgep->nxge_timerid = 0;
1000 1001 }
1001 1002
1002 1003 /*
1003 1004 * If this flag is set, it will affect the Neptune
1004 1005 * only.
1005 1006 */
1006 1007 if ((nxgep->niu_type != N2_NIU) && nxge_peu_reset_enable) {
1007 1008 nxge_niu_peu_reset(nxgep);
1008 1009 }
1009 1010
1010 1011 #if defined(sun4v)
1011 1012 if (isLDOMguest(nxgep)) {
1012 1013 (void) nxge_hio_vr_release(nxgep);
1013 1014 }
1014 1015 #endif
1015 1016
1016 1017 if (nxgep->nxge_hw_p) {
1017 1018 nxge_uninit_common_dev(nxgep);
1018 1019 nxgep->nxge_hw_p = NULL;
1019 1020 }
1020 1021
1021 1022 #if defined(sun4v)
1022 1023 if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
1023 1024 (void) hsvc_unregister(&nxgep->niu_hsvc);
1024 1025 nxgep->niu_hsvc_available = B_FALSE;
1025 1026 }
1026 1027 #endif
1027 1028 /*
1028 1029 * Stop any further interrupts.
1029 1030 */
1030 1031 nxge_remove_intrs(nxgep);
1031 1032
1032 1033 /*
1033 1034 * Stop the device and free resources.
1034 1035 */
1035 1036 if (!isLDOMguest(nxgep)) {
1036 1037 nxge_destroy_dev(nxgep);
1037 1038 }
1038 1039
1039 1040 /*
1040 1041 * Tear down the ndd parameters setup.
1041 1042 */
1042 1043 nxge_destroy_param(nxgep);
1043 1044
1044 1045 /*
1045 1046 * Tear down the kstat setup.
1046 1047 */
1047 1048 nxge_destroy_kstats(nxgep);
1048 1049
1049 1050 /*
1050 1051 * Free any memory allocated for PHY properties
1051 1052 */
1052 1053 if (nxgep->phy_prop.cnt > 0) {
1053 1054 KMEM_FREE(nxgep->phy_prop.arr,
1054 1055 sizeof (nxge_phy_mdio_val_t) * nxgep->phy_prop.cnt);
1055 1056 nxgep->phy_prop.cnt = 0;
1056 1057 }
1057 1058
1058 1059 /*
1059 1060 * Destroy all mutexes.
1060 1061 */
1061 1062 nxge_destroy_mutexes(nxgep);
1062 1063
1063 1064 /*
1064 1065 * Remove the list of ndd parameters which
1065 1066 * were setup during attach.
1066 1067 */
1067 1068 if (nxgep->dip) {
1068 1069 NXGE_DEBUG_MSG((nxgep, OBP_CTL,
1069 1070 " nxge_unattach: remove all properties"));
1070 1071
1071 1072 (void) ddi_prop_remove_all(nxgep->dip);
1072 1073 }
1073 1074
1074 1075 #if NXGE_PROPERTY
1075 1076 nxge_remove_hard_properties(nxgep);
1076 1077 #endif
1077 1078
1078 1079 /*
1079 1080 * Unmap the register setup.
1080 1081 */
1081 1082 nxge_unmap_regs(nxgep);
1082 1083
1083 1084 nxge_fm_fini(nxgep);
1084 1085
1085 1086 ddi_soft_state_free(nxge_list, nxgep->instance);
1086 1087
1087 1088 NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
1088 1089 }
1089 1090
1090 1091 #if defined(sun4v)
1091 1092 int
1092 1093 nxge_hsvc_register(nxge_t *nxgep)
1093 1094 {
1094 1095 nxge_status_t status;
1095 1096 int i, j;
1096 1097
1097 1098 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hsvc_register"));
1098 1099 if (nxgep->niu_type != N2_NIU) {
1099 1100 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hsvc_register"));
1100 1101 return (DDI_SUCCESS);
1101 1102 }
1102 1103
1103 1104 /*
1104 1105 * Currently, the NIU Hypervisor API supports two major versions:
1105 1106 * version 1 and 2.
1106 1107 * If Hypervisor introduces a higher major or minor version,
1107 1108 * please update NIU_MAJOR_HI and NIU_MINOR_HI accordingly.
1108 1109 */
1109 1110 nxgep->niu_hsvc_available = B_FALSE;
1110 1111 bcopy(&niu_hsvc, &nxgep->niu_hsvc,
1111 1112 sizeof (hsvc_info_t));
1112 1113
1113 1114 for (i = NIU_MAJOR_HI; i > 0; i--) {
1114 1115 nxgep->niu_hsvc.hsvc_major = i;
1115 1116 for (j = NIU_MINOR_HI; j >= 0; j--) {
1116 1117 nxgep->niu_hsvc.hsvc_minor = j;
1117 1118 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1118 1119 "nxge_hsvc_register: %s: negotiating "
1119 1120 "hypervisor services revision %d "
1120 1121 "group: 0x%lx major: 0x%lx "
1121 1122 "minor: 0x%lx",
1122 1123 nxgep->niu_hsvc.hsvc_modname,
1123 1124 nxgep->niu_hsvc.hsvc_rev,
1124 1125 nxgep->niu_hsvc.hsvc_group,
1125 1126 nxgep->niu_hsvc.hsvc_major,
1126 1127 nxgep->niu_hsvc.hsvc_minor,
1127 1128 nxgep->niu_min_ver));
1128 1129
1129 1130 if ((status = hsvc_register(&nxgep->niu_hsvc,
1130 1131 &nxgep->niu_min_ver)) == 0) {
1131 1132 /* Use the supported minor */
1132 1133 nxgep->niu_hsvc.hsvc_minor = nxgep->niu_min_ver;
1133 1134 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1134 1135 "nxge_hsvc_register: %s: negotiated "
1135 1136 "hypervisor services revision %d "
1136 1137 "group: 0x%lx major: 0x%lx "
1137 1138 "minor: 0x%lx (niu_min_ver 0x%lx)",
1138 1139 nxgep->niu_hsvc.hsvc_modname,
1139 1140 nxgep->niu_hsvc.hsvc_rev,
1140 1141 nxgep->niu_hsvc.hsvc_group,
1141 1142 nxgep->niu_hsvc.hsvc_major,
1142 1143 nxgep->niu_hsvc.hsvc_minor,
1143 1144 nxgep->niu_min_ver));
1144 1145
1145 1146 nxgep->niu_hsvc_available = B_TRUE;
1146 1147 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1147 1148 "<== nxge_hsvc_register: "
1148 1149 "NIU Hypervisor service enabled"));
1149 1150 return (DDI_SUCCESS);
1150 1151 }
1151 1152
1152 1153 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1153 1154 "nxge_hsvc_register: %s: negotiated failed - "
1154 1155 "try lower major number "
1155 1156 "hypervisor services revision %d "
1156 1157 "group: 0x%lx major: 0x%lx minor: 0x%lx "
1157 1158 "errno: %d",
1158 1159 nxgep->niu_hsvc.hsvc_modname,
1159 1160 nxgep->niu_hsvc.hsvc_rev,
1160 1161 nxgep->niu_hsvc.hsvc_group,
1161 1162 nxgep->niu_hsvc.hsvc_major,
1162 1163 nxgep->niu_hsvc.hsvc_minor, status));
1163 1164 }
1164 1165 }
1165 1166
1166 1167 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1167 1168 "nxge_hsvc_register: %s: cannot negotiate "
1168 1169 "hypervisor services revision %d group: 0x%lx "
1169 1170 "major: 0x%lx minor: 0x%lx errno: %d",
1170 1171 niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
1171 1172 niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
1172 1173 niu_hsvc.hsvc_minor, status));
1173 1174
1174 1175 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1175 1176 "<== nxge_hsvc_register: Register to NIU Hypervisor failed"));
1176 1177
1177 1178 return (DDI_FAILURE);
1178 1179 }
1179 1180 #endif
1180 1181
1181 1182 static char n2_siu_name[] = "niu";
1182 1183
1183 1184 static nxge_status_t
1184 1185 nxge_map_regs(p_nxge_t nxgep)
1185 1186 {
1186 1187 int ddi_status = DDI_SUCCESS;
1187 1188 p_dev_regs_t dev_regs;
1188 1189 char buf[MAXPATHLEN + 1];
1189 1190 char *devname;
1190 1191 #ifdef NXGE_DEBUG
1191 1192 char *sysname;
1192 1193 #endif
1193 1194 off_t regsize;
1194 1195 nxge_status_t status = NXGE_OK;
1195 1196 #if !defined(_BIG_ENDIAN)
1196 1197 off_t pci_offset;
1197 1198 uint16_t pcie_devctl;
1198 1199 #endif
1199 1200
1200 1201 if (isLDOMguest(nxgep)) {
1201 1202 return (nxge_guest_regs_map(nxgep));
1202 1203 }
1203 1204
1204 1205 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
1205 1206 nxgep->dev_regs = NULL;
1206 1207 dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
1207 1208 dev_regs->nxge_regh = NULL;
1208 1209 dev_regs->nxge_pciregh = NULL;
1209 1210 dev_regs->nxge_msix_regh = NULL;
1210 1211 dev_regs->nxge_vir_regh = NULL;
1211 1212 dev_regs->nxge_vir2_regh = NULL;
1212 1213 nxgep->niu_type = NIU_TYPE_NONE;
1213 1214
1214 1215 devname = ddi_pathname(nxgep->dip, buf);
1215 1216 ASSERT(strlen(devname) > 0);
1216 1217 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1217 1218 "nxge_map_regs: pathname devname %s", devname));
1218 1219
1219 1220 /*
1220 1221 * The driver is running on a N2-NIU system if devname is something
1221 1222 * like "/niu@80/network@0"
1222 1223 */
1223 1224 if (strstr(devname, n2_siu_name)) {
1224 1225 /* N2/NIU */
1225 1226 nxgep->niu_type = N2_NIU;
1226 1227 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1227 1228 "nxge_map_regs: N2/NIU devname %s", devname));
1228 1229 /*
1229 1230 * Get function number:
1230 1231 * - N2/NIU: "/niu@80/network@0" and "/niu@80/network@1"
1231 1232 */
1232 1233 nxgep->function_num =
1233 1234 (devname[strlen(devname) -1] == '1' ? 1 : 0);
1234 1235 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1235 1236 "nxge_map_regs: N2/NIU function number %d",
1236 1237 nxgep->function_num));
1237 1238 } else {
1238 1239 int *prop_val;
1239 1240 uint_t prop_len;
1240 1241 uint8_t func_num;
1241 1242
1242 1243 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
1243 1244 0, "reg",
1244 1245 &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
1245 1246 NXGE_DEBUG_MSG((nxgep, VPD_CTL,
1246 1247 "Reg property not found"));
1247 1248 ddi_status = DDI_FAILURE;
1248 1249 goto nxge_map_regs_fail0;
1249 1250
1250 1251 } else {
1251 1252 func_num = (prop_val[0] >> 8) & 0x7;
1252 1253 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1253 1254 "Reg property found: fun # %d",
1254 1255 func_num));
1255 1256 nxgep->function_num = func_num;
1256 1257 if (isLDOMguest(nxgep)) {
1257 1258 nxgep->function_num /= 2;
1258 1259 return (NXGE_OK);
1259 1260 }
1260 1261 ddi_prop_free(prop_val);
1261 1262 }
1262 1263 }
1263 1264
1264 1265 switch (nxgep->niu_type) {
1265 1266 default:
1266 1267 (void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
1267 1268 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1268 1269 "nxge_map_regs: pci config size 0x%x", regsize));
1269 1270
1270 1271 ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
1271 1272 (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
1272 1273 &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
1273 1274 if (ddi_status != DDI_SUCCESS) {
1274 1275 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1275 1276 "ddi_map_regs, nxge bus config regs failed"));
1276 1277 goto nxge_map_regs_fail0;
1277 1278 }
1278 1279 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1279 1280 "nxge_map_reg: PCI config addr 0x%0llx "
1280 1281 " handle 0x%0llx", dev_regs->nxge_pciregp,
1281 1282 dev_regs->nxge_pciregh));
1282 1283 /*
1283 1284 * IMP IMP
1284 1285 * workaround for bit swapping bug in HW
1285 1286 * which ends up in no-snoop = yes
1286 1287 * resulting in DMA not being synched properly
1287 1288 */
1288 1289 #if !defined(_BIG_ENDIAN)
1289 1290 /* workarounds for x86 systems */
1290 1291 pci_offset = 0x80 + PCIE_DEVCTL;
1291 1292 pcie_devctl = pci_config_get16(dev_regs->nxge_pciregh,
1292 1293 pci_offset);
1293 1294 pcie_devctl &= ~PCIE_DEVCTL_ENABLE_NO_SNOOP;
1294 1295 pcie_devctl |= PCIE_DEVCTL_RO_EN;
1295 1296 pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
1296 1297 pcie_devctl);
1297 1298 #endif
1298 1299
1299 1300 (void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
1300 1301 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1301 1302 "nxge_map_regs: pio size 0x%x", regsize));
1302 1303 /* set up the device mapped register */
1303 1304 ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
1304 1305 (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
1305 1306 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
1306 1307 if (ddi_status != DDI_SUCCESS) {
1307 1308 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1308 1309 "ddi_map_regs for Neptune global reg failed"));
1309 1310 goto nxge_map_regs_fail1;
1310 1311 }
1311 1312
1312 1313 /* set up the msi/msi-x mapped register */
1313 1314 (void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
1314 1315 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1315 1316 "nxge_map_regs: msix size 0x%x", regsize));
1316 1317 ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
1317 1318 (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
1318 1319 &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
1319 1320 if (ddi_status != DDI_SUCCESS) {
1320 1321 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1321 1322 "ddi_map_regs for msi reg failed"));
1322 1323 goto nxge_map_regs_fail2;
1323 1324 }
1324 1325
1325 1326 /* set up the vio region mapped register */
1326 1327 (void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
1327 1328 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1328 1329 "nxge_map_regs: vio size 0x%x", regsize));
1329 1330 ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
1330 1331 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
1331 1332 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
1332 1333
1333 1334 if (ddi_status != DDI_SUCCESS) {
1334 1335 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1335 1336 "ddi_map_regs for nxge vio reg failed"));
1336 1337 goto nxge_map_regs_fail3;
1337 1338 }
1338 1339 nxgep->dev_regs = dev_regs;
1339 1340
1340 1341 NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
1341 1342 NPI_PCI_ADD_HANDLE_SET(nxgep,
1342 1343 (npi_reg_ptr_t)dev_regs->nxge_pciregp);
1343 1344 NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
1344 1345 NPI_MSI_ADD_HANDLE_SET(nxgep,
1345 1346 (npi_reg_ptr_t)dev_regs->nxge_msix_regp);
1346 1347
1347 1348 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1348 1349 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);
1349 1350
1350 1351 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1351 1352 NPI_REG_ADD_HANDLE_SET(nxgep,
1352 1353 (npi_reg_ptr_t)dev_regs->nxge_regp);
1353 1354
1354 1355 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
1355 1356 NPI_VREG_ADD_HANDLE_SET(nxgep,
1356 1357 (npi_reg_ptr_t)dev_regs->nxge_vir_regp);
1357 1358
1358 1359 break;
1359 1360
1360 1361 case N2_NIU:
1361 1362 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
1362 1363 /*
1363 1364 * Set up the device mapped register (FWARC 2006/556)
1364 1365 * (changed back to 1: reg starts at 1!)
1365 1366 */
1366 1367 (void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
1367 1368 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1368 1369 "nxge_map_regs: dev size 0x%x", regsize));
1369 1370 ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
1370 1371 (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
1371 1372 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
1372 1373
1373 1374 if (ddi_status != DDI_SUCCESS) {
1374 1375 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1375 1376 "ddi_map_regs for N2/NIU, global reg failed "));
1376 1377 goto nxge_map_regs_fail1;
1377 1378 }
1378 1379
1379 1380 /* set up the first vio region mapped register */
1380 1381 (void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
1381 1382 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1382 1383 "nxge_map_regs: vio (1) size 0x%x", regsize));
1383 1384 ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
1384 1385 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
1385 1386 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
1386 1387
1387 1388 if (ddi_status != DDI_SUCCESS) {
1388 1389 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1389 1390 "ddi_map_regs for nxge vio reg failed"));
1390 1391 goto nxge_map_regs_fail2;
1391 1392 }
1392 1393 /* set up the second vio region mapped register */
1393 1394 (void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
1394 1395 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1395 1396 "nxge_map_regs: vio (3) size 0x%x", regsize));
1396 1397 ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
1397 1398 (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
1398 1399 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);
1399 1400
1400 1401 if (ddi_status != DDI_SUCCESS) {
1401 1402 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1402 1403 "ddi_map_regs for nxge vio2 reg failed"));
1403 1404 goto nxge_map_regs_fail3;
1404 1405 }
1405 1406 nxgep->dev_regs = dev_regs;
1406 1407
1407 1408 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1408 1409 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);
1409 1410
1410 1411 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1411 1412 NPI_REG_ADD_HANDLE_SET(nxgep,
1412 1413 (npi_reg_ptr_t)dev_regs->nxge_regp);
1413 1414
1414 1415 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
1415 1416 NPI_VREG_ADD_HANDLE_SET(nxgep,
1416 1417 (npi_reg_ptr_t)dev_regs->nxge_vir_regp);
1417 1418
1418 1419 NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
1419 1420 NPI_V2REG_ADD_HANDLE_SET(nxgep,
1420 1421 (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);
1421 1422
1422 1423 break;
1423 1424 }
1424 1425
1425 1426 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
1426 1427 " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));
1427 1428
1428 1429 goto nxge_map_regs_exit;
1429 1430 nxge_map_regs_fail3:
1430 1431 if (dev_regs->nxge_msix_regh) {
1431 1432 ddi_regs_map_free(&dev_regs->nxge_msix_regh);
1432 1433 }
1433 1434 if (dev_regs->nxge_vir_regh) {
1434 1435 ddi_regs_map_free(&dev_regs->nxge_vir_regh);
1435 1436 }
1436 1437 nxge_map_regs_fail2:
1437 1438 if (dev_regs->nxge_regh) {
1438 1439 ddi_regs_map_free(&dev_regs->nxge_regh);
1439 1440 }
1440 1441 nxge_map_regs_fail1:
1441 1442 if (dev_regs->nxge_pciregh) {
1442 1443 ddi_regs_map_free(&dev_regs->nxge_pciregh);
1443 1444 }
1444 1445 nxge_map_regs_fail0:
1445 1446 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
1446 1447 kmem_free(dev_regs, sizeof (dev_regs_t));
1447 1448
1448 1449 nxge_map_regs_exit:
1449 1450 if (ddi_status != DDI_SUCCESS)
1450 1451 status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1451 1452 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
1452 1453 return (status);
1453 1454 }
1454 1455
1455 1456 static void
1456 1457 nxge_unmap_regs(p_nxge_t nxgep)
1457 1458 {
1458 1459 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));
1459 1460
1460 1461 if (isLDOMguest(nxgep)) {
1461 1462 nxge_guest_regs_map_free(nxgep);
1462 1463 return;
1463 1464 }
1464 1465
1465 1466 if (nxgep->dev_regs) {
1466 1467 if (nxgep->dev_regs->nxge_pciregh) {
1467 1468 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1468 1469 "==> nxge_unmap_regs: bus"));
1469 1470 ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
1470 1471 nxgep->dev_regs->nxge_pciregh = NULL;
1471 1472 }
1472 1473 if (nxgep->dev_regs->nxge_regh) {
1473 1474 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1474 1475 "==> nxge_unmap_regs: device registers"));
1475 1476 ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
1476 1477 nxgep->dev_regs->nxge_regh = NULL;
1477 1478 }
1478 1479 if (nxgep->dev_regs->nxge_msix_regh) {
1479 1480 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1480 1481 "==> nxge_unmap_regs: device interrupts"));
1481 1482 ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
1482 1483 nxgep->dev_regs->nxge_msix_regh = NULL;
1483 1484 }
1484 1485 if (nxgep->dev_regs->nxge_vir_regh) {
1485 1486 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1486 1487 "==> nxge_unmap_regs: vio region"));
1487 1488 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
1488 1489 nxgep->dev_regs->nxge_vir_regh = NULL;
1489 1490 }
1490 1491 if (nxgep->dev_regs->nxge_vir2_regh) {
1491 1492 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1492 1493 "==> nxge_unmap_regs: vio2 region"));
1493 1494 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
1494 1495 nxgep->dev_regs->nxge_vir2_regh = NULL;
1495 1496 }
1496 1497
1497 1498 kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
1498 1499 nxgep->dev_regs = NULL;
1499 1500 }
1500 1501
1501 1502 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
1502 1503 }
1503 1504
1504 1505 static nxge_status_t
1505 1506 nxge_setup_mutexes(p_nxge_t nxgep)
1506 1507 {
1507 1508 int ddi_status = DDI_SUCCESS;
1508 1509 nxge_status_t status = NXGE_OK;
1509 1510 nxge_classify_t *classify_ptr;
1510 1511 int partition;
1511 1512
1512 1513 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));
1513 1514
1514 1515 /*
1515 1516 * Get the interrupt cookie so the mutexes can be
1516 1517 * Initialized.
1517 1518 */
1518 1519 if (isLDOMguest(nxgep)) {
1519 1520 nxgep->interrupt_cookie = 0;
1520 1521 } else {
1521 1522 ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
1522 1523 &nxgep->interrupt_cookie);
1523 1524
1524 1525 if (ddi_status != DDI_SUCCESS) {
1525 1526 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1526 1527 "<== nxge_setup_mutexes: failed 0x%x",
1527 1528 ddi_status));
1528 1529 goto nxge_setup_mutexes_exit;
1529 1530 }
1530 1531 }
1531 1532
1532 1533 cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
1533 1534 MUTEX_INIT(&nxgep->poll_lock, NULL,
1534 1535 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1535 1536
1536 1537 /*
1537 1538 * Initialize mutexes for this device.
1538 1539 */
1539 1540 MUTEX_INIT(nxgep->genlock, NULL,
1540 1541 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1541 1542 MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
1542 1543 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1543 1544 MUTEX_INIT(&nxgep->mif_lock, NULL,
1544 1545 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1545 1546 MUTEX_INIT(&nxgep->group_lock, NULL,
1546 1547 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1547 1548 RW_INIT(&nxgep->filter_lock, NULL,
1548 1549 RW_DRIVER, (void *)nxgep->interrupt_cookie);
1549 1550
1550 1551 classify_ptr = &nxgep->classifier;
1551 1552 /*
1552 1553 * FFLP Mutexes are never used in interrupt context
1553 1554 * as fflp operation can take very long time to
1554 1555 * complete and hence not suitable to invoke from interrupt
1555 1556 * handlers.
1556 1557 */
1557 1558 MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
1558 1559 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1559 1560 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1560 1561 MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
1561 1562 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1562 1563 for (partition = 0; partition < MAX_PARTITION; partition++) {
1563 1564 MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
1564 1565 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1565 1566 }
1566 1567 }
1567 1568
1568 1569 nxge_setup_mutexes_exit:
1569 1570 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1570 1571 "<== nxge_setup_mutexes status = %x", status));
1571 1572
1572 1573 if (ddi_status != DDI_SUCCESS)
1573 1574 status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1574 1575
1575 1576 return (status);
1576 1577 }
1577 1578
1578 1579 static void
1579 1580 nxge_destroy_mutexes(p_nxge_t nxgep)
1580 1581 {
1581 1582 int partition;
1582 1583 nxge_classify_t *classify_ptr;
1583 1584
1584 1585 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
1585 1586 RW_DESTROY(&nxgep->filter_lock);
1586 1587 MUTEX_DESTROY(&nxgep->group_lock);
1587 1588 MUTEX_DESTROY(&nxgep->mif_lock);
1588 1589 MUTEX_DESTROY(&nxgep->ouraddr_lock);
1589 1590 MUTEX_DESTROY(nxgep->genlock);
1590 1591
1591 1592 classify_ptr = &nxgep->classifier;
1592 1593 MUTEX_DESTROY(&classify_ptr->tcam_lock);
1593 1594
1594 1595 /* Destroy all polling resources. */
1595 1596 MUTEX_DESTROY(&nxgep->poll_lock);
1596 1597 cv_destroy(&nxgep->poll_cv);
1597 1598
1598 1599 /* free data structures, based on HW type */
1599 1600 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1600 1601 MUTEX_DESTROY(&classify_ptr->fcram_lock);
1601 1602 for (partition = 0; partition < MAX_PARTITION; partition++) {
1602 1603 MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
1603 1604 }
1604 1605 }
1605 1606
1606 1607 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
1607 1608 }
1608 1609
1609 1610 nxge_status_t
1610 1611 nxge_init(p_nxge_t nxgep)
1611 1612 {
1612 1613 nxge_status_t status = NXGE_OK;
1613 1614
1614 1615 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));
1615 1616
1616 1617 if (nxgep->drv_state & STATE_HW_INITIALIZED) {
1617 1618 return (status);
1618 1619 }
1619 1620
1620 1621 /*
1621 1622 * Allocate system memory for the receive/transmit buffer blocks
1622 1623 * and receive/transmit descriptor rings.
1623 1624 */
1624 1625 status = nxge_alloc_mem_pool(nxgep);
1625 1626 if (status != NXGE_OK) {
1626 1627 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
1627 1628 goto nxge_init_fail1;
1628 1629 }
1629 1630
1630 1631 if (!isLDOMguest(nxgep)) {
1631 1632 /*
1632 1633 * Initialize and enable the TXC registers.
1633 1634 * (Globally enable the Tx controller,
1634 1635 * enable the port, configure the dma channel bitmap,
1635 1636 * configure the max burst size).
1636 1637 */
1637 1638 status = nxge_txc_init(nxgep);
1638 1639 if (status != NXGE_OK) {
1639 1640 NXGE_ERROR_MSG((nxgep,
1640 1641 NXGE_ERR_CTL, "init txc failed\n"));
1641 1642 goto nxge_init_fail2;
1642 1643 }
1643 1644 }
1644 1645
1645 1646 /*
1646 1647 * Initialize and enable TXDMA channels.
1647 1648 */
1648 1649 status = nxge_init_txdma_channels(nxgep);
1649 1650 if (status != NXGE_OK) {
1650 1651 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
1651 1652 goto nxge_init_fail3;
1652 1653 }
1653 1654
1654 1655 /*
1655 1656 * Initialize and enable RXDMA channels.
1656 1657 */
1657 1658 status = nxge_init_rxdma_channels(nxgep);
1658 1659 if (status != NXGE_OK) {
1659 1660 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
1660 1661 goto nxge_init_fail4;
1661 1662 }
1662 1663
1663 1664 /*
1664 1665 * The guest domain is now done.
1665 1666 */
1666 1667 if (isLDOMguest(nxgep)) {
1667 1668 nxgep->drv_state |= STATE_HW_INITIALIZED;
1668 1669 goto nxge_init_exit;
1669 1670 }
1670 1671
1671 1672 /*
1672 1673 * Initialize TCAM and FCRAM (Neptune).
1673 1674 */
1674 1675 status = nxge_classify_init(nxgep);
1675 1676 if (status != NXGE_OK) {
1676 1677 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n"));
1677 1678 goto nxge_init_fail5;
1678 1679 }
1679 1680
1680 1681 /*
1681 1682 * Initialize ZCP
1682 1683 */
1683 1684 status = nxge_zcp_init(nxgep);
1684 1685 if (status != NXGE_OK) {
1685 1686 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
1686 1687 goto nxge_init_fail5;
1687 1688 }
1688 1689
1689 1690 /*
1690 1691 * Initialize IPP.
1691 1692 */
1692 1693 status = nxge_ipp_init(nxgep);
1693 1694 if (status != NXGE_OK) {
1694 1695 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
1695 1696 goto nxge_init_fail5;
1696 1697 }
1697 1698
1698 1699 /*
1699 1700 * Initialize the MAC block.
1700 1701 */
1701 1702 status = nxge_mac_init(nxgep);
1702 1703 if (status != NXGE_OK) {
1703 1704 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
1704 1705 goto nxge_init_fail5;
1705 1706 }
1706 1707
1707 1708 /*
1708 1709 	 * Enable the interrupts for DDI.
1709 1710 */
1710 1711 nxge_intrs_enable(nxgep);
1711 1712
1712 1713 nxgep->drv_state |= STATE_HW_INITIALIZED;
1713 1714
1714 1715 goto nxge_init_exit;
1715 1716
1716 1717 nxge_init_fail5:
1717 1718 nxge_uninit_rxdma_channels(nxgep);
1718 1719 nxge_init_fail4:
1719 1720 nxge_uninit_txdma_channels(nxgep);
1720 1721 nxge_init_fail3:
1721 1722 if (!isLDOMguest(nxgep)) {
1722 1723 (void) nxge_txc_uninit(nxgep);
1723 1724 }
1724 1725 nxge_init_fail2:
1725 1726 nxge_free_mem_pool(nxgep);
1726 1727 nxge_init_fail1:
1727 1728 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1728 1729 "<== nxge_init status (failed) = 0x%08x", status));
1729 1730 return (status);
1730 1731
1731 1732 nxge_init_exit:
1732 1733 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
1733 1734 status));
1734 1735 return (status);
1735 1736 }
1736 1737
1737 1738
1738 1739 timeout_id_t
1739 1740 nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
1740 1741 {
1741 1742 if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) {
1742 1743 return (timeout(func, (caddr_t)nxgep,
1743 1744 drv_usectohz(1000 * msec)));
1744 1745 }
1745 1746 return (NULL);
1746 1747 }
1747 1748
1748 1749 /*ARGSUSED*/
1749 1750 void
1750 1751 nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
1751 1752 {
1752 1753 if (timerid) {
1753 1754 (void) untimeout(timerid);
1754 1755 }
1755 1756 }
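
A minimal sketch of how these two helpers fit together; the callback name my_func and the 1000 ms period are illustrative assumptions, not part of this file, and the cancel path mirrors the timer handling in nxge_uninit() below.

	/* Illustrative only: arm a 1-second timer, then cancel it later. */
	nxgep->nxge_timerid = nxge_start_timer(nxgep, my_func, 1000);

	/* ... */

	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

Note that nxge_start_timer() returns NULL while the device is suspended (unless resuming), so callers must tolerate a zero timer id.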
1756 1757
1757 1758 void
1758 1759 nxge_uninit(p_nxge_t nxgep)
1759 1760 {
1760 1761 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));
1761 1762
1762 1763 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1763 1764 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1764 1765 "==> nxge_uninit: not initialized"));
1765 1766 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1766 1767 "<== nxge_uninit"));
1767 1768 return;
1768 1769 }
1769 1770
1770 1771 if (!isLDOMguest(nxgep)) {
1771 1772 /*
1772 1773 * Reset the receive MAC side.
1773 1774 */
1774 1775 (void) nxge_rx_mac_disable(nxgep);
1775 1776
1776 1777 /*
1777 1778 * Drain the IPP.
1778 1779 */
1779 1780 (void) nxge_ipp_drain(nxgep);
1780 1781 }
1781 1782
1782 1783 /* stop timer */
1783 1784 if (nxgep->nxge_timerid) {
1784 1785 nxge_stop_timer(nxgep, nxgep->nxge_timerid);
1785 1786 nxgep->nxge_timerid = 0;
1786 1787 }
1787 1788
1788 1789 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
1789 1790 (void) nxge_intr_hw_disable(nxgep);
1790 1791
1791 1792
1792 1793 /* Disable and soft reset the IPP */
1793 1794 if (!isLDOMguest(nxgep))
1794 1795 (void) nxge_ipp_disable(nxgep);
1795 1796
1796 1797 /* Free classification resources */
1797 1798 (void) nxge_classify_uninit(nxgep);
1798 1799
1799 1800 /*
1800 1801 * Reset the transmit/receive DMA side.
1801 1802 */
1802 1803 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
1803 1804 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
1804 1805
1805 1806 nxge_uninit_txdma_channels(nxgep);
1806 1807 nxge_uninit_rxdma_channels(nxgep);
1807 1808
1808 1809 /*
1809 1810 * Reset the transmit MAC side.
1810 1811 */
1811 1812 (void) nxge_tx_mac_disable(nxgep);
1812 1813
1813 1814 nxge_free_mem_pool(nxgep);
1814 1815
1815 1816 /*
1816 1817 * Start the timer if the reset flag is not set.
1817 1818 * If this reset flag is set, the link monitor
1818 1819 	 * will not be started in order to stop further bus
1819 1820 * activities coming from this interface.
1820 1821 * The driver will start the monitor function
1821 1822 	 * when the interface is initialized again later.
1822 1823 */
1823 1824 if (!nxge_peu_reset_enable) {
1824 1825 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
1825 1826 }
1826 1827
1827 1828 nxgep->drv_state &= ~STATE_HW_INITIALIZED;
1828 1829
1829 1830 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
1830 1831 "nxge_mblks_pending %d", nxge_mblks_pending));
1831 1832 }
1832 1833
1833 1834 void
1834 1835 nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
1835 1836 {
1836 1837 uint64_t reg;
1837 1838 uint64_t regdata;
1838 1839 int i, retry;
1839 1840
1840 1841 bcopy((char *)mp->b_rptr, (char *)®, sizeof (uint64_t));
1841 1842 regdata = 0;
1842 1843 retry = 1;
1843 1844
1844 1845 for (i = 0; i < retry; i++) {
1845 1846 NXGE_REG_RD64(nxgep->npi_handle, reg, ®data);
1846 1847 }
1847 1848 bcopy((char *)®data, (char *)mp->b_rptr, sizeof (uint64_t));
1848 1849 }
1849 1850
1850 1851 void
1851 1852 nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
1852 1853 {
1853 1854 uint64_t reg;
1854 1855 uint64_t buf[2];
1855 1856
1856 1857 bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
1857 1858 reg = buf[0];
1858 1859
1859 1860 NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
1860 1861 }
1861 1862
1862 1863 /*ARGSUSED*/
1863 1864 /*VARARGS*/
1864 1865 void
1865 1866 nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
1866 1867 {
1867 1868 char msg_buffer[1048];
1868 1869 char prefix_buffer[32];
1869 1870 int instance;
1870 1871 uint64_t debug_level;
1871 1872 int cmn_level = CE_CONT;
1872 1873 va_list ap;
1873 1874
1874 1875 if (nxgep && nxgep->nxge_debug_level != nxge_debug_level) {
1875 1876 /* In case a developer has changed nxge_debug_level. */
1876 1877 if (nxgep->nxge_debug_level != nxge_debug_level)
1877 1878 nxgep->nxge_debug_level = nxge_debug_level;
1878 1879 }
1879 1880
1880 1881 debug_level = (nxgep == NULL) ? nxge_debug_level :
1881 1882 nxgep->nxge_debug_level;
1882 1883
1883 1884 if ((level & debug_level) ||
1884 1885 (level == NXGE_NOTE) ||
1885 1886 (level == NXGE_ERR_CTL)) {
1886 1887 /* do the msg processing */
1887 1888 MUTEX_ENTER(&nxgedebuglock);
1888 1889
1889 1890 if ((level & NXGE_NOTE)) {
1890 1891 cmn_level = CE_NOTE;
1891 1892 }
1892 1893
1893 1894 if (level & NXGE_ERR_CTL) {
1894 1895 cmn_level = CE_WARN;
1895 1896 }
1896 1897
1897 1898 va_start(ap, fmt);
1898 1899 (void) vsprintf(msg_buffer, fmt, ap);
1899 1900 va_end(ap);
1900 1901 if (nxgep == NULL) {
1901 1902 instance = -1;
1902 1903 (void) sprintf(prefix_buffer, "%s :", "nxge");
1903 1904 } else {
1904 1905 instance = nxgep->instance;
1905 1906 (void) sprintf(prefix_buffer,
1906 1907 "%s%d :", "nxge", instance);
1907 1908 }
1908 1909
1909 1910 MUTEX_EXIT(&nxgedebuglock);
1910 1911 cmn_err(cmn_level, "!%s %s\n",
1911 1912 prefix_buffer, msg_buffer);
1912 1913
1913 1914 }
1914 1915 }
1915 1916
1916 1917 char *
1917 1918 nxge_dump_packet(char *addr, int size)
1918 1919 {
1919 1920 uchar_t *ap = (uchar_t *)addr;
1920 1921 int i;
1921 1922 static char etherbuf[1024];
1922 1923 char *cp = etherbuf;
1923 1924 char digits[] = "0123456789abcdef";
1924 1925
1925 1926 if (!size)
1926 1927 size = 60;
1927 1928
1928 1929 if (size > MAX_DUMP_SZ) {
1929 1930 /* Dump the leading bytes */
1930 1931 for (i = 0; i < MAX_DUMP_SZ/2; i++) {
1931 1932 if (*ap > 0x0f)
1932 1933 *cp++ = digits[*ap >> 4];
1933 1934 *cp++ = digits[*ap++ & 0xf];
1934 1935 *cp++ = ':';
1935 1936 }
1936 1937 for (i = 0; i < 20; i++)
1937 1938 *cp++ = '.';
1938 1939 /* Dump the last MAX_DUMP_SZ/2 bytes */
1939 1940 ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
1940 1941 for (i = 0; i < MAX_DUMP_SZ/2; i++) {
1941 1942 if (*ap > 0x0f)
1942 1943 *cp++ = digits[*ap >> 4];
1943 1944 *cp++ = digits[*ap++ & 0xf];
1944 1945 *cp++ = ':';
1945 1946 }
1946 1947 } else {
1947 1948 for (i = 0; i < size; i++) {
1948 1949 if (*ap > 0x0f)
1949 1950 *cp++ = digits[*ap >> 4];
1950 1951 *cp++ = digits[*ap++ & 0xf];
1951 1952 *cp++ = ':';
1952 1953 }
1953 1954 }
1954 1955 *--cp = 0;
1955 1956 return (etherbuf);
1956 1957 }
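
A hedged usage sketch for the dump helper above: mp stands in for whatever mblk the caller is tracing, and the length computed from b_wptr/b_rptr is ordinary STREAMS arithmetic rather than anything specific to this driver.

	/* Illustrative only: trace the contents of a message block. */
	cmn_err(CE_NOTE, "!pkt: %s",
	    nxge_dump_packet((char *)mp->b_rptr,
	    (int)(mp->b_wptr - mp->b_rptr)));

Because the helper formats into a static buffer (etherbuf), the returned string must be consumed before the next call and the function is not reentrant.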
1957 1958
1958 1959 #ifdef NXGE_DEBUG
1959 1960 static void
1960 1961 nxge_test_map_regs(p_nxge_t nxgep)
1961 1962 {
1962 1963 ddi_acc_handle_t cfg_handle;
1963 1964 p_pci_cfg_t cfg_ptr;
1964 1965 ddi_acc_handle_t dev_handle;
1965 1966 char *dev_ptr;
1966 1967 ddi_acc_handle_t pci_config_handle;
1967 1968 uint32_t regval;
1968 1969 int i;
1969 1970
1970 1971 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));
1971 1972
1972 1973 dev_handle = nxgep->dev_regs->nxge_regh;
1973 1974 dev_ptr = (char *)nxgep->dev_regs->nxge_regp;
1974 1975
1975 1976 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1976 1977 cfg_handle = nxgep->dev_regs->nxge_pciregh;
1977 1978 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
1978 1979
1979 1980 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1980 1981 "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
1981 1982 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1982 1983 "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
1983 1984 &cfg_ptr->vendorid));
1984 1985 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1985 1986 "\tvendorid 0x%x devid 0x%x",
1986 1987 NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
1987 1988 NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0)));
1988 1989 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1989 1990 "PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
1990 1991 "bar1c 0x%x",
1991 1992 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0),
1992 1993 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
1993 1994 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
1994 1995 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
1995 1996 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1996 1997 "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
1997 1998 "base 28 0x%x bar2c 0x%x\n",
1998 1999 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
1999 2000 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
2000 2001 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
2001 2002 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
2002 2003 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2003 2004 "\nNeptune PCI BAR: base30 0x%x\n",
2004 2005 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));
2005 2006
2006 2007 cfg_handle = nxgep->dev_regs->nxge_pciregh;
2007 2008 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
2008 2009 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2009 2010 "first 0x%llx second 0x%llx third 0x%llx "
2010 2011 "last 0x%llx ",
2011 2012 NXGE_PIO_READ64(dev_handle,
2012 2013 (uint64_t *)(dev_ptr + 0), 0),
2013 2014 NXGE_PIO_READ64(dev_handle,
2014 2015 (uint64_t *)(dev_ptr + 8), 0),
2015 2016 NXGE_PIO_READ64(dev_handle,
2016 2017 (uint64_t *)(dev_ptr + 16), 0),
2017 2018 NXGE_PIO_READ64(cfg_handle,
2018 2019 (uint64_t *)(dev_ptr + 24), 0)));
2019 2020 }
2020 2021 }
2021 2022
2022 2023 #endif
2023 2024
2024 2025 static void
2025 2026 nxge_suspend(p_nxge_t nxgep)
2026 2027 {
2027 2028 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));
2028 2029
2029 2030 nxge_intrs_disable(nxgep);
2030 2031 nxge_destroy_dev(nxgep);
2031 2032
2032 2033 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
2033 2034 }
2034 2035
2035 2036 static nxge_status_t
2036 2037 nxge_resume(p_nxge_t nxgep)
2037 2038 {
2038 2039 nxge_status_t status = NXGE_OK;
2039 2040
2040 2041 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));
2041 2042
2042 2043 nxgep->suspended = DDI_RESUME;
2043 2044 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
2044 2045 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
2045 2046 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
2046 2047 (void) nxge_rx_mac_enable(nxgep);
2047 2048 (void) nxge_tx_mac_enable(nxgep);
2048 2049 nxge_intrs_enable(nxgep);
2049 2050 nxgep->suspended = 0;
2050 2051
2051 2052 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2052 2053 "<== nxge_resume status = 0x%x", status));
2053 2054 return (status);
2054 2055 }
2055 2056
2056 2057 static nxge_status_t
2057 2058 nxge_setup_dev(p_nxge_t nxgep)
2058 2059 {
2059 2060 nxge_status_t status = NXGE_OK;
2060 2061
2061 2062 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
2062 2063 nxgep->mac.portnum));
2063 2064
2064 2065 status = nxge_link_init(nxgep);
2065 2066
2066 2067 if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
2067 2068 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2068 2069 "port%d Bad register acc handle", nxgep->mac.portnum));
2069 2070 status = NXGE_ERROR;
2070 2071 }
2071 2072
2072 2073 if (status != NXGE_OK) {
2073 2074 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2074 2075 " nxge_setup_dev status "
2075 2076 "(xcvr init 0x%08x)", status));
2076 2077 goto nxge_setup_dev_exit;
2077 2078 }
2078 2079
2079 2080 nxge_setup_dev_exit:
2080 2081 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2081 2082 "<== nxge_setup_dev port %d status = 0x%08x",
2082 2083 nxgep->mac.portnum, status));
2083 2084
2084 2085 return (status);
2085 2086 }
2086 2087
2087 2088 static void
2088 2089 nxge_destroy_dev(p_nxge_t nxgep)
2089 2090 {
2090 2091 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));
2091 2092
2092 2093 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
2093 2094
2094 2095 (void) nxge_hw_stop(nxgep);
2095 2096
2096 2097 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
2097 2098 }
2098 2099
2099 2100 static nxge_status_t
2100 2101 nxge_setup_system_dma_pages(p_nxge_t nxgep)
2101 2102 {
2102 2103 int ddi_status = DDI_SUCCESS;
2103 2104 uint_t count;
2104 2105 ddi_dma_cookie_t cookie;
2105 2106 uint_t iommu_pagesize;
2106 2107 nxge_status_t status = NXGE_OK;
2107 2108
2108 2109 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
2109 2110 nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
2110 2111 if (nxgep->niu_type != N2_NIU) {
2111 2112 iommu_pagesize = dvma_pagesize(nxgep->dip);
2112 2113 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2113 2114 " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
2114 2115 " default_block_size %d iommu_pagesize %d",
2115 2116 nxgep->sys_page_sz,
2116 2117 ddi_ptob(nxgep->dip, (ulong_t)1),
2117 2118 nxgep->rx_default_block_size,
2118 2119 iommu_pagesize));
2119 2120
2120 2121 if (iommu_pagesize != 0) {
2121 2122 if (nxgep->sys_page_sz == iommu_pagesize) {
2122 2123 if (iommu_pagesize > 0x4000)
2123 2124 nxgep->sys_page_sz = 0x4000;
2124 2125 } else {
2125 2126 if (nxgep->sys_page_sz > iommu_pagesize)
2126 2127 nxgep->sys_page_sz = iommu_pagesize;
2127 2128 }
2128 2129 }
2129 2130 }
2130 2131 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
2131 2132 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2132 2133 "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
2133 2134 "default_block_size %d page mask %d",
2134 2135 nxgep->sys_page_sz,
2135 2136 ddi_ptob(nxgep->dip, (ulong_t)1),
2136 2137 nxgep->rx_default_block_size,
2137 2138 nxgep->sys_page_mask));
2138 2139
2139 2140
2140 2141 switch (nxgep->sys_page_sz) {
2141 2142 default:
2142 2143 nxgep->sys_page_sz = 0x1000;
2143 2144 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
2144 2145 nxgep->rx_default_block_size = 0x1000;
2145 2146 nxgep->rx_bksize_code = RBR_BKSIZE_4K;
2146 2147 break;
2147 2148 case 0x1000:
2148 2149 nxgep->rx_default_block_size = 0x1000;
2149 2150 nxgep->rx_bksize_code = RBR_BKSIZE_4K;
2150 2151 break;
2151 2152 case 0x2000:
2152 2153 nxgep->rx_default_block_size = 0x2000;
2153 2154 nxgep->rx_bksize_code = RBR_BKSIZE_8K;
2154 2155 break;
2155 2156 case 0x4000:
2156 2157 nxgep->rx_default_block_size = 0x4000;
2157 2158 nxgep->rx_bksize_code = RBR_BKSIZE_16K;
2158 2159 break;
2159 2160 case 0x8000:
2160 2161 nxgep->rx_default_block_size = 0x8000;
2161 2162 nxgep->rx_bksize_code = RBR_BKSIZE_32K;
2162 2163 break;
2163 2164 }
2164 2165
2165 2166 #ifndef USE_RX_BIG_BUF
2166 2167 nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
2167 2168 #else
2168 2169 nxgep->rx_default_block_size = 0x2000;
2169 2170 nxgep->rx_bksize_code = RBR_BKSIZE_8K;
2170 2171 #endif
2171 2172 /*
2172 2173 * Get the system DMA burst size.
2173 2174 */
2174 2175 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
2175 2176 DDI_DMA_DONTWAIT, 0,
2176 2177 &nxgep->dmasparehandle);
2177 2178 if (ddi_status != DDI_SUCCESS) {
2178 2179 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2179 2180 "ddi_dma_alloc_handle: failed "
2180 2181 " status 0x%x", ddi_status));
2181 2182 goto nxge_get_soft_properties_exit;
2182 2183 }
2183 2184
2184 2185 ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
2185 2186 (caddr_t)nxgep->dmasparehandle,
2186 2187 sizeof (nxgep->dmasparehandle),
2187 2188 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2188 2189 DDI_DMA_DONTWAIT, 0,
2189 2190 &cookie, &count);
2190 2191 if (ddi_status != DDI_DMA_MAPPED) {
2191 2192 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2192 2193 "Binding spare handle to find system"
2193 2194 " burstsize failed."));
2194 2195 ddi_status = DDI_FAILURE;
2195 2196 goto nxge_get_soft_properties_fail1;
2196 2197 }
2197 2198
2198 2199 nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
2199 2200 (void) ddi_dma_unbind_handle(nxgep->dmasparehandle);
2200 2201
2201 2202 nxge_get_soft_properties_fail1:
2202 2203 ddi_dma_free_handle(&nxgep->dmasparehandle);
2203 2204
2204 2205 nxge_get_soft_properties_exit:
2205 2206
2206 2207 if (ddi_status != DDI_SUCCESS)
2207 2208 status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2208 2209
2209 2210 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2210 2211 "<== nxge_setup_system_dma_pages status = 0x%08x", status));
2211 2212 return (status);
2212 2213 }
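
A small worked example of the page-size arithmetic above, using an assumed 8 KB system page purely for illustration:

	/*
	 * Assumed: ddi_ptob(dip, 1) == 0x2000 (8 KB pages).
	 *
	 *   sys_page_sz   = 0x2000
	 *   sys_page_mask = ~(0x2000 - 1) = 0xffffe000	(32-bit view)
	 *
	 * The switch statement then selects rx_default_block_size = 0x2000
	 * and rx_bksize_code = RBR_BKSIZE_8K.
	 */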
2213 2214
2214 2215 static nxge_status_t
2215 2216 nxge_alloc_mem_pool(p_nxge_t nxgep)
2216 2217 {
2217 2218 nxge_status_t status = NXGE_OK;
2218 2219
2219 2220 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool"));
2220 2221
2221 2222 status = nxge_alloc_rx_mem_pool(nxgep);
2222 2223 if (status != NXGE_OK) {
2223 2224 return (NXGE_ERROR);
2224 2225 }
2225 2226
2226 2227 status = nxge_alloc_tx_mem_pool(nxgep);
2227 2228 if (status != NXGE_OK) {
2228 2229 nxge_free_rx_mem_pool(nxgep);
2229 2230 return (NXGE_ERROR);
2230 2231 }
2231 2232
2232 2233 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool"));
2233 2234 return (NXGE_OK);
2234 2235 }
2235 2236
2236 2237 static void
2237 2238 nxge_free_mem_pool(p_nxge_t nxgep)
2238 2239 {
2239 2240 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool"));
2240 2241
2241 2242 nxge_free_rx_mem_pool(nxgep);
2242 2243 nxge_free_tx_mem_pool(nxgep);
2243 2244
2244 2245 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
2245 2246 }
2246 2247
2247 2248 nxge_status_t
2248 2249 nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
2249 2250 {
2250 2251 uint32_t rdc_max;
2251 2252 p_nxge_dma_pt_cfg_t p_all_cfgp;
2252 2253 p_nxge_hw_pt_cfg_t p_cfgp;
2253 2254 p_nxge_dma_pool_t dma_poolp;
2254 2255 p_nxge_dma_common_t *dma_buf_p;
2255 2256 p_nxge_dma_pool_t dma_cntl_poolp;
2256 2257 p_nxge_dma_common_t *dma_cntl_p;
2257 2258 uint32_t *num_chunks; /* per dma */
2258 2259 nxge_status_t status = NXGE_OK;
2259 2260
2260 2261 uint32_t nxge_port_rbr_size;
2261 2262 uint32_t nxge_port_rbr_spare_size;
2262 2263 uint32_t nxge_port_rcr_size;
2263 2264 uint32_t rx_cntl_alloc_size;
2264 2265
2265 2266 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));
2266 2267
2267 2268 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
2268 2269 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
2269 2270 rdc_max = NXGE_MAX_RDCS;
2270 2271
2271 2272 /*
2272 2273 * Allocate memory for the common DMA data structures.
2273 2274 */
2274 2275 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
2275 2276 KM_SLEEP);
2276 2277 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2277 2278 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);
2278 2279
2279 2280 dma_cntl_poolp = (p_nxge_dma_pool_t)
2280 2281 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
2281 2282 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2282 2283 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);
2283 2284
2284 2285 num_chunks = (uint32_t *)KMEM_ZALLOC(
2285 2286 sizeof (uint32_t) * rdc_max, KM_SLEEP);
2286 2287
2287 2288 /*
2288 2289 * Assume that each DMA channel will be configured with
2289 2290 * the default block size.
2290 2291 	 * RBR block counts are rounded up to a multiple of the batch count (16).
2291 2292 */
2292 2293 nxge_port_rbr_size = p_all_cfgp->rbr_size;
2293 2294 nxge_port_rcr_size = p_all_cfgp->rcr_size;
2294 2295
2295 2296 if (!nxge_port_rbr_size) {
2296 2297 nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
2297 2298 }
2298 2299 if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
2299 2300 nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
2300 2301 (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
2301 2302 }
2302 2303
2303 2304 p_all_cfgp->rbr_size = nxge_port_rbr_size;
2304 2305 nxge_port_rbr_spare_size = nxge_rbr_spare_size;
2305 2306
2306 2307 if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
2307 2308 nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
2308 2309 (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
2309 2310 }
2310 2311 if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) {
2311 2312 NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2312 2313 "nxge_alloc_rx_mem_pool: RBR size too high %d, "
2313 2314 "set to default %d",
2314 2315 nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS));
2315 2316 nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS;
2316 2317 }
2317 2318 if (nxge_port_rcr_size > RCR_DEFAULT_MAX) {
2318 2319 NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2319 2320 "nxge_alloc_rx_mem_pool: RCR too high %d, "
2320 2321 "set to default %d",
2321 2322 nxge_port_rcr_size, RCR_DEFAULT_MAX));
2322 2323 nxge_port_rcr_size = RCR_DEFAULT_MAX;
2323 2324 }
2324 2325
2325 2326 /*
2326 2327 	 * N2/NIU has limitations on the descriptor sizes: contiguous
2327 2328 	 * memory allocation for data buffers is limited to 4M
2328 2329 	 * (contig_mem_alloc), and control buffers must be little endian
2329 2330 	 * (allocated with the ddi/dki mem alloc functions).
2330 2331 */
2331 2332 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2332 2333 if (nxgep->niu_type == N2_NIU) {
2333 2334 nxge_port_rbr_spare_size = 0;
2334 2335 if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) ||
2335 2336 (!ISP2(nxge_port_rbr_size))) {
2336 2337 nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX;
2337 2338 }
2338 2339 if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) ||
2339 2340 (!ISP2(nxge_port_rcr_size))) {
2340 2341 nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX;
2341 2342 }
2342 2343 }
2343 2344 #endif
2344 2345
2345 2346 /*
2346 2347 * Addresses of receive block ring, receive completion ring and the
2347 2348 * mailbox must be all cache-aligned (64 bytes).
2348 2349 */
2349 2350 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size;
2350 2351 rx_cntl_alloc_size *= (sizeof (rx_desc_t));
2351 2352 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size);
2352 2353 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t);
2353 2354
2354 2355 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: "
2355 2356 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d "
2356 2357 "nxge_port_rcr_size = %d "
2357 2358 "rx_cntl_alloc_size = %d",
2358 2359 nxge_port_rbr_size, nxge_port_rbr_spare_size,
2359 2360 nxge_port_rcr_size,
2360 2361 rx_cntl_alloc_size));
2361 2362
2362 2363 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2363 2364 if (nxgep->niu_type == N2_NIU) {
2364 2365 uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size *
2365 2366 (nxge_port_rbr_size + nxge_port_rbr_spare_size));
2366 2367
2367 2368 if (!ISP2(rx_buf_alloc_size)) {
2368 2369 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2369 2370 "==> nxge_alloc_rx_mem_pool: "
2370 2371 " must be power of 2"));
2371 2372 status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2372 2373 goto nxge_alloc_rx_mem_pool_exit;
2373 2374 }
2374 2375
2375 2376 if (rx_buf_alloc_size > (1 << 22)) {
2376 2377 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2377 2378 "==> nxge_alloc_rx_mem_pool: "
2378 2379 " limit size to 4M"));
2379 2380 status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2380 2381 goto nxge_alloc_rx_mem_pool_exit;
2381 2382 }
2382 2383
2383 2384 if (rx_cntl_alloc_size < 0x2000) {
2384 2385 rx_cntl_alloc_size = 0x2000;
2385 2386 }
2386 2387 }
2387 2388 #endif
2388 2389 nxgep->nxge_port_rbr_size = nxge_port_rbr_size;
2389 2390 nxgep->nxge_port_rcr_size = nxge_port_rcr_size;
2390 2391 nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size;
2391 2392 nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size;
2392 2393
2393 2394 dma_poolp->ndmas = p_cfgp->max_rdcs;
2394 2395 dma_poolp->num_chunks = num_chunks;
2395 2396 dma_poolp->buf_allocated = B_TRUE;
2396 2397 nxgep->rx_buf_pool_p = dma_poolp;
2397 2398 dma_poolp->dma_buf_pool_p = dma_buf_p;
2398 2399
2399 2400 dma_cntl_poolp->ndmas = p_cfgp->max_rdcs;
2400 2401 dma_cntl_poolp->buf_allocated = B_TRUE;
2401 2402 nxgep->rx_cntl_pool_p = dma_cntl_poolp;
2402 2403 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2403 2404
2404 2405 /* Allocate the receive rings, too. */
2405 2406 nxgep->rx_rbr_rings =
2406 2407 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP);
2407 2408 nxgep->rx_rbr_rings->rbr_rings =
2408 2409 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP);
2409 2410 nxgep->rx_rcr_rings =
2410 2411 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP);
2411 2412 nxgep->rx_rcr_rings->rcr_rings =
2412 2413 KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP);
2413 2414 nxgep->rx_mbox_areas_p =
2414 2415 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP);
2415 2416 nxgep->rx_mbox_areas_p->rxmbox_areas =
2416 2417 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP);
2417 2418
2418 2419 nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas =
2419 2420 p_cfgp->max_rdcs;
2420 2421
2421 2422 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2422 2423 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status));
2423 2424
2424 2425 nxge_alloc_rx_mem_pool_exit:
2425 2426 return (status);
2426 2427 }
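
As a worked example of the batch rounding performed above (the starting value of 1000 blocks is hypothetical):

	/*
	 * Assumed rbr_size = 1000 and NXGE_RXDMA_POST_BATCH = 16:
	 *   1000 % 16 != 0, so
	 *   rbr_size = 16 * (1000 / 16 + 1) = 16 * 63 = 1008 blocks,
	 * i.e. the ring is rounded up to the next multiple of the batch size.
	 */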
2427 2428
2428 2429 /*
2429 2430 * nxge_alloc_rxb
2430 2431 *
2431 2432 * Allocate buffers for an RDC.
2432 2433 *
2433 2434 * Arguments:
2434 2435 * nxgep
2435 2436 * channel The channel to map into our kernel space.
2436 2437 *
2437 2438 * Notes:
2438 2439 *
2439 2440 * NPI function calls:
2440 2441 *
2441 2442 * NXGE function calls:
2442 2443 *
2443 2444 * Registers accessed:
2444 2445 *
2445 2446 * Context:
2446 2447 *
2447 2448 * Taking apart:
2448 2449 *
2449 2450 * Open questions:
2450 2451 *
2451 2452 */
2452 2453 nxge_status_t
2453 2454 nxge_alloc_rxb(
2454 2455 p_nxge_t nxgep,
2455 2456 int channel)
2456 2457 {
2457 2458 size_t rx_buf_alloc_size;
2458 2459 nxge_status_t status = NXGE_OK;
2459 2460
2460 2461 nxge_dma_common_t **data;
2461 2462 nxge_dma_common_t **control;
2462 2463 uint32_t *num_chunks;
2463 2464
2464 2465 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rxb"));
2465 2466
2466 2467 /*
2467 2468 * Allocate memory for the receive buffers and descriptor rings.
2468 2469 * Replace these allocation functions with the interface functions
2469 2470 * provided by the partition manager if/when they are available.
2470 2471 */
2471 2472
2472 2473 /*
2473 2474 * Allocate memory for the receive buffer blocks.
2474 2475 */
2475 2476 rx_buf_alloc_size = (nxgep->rx_default_block_size *
2476 2477 (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size));
2477 2478
2478 2479 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
2479 2480 num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel];
2480 2481
2481 2482 if ((status = nxge_alloc_rx_buf_dma(
2482 2483 nxgep, channel, data, rx_buf_alloc_size,
2483 2484 nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) {
2484 2485 return (status);
2485 2486 }
2486 2487
2487 2488 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): "
2488 2489 "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data));
2489 2490
2490 2491 /*
2491 2492 * Allocate memory for descriptor rings and mailbox.
2492 2493 */
2493 2494 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
2494 2495
2495 2496 if ((status = nxge_alloc_rx_cntl_dma(
2496 2497 nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size))
2497 2498 != NXGE_OK) {
2498 2499 nxge_free_rx_cntl_dma(nxgep, *control);
2499 2500 (*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE;
2500 2501 nxge_free_rx_buf_dma(nxgep, *data, *num_chunks);
2501 2502 return (status);
2502 2503 }
2503 2504
2504 2505 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2505 2506 	    "<== nxge_alloc_rxb: status 0x%08x", status));
2506 2507
2507 2508 return (status);
2508 2509 }
2509 2510
2510 2511 void
2511 2512 nxge_free_rxb(
2512 2513 p_nxge_t nxgep,
2513 2514 int channel)
2514 2515 {
2515 2516 nxge_dma_common_t *data;
2516 2517 nxge_dma_common_t *control;
2517 2518 uint32_t num_chunks;
2518 2519
2519 2520 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rxb"));
2520 2521
2521 2522 data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
2522 2523 num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel];
2523 2524 nxge_free_rx_buf_dma(nxgep, data, num_chunks);
2524 2525
2525 2526 nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0;
2526 2527 nxgep->rx_buf_pool_p->num_chunks[channel] = 0;
2527 2528
2528 2529 control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
2529 2530 nxge_free_rx_cntl_dma(nxgep, control);
2530 2531
2531 2532 nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0;
2532 2533
2533 2534 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2534 2535 KMEM_FREE(control, sizeof (nxge_dma_common_t));
2535 2536
2536 2537 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rxb"));
2537 2538 }
2538 2539
2539 2540 static void
2540 2541 nxge_free_rx_mem_pool(p_nxge_t nxgep)
2541 2542 {
2542 2543 int rdc_max = NXGE_MAX_RDCS;
2543 2544
2544 2545 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool"));
2545 2546
2546 2547 if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) {
2547 2548 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2548 2549 "<== nxge_free_rx_mem_pool "
2549 2550 	    "(null rx buf pool or buf not allocated)"));
2550 2551 return;
2551 2552 }
2552 2553 if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) {
2553 2554 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2554 2555 "<== nxge_free_rx_mem_pool "
2555 2556 	    "(null rx cntl buf pool or cntl buf not allocated)"));
2556 2557 return;
2557 2558 }
2558 2559
2559 2560 KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p,
2560 2561 sizeof (p_nxge_dma_common_t) * rdc_max);
2561 2562 KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t));
2562 2563
2563 2564 KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks,
2564 2565 sizeof (uint32_t) * rdc_max);
2565 2566 KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p,
2566 2567 sizeof (p_nxge_dma_common_t) * rdc_max);
2567 2568 KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t));
2568 2569
2569 2570 nxgep->rx_buf_pool_p = 0;
2570 2571 nxgep->rx_cntl_pool_p = 0;
2571 2572
2572 2573 KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings,
2573 2574 sizeof (p_rx_rbr_ring_t) * rdc_max);
2574 2575 KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t));
2575 2576 KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings,
2576 2577 sizeof (p_rx_rcr_ring_t) * rdc_max);
2577 2578 KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t));
2578 2579 KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas,
2579 2580 sizeof (p_rx_mbox_t) * rdc_max);
2580 2581 KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t));
2581 2582
2582 2583 nxgep->rx_rbr_rings = 0;
2583 2584 nxgep->rx_rcr_rings = 0;
2584 2585 nxgep->rx_mbox_areas_p = 0;
2585 2586
2586 2587 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool"));
2587 2588 }
2588 2589
2589 2590
2590 2591 static nxge_status_t
2591 2592 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
2592 2593 p_nxge_dma_common_t *dmap,
2593 2594 size_t alloc_size, size_t block_size, uint32_t *num_chunks)
2594 2595 {
2595 2596 p_nxge_dma_common_t rx_dmap;
2596 2597 nxge_status_t status = NXGE_OK;
2597 2598 size_t total_alloc_size;
2598 2599 size_t allocated = 0;
2599 2600 int i, size_index, array_size;
2600 2601 boolean_t use_kmem_alloc = B_FALSE;
2601 2602
2602 2603 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma"));
2603 2604
2604 2605 rx_dmap = (p_nxge_dma_common_t)
2605 2606 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
2606 2607 KM_SLEEP);
2607 2608
2608 2609 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2609 2610 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
2610 2611 dma_channel, alloc_size, block_size, dmap));
2611 2612
2612 2613 total_alloc_size = alloc_size;
2613 2614
2614 2615 #if defined(RX_USE_RECLAIM_POST)
2615 2616 total_alloc_size = alloc_size + alloc_size/4;
2616 2617 #endif
2617 2618
2618 2619 i = 0;
2619 2620 size_index = 0;
2620 2621 array_size = sizeof (alloc_sizes)/sizeof (size_t);
2621 2622 while ((size_index < array_size) &&
2622 2623 (alloc_sizes[size_index] < alloc_size))
2623 2624 size_index++;
2624 2625 if (size_index >= array_size) {
2625 2626 size_index = array_size - 1;
2626 2627 }
2627 2628
2628 2629 /* For Neptune, use kmem_alloc if the kmem flag is set. */
2629 2630 if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) {
2630 2631 use_kmem_alloc = B_TRUE;
2631 2632 #if defined(__i386) || defined(__amd64)
2632 2633 size_index = 0;
2633 2634 #endif
2634 2635 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2635 2636 "==> nxge_alloc_rx_buf_dma: "
2636 2637 "Neptune use kmem_alloc() - size_index %d",
2637 2638 size_index));
2638 2639 }
2639 2640
2640 2641 while ((allocated < total_alloc_size) &&
2641 2642 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
2642 2643 rx_dmap[i].dma_chunk_index = i;
2643 2644 rx_dmap[i].block_size = block_size;
2644 2645 rx_dmap[i].alength = alloc_sizes[size_index];
2645 2646 rx_dmap[i].orig_alength = rx_dmap[i].alength;
2646 2647 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2647 2648 rx_dmap[i].dma_channel = dma_channel;
2648 2649 rx_dmap[i].contig_alloc_type = B_FALSE;
2649 2650 rx_dmap[i].kmem_alloc_type = B_FALSE;
2650 2651 rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC;
2651 2652
2652 2653 /*
2653 2654 * N2/NIU: data buffers must be contiguous as the driver
2654 2655 * needs to call Hypervisor api to set up
2655 2656 * logical pages.
2656 2657 */
2657 2658 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
2658 2659 rx_dmap[i].contig_alloc_type = B_TRUE;
2659 2660 rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC;
2660 2661 } else if (use_kmem_alloc) {
2661 2662 /* For Neptune, use kmem_alloc */
2662 2663 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2663 2664 "==> nxge_alloc_rx_buf_dma: "
2664 2665 "Neptune use kmem_alloc()"));
2665 2666 rx_dmap[i].kmem_alloc_type = B_TRUE;
2666 2667 rx_dmap[i].buf_alloc_type = KMEM_ALLOC;
2667 2668 }
2668 2669
2669 2670 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2670 2671 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
2671 2672 "i %d nblocks %d alength %d",
2672 2673 dma_channel, i, &rx_dmap[i], block_size,
2673 2674 i, rx_dmap[i].nblocks,
2674 2675 rx_dmap[i].alength));
2675 2676 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2676 2677 &nxge_rx_dma_attr,
2677 2678 rx_dmap[i].alength,
2678 2679 &nxge_dev_buf_dma_acc_attr,
2679 2680 DDI_DMA_READ | DDI_DMA_STREAMING,
2680 2681 (p_nxge_dma_common_t)(&rx_dmap[i]));
2681 2682 if (status != NXGE_OK) {
2682 2683 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2683 2684 "nxge_alloc_rx_buf_dma: Alloc Failed: "
2684 2685 "dma %d size_index %d size requested %d",
2685 2686 dma_channel,
2686 2687 size_index,
2687 2688 rx_dmap[i].alength));
2688 2689 size_index--;
2689 2690 } else {
2690 2691 rx_dmap[i].buf_alloc_state = BUF_ALLOCATED;
2691 2692 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2692 2693 " nxge_alloc_rx_buf_dma DONE alloc mem: "
2693 2694 "dma %d dma_buf_p $%p kaddrp $%p alength %d "
2694 2695 "buf_alloc_state %d alloc_type %d",
2695 2696 dma_channel,
2696 2697 &rx_dmap[i],
2697 2698 rx_dmap[i].kaddrp,
2698 2699 rx_dmap[i].alength,
2699 2700 rx_dmap[i].buf_alloc_state,
2700 2701 rx_dmap[i].buf_alloc_type));
2701 2702 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2702 2703 " alloc_rx_buf_dma allocated rdc %d "
2703 2704 "chunk %d size %x dvma %x bufp %llx kaddrp $%p",
2704 2705 dma_channel, i, rx_dmap[i].alength,
2705 2706 rx_dmap[i].ioaddr_pp, &rx_dmap[i],
2706 2707 rx_dmap[i].kaddrp));
2707 2708 i++;
2708 2709 allocated += alloc_sizes[size_index];
2709 2710 }
2710 2711 }
2711 2712
2712 2713 if (allocated < total_alloc_size) {
2713 2714 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2714 2715 "==> nxge_alloc_rx_buf_dma: not enough for channel %d "
2715 2716 "allocated 0x%x requested 0x%x",
2716 2717 dma_channel,
2717 2718 allocated, total_alloc_size));
2718 2719 status = NXGE_ERROR;
2719 2720 goto nxge_alloc_rx_mem_fail1;
2720 2721 }
2721 2722
2722 2723 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2723 2724 "==> nxge_alloc_rx_buf_dma: Allocated for channel %d "
2724 2725 "allocated 0x%x requested 0x%x",
2725 2726 dma_channel,
2726 2727 allocated, total_alloc_size));
2727 2728
2728 2729 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2729 2730 " alloc_rx_buf_dma rdc %d allocated %d chunks",
2730 2731 dma_channel, i));
2731 2732 *num_chunks = i;
2732 2733 *dmap = rx_dmap;
2733 2734
2734 2735 goto nxge_alloc_rx_mem_exit;
2735 2736
2736 2737 nxge_alloc_rx_mem_fail1:
2737 2738 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2738 2739
2739 2740 nxge_alloc_rx_mem_exit:
2740 2741 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2741 2742 "<== nxge_alloc_rx_buf_dma status 0x%08x", status));
2742 2743
2743 2744 return (status);
2744 2745 }
2745 2746
2746 2747 /*ARGSUSED*/
2747 2748 static void
2748 2749 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
2749 2750 uint32_t num_chunks)
2750 2751 {
2751 2752 int i;
2752 2753
2753 2754 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2754 2755 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks));
2755 2756
2756 2757 if (dmap == 0)
2757 2758 return;
2758 2759
2759 2760 for (i = 0; i < num_chunks; i++) {
2760 2761 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2761 2762 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx",
2762 2763 i, dmap));
2763 2764 nxge_dma_free_rx_data_buf(dmap++);
2764 2765 }
2765 2766
2766 2767 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_buf_dma"));
2767 2768 }
2768 2769
2769 2770 /*ARGSUSED*/
2770 2771 static nxge_status_t
2771 2772 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
2772 2773 p_nxge_dma_common_t *dmap, size_t size)
2773 2774 {
2774 2775 p_nxge_dma_common_t rx_dmap;
2775 2776 nxge_status_t status = NXGE_OK;
2776 2777
2777 2778 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma"));
2778 2779
2779 2780 rx_dmap = (p_nxge_dma_common_t)
2780 2781 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
2781 2782
2782 2783 rx_dmap->contig_alloc_type = B_FALSE;
2783 2784 rx_dmap->kmem_alloc_type = B_FALSE;
2784 2785
2785 2786 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2786 2787 &nxge_desc_dma_attr,
2787 2788 size,
2788 2789 &nxge_dev_desc_dma_acc_attr,
2789 2790 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2790 2791 rx_dmap);
2791 2792 if (status != NXGE_OK) {
2792 2793 goto nxge_alloc_rx_cntl_dma_fail1;
2793 2794 }
2794 2795
2795 2796 *dmap = rx_dmap;
2796 2797 goto nxge_alloc_rx_cntl_dma_exit;
2797 2798
2798 2799 nxge_alloc_rx_cntl_dma_fail1:
2799 2800 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t));
2800 2801
2801 2802 nxge_alloc_rx_cntl_dma_exit:
2802 2803 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2803 2804 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status));
2804 2805
2805 2806 return (status);
2806 2807 }
2807 2808
2808 2809 /*ARGSUSED*/
2809 2810 static void
2810 2811 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
2811 2812 {
2812 2813 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma"));
2813 2814
2814 2815 if (dmap == 0)
2815 2816 return;
2816 2817
2817 2818 nxge_dma_mem_free(dmap);
2818 2819
2819 2820 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma"));
2820 2821 }
2821 2822
2822 2823 typedef struct {
2823 2824 size_t tx_size;
2824 2825 size_t cr_size;
2825 2826 size_t threshhold;
2826 2827 } nxge_tdc_sizes_t;
2827 2828
2828 2829 static
2829 2830 nxge_status_t
2830 2831 nxge_tdc_sizes(
2831 2832 nxge_t *nxgep,
2832 2833 nxge_tdc_sizes_t *sizes)
2833 2834 {
2834 2835 	uint32_t threshhold;	/* The bcopy() threshold */
2835 2836 size_t tx_size; /* Transmit buffer size */
2836 2837 size_t cr_size; /* Completion ring size */
2837 2838
2838 2839 /*
2839 2840 * Assume that each DMA channel will be configured with the
2840 2841 * default transmit buffer size for copying transmit data.
2841 2842 * (If a packet is bigger than this, it will not be copied.)
2842 2843 */
2843 2844 if (nxgep->niu_type == N2_NIU) {
2844 2845 threshhold = TX_BCOPY_SIZE;
2845 2846 } else {
2846 2847 threshhold = nxge_bcopy_thresh;
2847 2848 }
2848 2849 tx_size = nxge_tx_ring_size * threshhold;
2849 2850
2850 2851 cr_size = nxge_tx_ring_size * sizeof (tx_desc_t);
2851 2852 cr_size += sizeof (txdma_mailbox_t);
2852 2853
2853 2854 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2854 2855 if (nxgep->niu_type == N2_NIU) {
2855 2856 if (!ISP2(tx_size)) {
2856 2857 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2857 2858 "==> nxge_tdc_sizes: Tx size"
2858 2859 " must be power of 2"));
2859 2860 return (NXGE_ERROR);
2860 2861 }
2861 2862
2862 2863 if (tx_size > (1 << 22)) {
2863 2864 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2864 2865 "==> nxge_tdc_sizes: Tx size"
2865 2866 " limited to 4M"));
2866 2867 return (NXGE_ERROR);
2867 2868 }
2868 2869
2869 2870 if (cr_size < 0x2000)
2870 2871 cr_size = 0x2000;
2871 2872 }
2872 2873 #endif
2873 2874
2874 2875 sizes->threshhold = threshhold;
2875 2876 sizes->tx_size = tx_size;
2876 2877 sizes->cr_size = cr_size;
2877 2878
2878 2879 return (NXGE_OK);
2879 2880 }
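
To make the sizing above concrete, here is an illustrative calculation; the ring size and bcopy threshold values are assumptions for the sketch, not values taken from this file:

	/*
	 * Assumed: nxge_tx_ring_size = 1024, threshhold = 2048 bytes.
	 *
	 *   tx_size = 1024 * 2048 = 2 MB of pre-mapped copy buffers
	 *   cr_size = 1024 * sizeof (tx_desc_t) + sizeof (txdma_mailbox_t)
	 *
	 * On N2/NIU, tx_size must additionally be a power of two and no
	 * larger than 4M, and cr_size is raised to at least 0x2000 bytes.
	 */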
2880 2881 /*
2881 2882 * nxge_alloc_txb
2882 2883 *
2883 2884 * Allocate buffers for an TDC.
2884 2885 *
2885 2886 * Arguments:
2886 2887 * nxgep
2887 2888 * channel The channel to map into our kernel space.
2888 2889 *
2889 2890 * Notes:
2890 2891 *
2891 2892 * NPI function calls:
2892 2893 *
2893 2894 * NXGE function calls:
2894 2895 *
2895 2896 * Registers accessed:
2896 2897 *
2897 2898 * Context:
2898 2899 *
2899 2900 * Taking apart:
2900 2901 *
2901 2902 * Open questions:
2902 2903 *
2903 2904 */
2904 2905 nxge_status_t
2905 2906 nxge_alloc_txb(
2906 2907 p_nxge_t nxgep,
2907 2908 int channel)
2908 2909 {
2909 2910 nxge_dma_common_t **dma_buf_p;
2910 2911 nxge_dma_common_t **dma_cntl_p;
2911 2912 uint32_t *num_chunks;
2912 2913 nxge_status_t status = NXGE_OK;
2913 2914
2914 2915 nxge_tdc_sizes_t sizes;
2915 2916
2916 2917 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_txb"));
2917 2918
2918 2919 if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK)
2919 2920 return (NXGE_ERROR);
2920 2921
2921 2922 /*
2922 2923 * Allocate memory for transmit buffers and descriptor rings.
2923 2924 * Replace these allocation functions with the interface functions
2924 2925 * provided by the partition manager Real Soon Now.
2925 2926 */
2926 2927 dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2927 2928 num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel];
2928 2929
2929 2930 dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2930 2931
2931 2932 /*
2932 2933 * Allocate memory for transmit buffers and descriptor rings.
2933 2934 * Replace allocation functions with interface functions provided
2934 2935 * by the partition manager when it is available.
2935 2936 *
2936 2937 * Allocate memory for the transmit buffer pool.
2937 2938 */
2938 2939 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2939 2940 "sizes: tx: %ld, cr:%ld, th:%ld",
2940 2941 sizes.tx_size, sizes.cr_size, sizes.threshhold));
2941 2942
2942 2943 *num_chunks = 0;
2943 2944 status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p,
2944 2945 sizes.tx_size, sizes.threshhold, num_chunks);
2945 2946 if (status != NXGE_OK) {
2946 2947 cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!");
2947 2948 return (status);
2948 2949 }
2949 2950
2950 2951 /*
2951 2952 * Allocate memory for descriptor rings and mailbox.
2952 2953 */
2953 2954 status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p,
2954 2955 sizes.cr_size);
2955 2956 if (status != NXGE_OK) {
2956 2957 nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks);
2957 2958 cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!");
2958 2959 return (status);
2959 2960 }
2960 2961
2961 2962 return (NXGE_OK);
2962 2963 }
2963 2964
2964 2965 void
2965 2966 nxge_free_txb(
2966 2967 p_nxge_t nxgep,
2967 2968 int channel)
2968 2969 {
2969 2970 nxge_dma_common_t *data;
2970 2971 nxge_dma_common_t *control;
2971 2972 uint32_t num_chunks;
2972 2973
2973 2974 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb"));
2974 2975
2975 2976 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2976 2977 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
2977 2978 nxge_free_tx_buf_dma(nxgep, data, num_chunks);
2978 2979
2979 2980 nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0;
2980 2981 nxgep->tx_buf_pool_p->num_chunks[channel] = 0;
2981 2982
2982 2983 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2983 2984 nxge_free_tx_cntl_dma(nxgep, control);
2984 2985
2985 2986 nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0;
2986 2987
2987 2988 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2988 2989 KMEM_FREE(control, sizeof (nxge_dma_common_t));
2989 2990
2990 2991 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb"));
2991 2992 }
2992 2993
2993 2994 /*
2994 2995 * nxge_alloc_tx_mem_pool
2995 2996 *
2996 2997 * This function allocates all of the per-port TDC control data structures.
2997 2998 * The per-channel (TDC) data structures are allocated when needed.
2998 2999 *
2999 3000 * Arguments:
3000 3001 * nxgep
3001 3002 *
3002 3003 * Notes:
3003 3004 *
3004 3005 * Context:
3005 3006 * Any domain
3006 3007 */
3007 3008 nxge_status_t
3008 3009 nxge_alloc_tx_mem_pool(p_nxge_t nxgep)
3009 3010 {
3010 3011 nxge_hw_pt_cfg_t *p_cfgp;
3011 3012 nxge_dma_pool_t *dma_poolp;
3012 3013 nxge_dma_common_t **dma_buf_p;
3013 3014 nxge_dma_pool_t *dma_cntl_poolp;
3014 3015 nxge_dma_common_t **dma_cntl_p;
3015 3016 uint32_t *num_chunks; /* per dma */
3016 3017 int tdc_max;
3017 3018
3018 3019 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool"));
3019 3020
3020 3021 p_cfgp = &nxgep->pt_config.hw_config;
3021 3022 tdc_max = NXGE_MAX_TDCS;
3022 3023
3023 3024 /*
3024 3025 * Allocate memory for each transmit DMA channel.
3025 3026 */
3026 3027 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
3027 3028 KM_SLEEP);
3028 3029 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
3029 3030 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP);
3030 3031
3031 3032 dma_cntl_poolp = (p_nxge_dma_pool_t)
3032 3033 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
3033 3034 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
3034 3035 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP);
3035 3036
3036 3037 if (nxge_tx_ring_size > TDC_DEFAULT_MAX) {
3037 3038 NXGE_DEBUG_MSG((nxgep, MEM_CTL,
3038 3039 "nxge_alloc_tx_mem_pool: TDC too high %d, "
3039 3040 "set to default %d",
3040 3041 nxge_tx_ring_size, TDC_DEFAULT_MAX));
3041 3042 nxge_tx_ring_size = TDC_DEFAULT_MAX;
3042 3043 }
3043 3044
3044 3045 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3045 3046 /*
3046 3047 	 * N2/NIU has limitations on the descriptor sizes: contiguous
3047 3048 	 * memory allocation for data buffers is limited to 4M
3048 3049 	 * (contig_mem_alloc), and control buffers must be little endian
3049 3050 	 * (allocated with the ddi/dki mem alloc functions). The transmit
3050 3051 	 * ring is limited to 8K (including the mailbox).
3051 3052 */
3052 3053 if (nxgep->niu_type == N2_NIU) {
3053 3054 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) ||
3054 3055 (!ISP2(nxge_tx_ring_size))) {
3055 3056 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX;
3056 3057 }
3057 3058 }
3058 3059 #endif
3059 3060
3060 3061 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size;
3061 3062
3062 3063 num_chunks = (uint32_t *)KMEM_ZALLOC(
3063 3064 sizeof (uint32_t) * tdc_max, KM_SLEEP);
3064 3065
3065 3066 dma_poolp->ndmas = p_cfgp->tdc.owned;
3066 3067 dma_poolp->num_chunks = num_chunks;
3067 3068 dma_poolp->dma_buf_pool_p = dma_buf_p;
3068 3069 nxgep->tx_buf_pool_p = dma_poolp;
3069 3070
3070 3071 dma_poolp->buf_allocated = B_TRUE;
3071 3072
3072 3073 dma_cntl_poolp->ndmas = p_cfgp->tdc.owned;
3073 3074 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
3074 3075 nxgep->tx_cntl_pool_p = dma_cntl_poolp;
3075 3076
3076 3077 dma_cntl_poolp->buf_allocated = B_TRUE;
3077 3078
3078 3079 nxgep->tx_rings =
3079 3080 KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP);
3080 3081 nxgep->tx_rings->rings =
3081 3082 KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP);
3082 3083 nxgep->tx_mbox_areas_p =
3083 3084 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP);
3084 3085 nxgep->tx_mbox_areas_p->txmbox_areas_p =
3085 3086 KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP);
3086 3087
3087 3088 nxgep->tx_rings->ndmas = p_cfgp->tdc.owned;
3088 3089
3089 3090 NXGE_DEBUG_MSG((nxgep, MEM_CTL,
3090 3091 "==> nxge_alloc_tx_mem_pool: ndmas %d poolp->ndmas %d",
3091 3092 tdc_max, dma_poolp->ndmas));
3092 3093
3093 3094 return (NXGE_OK);
3094 3095 }
3095 3096
3096 3097 nxge_status_t
3097 3098 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
3098 3099 p_nxge_dma_common_t *dmap, size_t alloc_size,
3099 3100 size_t block_size, uint32_t *num_chunks)
3100 3101 {
3101 3102 p_nxge_dma_common_t tx_dmap;
3102 3103 nxge_status_t status = NXGE_OK;
3103 3104 size_t total_alloc_size;
3104 3105 size_t allocated = 0;
3105 3106 int i, size_index, array_size;
3106 3107
3107 3108 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma"));
3108 3109
3109 3110 tx_dmap = (p_nxge_dma_common_t)
3110 3111 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
3111 3112 KM_SLEEP);
3112 3113
3113 3114 total_alloc_size = alloc_size;
3114 3115 i = 0;
3115 3116 size_index = 0;
3116 3117 array_size = sizeof (alloc_sizes) / sizeof (size_t);
3117 3118 while ((size_index < array_size) &&
3118 3119 (alloc_sizes[size_index] < alloc_size))
3119 3120 size_index++;
3120 3121 if (size_index >= array_size) {
3121 3122 size_index = array_size - 1;
3122 3123 }
3123 3124
3124 3125 while ((allocated < total_alloc_size) &&
3125 3126 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
3126 3127
3127 3128 tx_dmap[i].dma_chunk_index = i;
3128 3129 tx_dmap[i].block_size = block_size;
3129 3130 tx_dmap[i].alength = alloc_sizes[size_index];
3130 3131 tx_dmap[i].orig_alength = tx_dmap[i].alength;
3131 3132 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
3132 3133 tx_dmap[i].dma_channel = dma_channel;
3133 3134 tx_dmap[i].contig_alloc_type = B_FALSE;
3134 3135 tx_dmap[i].kmem_alloc_type = B_FALSE;
3135 3136
3136 3137 /*
3137 3138 * N2/NIU: data buffers must be contiguous as the driver
3138 3139 * needs to call Hypervisor api to set up
3139 3140 * logical pages.
3140 3141 */
3141 3142 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
3142 3143 tx_dmap[i].contig_alloc_type = B_TRUE;
3143 3144 }
3144 3145
3145 3146 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
3146 3147 &nxge_tx_dma_attr,
3147 3148 tx_dmap[i].alength,
3148 3149 &nxge_dev_buf_dma_acc_attr,
3149 3150 DDI_DMA_WRITE | DDI_DMA_STREAMING,
3150 3151 (p_nxge_dma_common_t)(&tx_dmap[i]));
3151 3152 if (status != NXGE_OK) {
3152 3153 size_index--;
3153 3154 } else {
3154 3155 i++;
3155 3156 allocated += alloc_sizes[size_index];
3156 3157 }
3157 3158 }
3158 3159
3159 3160 if (allocated < total_alloc_size) {
3160 3161 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3161 3162 "==> nxge_alloc_tx_buf_dma: not enough channel %d: "
3162 3163 "allocated 0x%x requested 0x%x",
3163 3164 dma_channel,
3164 3165 allocated, total_alloc_size));
3165 3166 status = NXGE_ERROR;
3166 3167 goto nxge_alloc_tx_mem_fail1;
3167 3168 }
3168 3169
3169 3170 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3170 3171 "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: "
3171 3172 "allocated 0x%x requested 0x%x",
3172 3173 dma_channel,
3173 3174 allocated, total_alloc_size));
3174 3175
3175 3176 *num_chunks = i;
3176 3177 *dmap = tx_dmap;
3177 3178 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3178 3179 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
3179 3180 *dmap, i));
3180 3181 goto nxge_alloc_tx_mem_exit;
3181 3182
3182 3183 nxge_alloc_tx_mem_fail1:
3183 3184 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
3184 3185
3185 3186 nxge_alloc_tx_mem_exit:
3186 3187 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3187 3188 "<== nxge_alloc_tx_buf_dma status 0x%08x", status));
3188 3189
3189 3190 return (status);
3190 3191 }
3191 3192
3192 3193 /*ARGSUSED*/
3193 3194 static void
3194 3195 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
3195 3196 uint32_t num_chunks)
3196 3197 {
3197 3198 int i;
3198 3199
3199 3200 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma"));
3200 3201
3201 3202 if (dmap == 0)
3202 3203 return;
3203 3204
3204 3205 for (i = 0; i < num_chunks; i++) {
3205 3206 nxge_dma_mem_free(dmap++);
3206 3207 }
3207 3208
3208 3209 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma"));
3209 3210 }
3210 3211
3211 3212 /*ARGSUSED*/
3212 3213 nxge_status_t
3213 3214 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
3214 3215 p_nxge_dma_common_t *dmap, size_t size)
3215 3216 {
3216 3217 p_nxge_dma_common_t tx_dmap;
3217 3218 nxge_status_t status = NXGE_OK;
3218 3219
3219 3220 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma"));
3220 3221 tx_dmap = (p_nxge_dma_common_t)
3221 3222 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
3222 3223
3223 3224 tx_dmap->contig_alloc_type = B_FALSE;
3224 3225 tx_dmap->kmem_alloc_type = B_FALSE;
3225 3226
3226 3227 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
3227 3228 &nxge_desc_dma_attr,
3228 3229 size,
3229 3230 &nxge_dev_desc_dma_acc_attr,
3230 3231 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3231 3232 tx_dmap);
3232 3233 if (status != NXGE_OK) {
3233 3234 goto nxge_alloc_tx_cntl_dma_fail1;
3234 3235 }
3235 3236
3236 3237 *dmap = tx_dmap;
3237 3238 goto nxge_alloc_tx_cntl_dma_exit;
3238 3239
3239 3240 nxge_alloc_tx_cntl_dma_fail1:
3240 3241 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t));
3241 3242
3242 3243 nxge_alloc_tx_cntl_dma_exit:
3243 3244 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3244 3245 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status));
3245 3246
3246 3247 return (status);
3247 3248 }
3248 3249
3249 3250 /*ARGSUSED*/
3250 3251 static void
3251 3252 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
3252 3253 {
3253 3254 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma"));
3254 3255
3255 3256 if (dmap == 0)
3256 3257 return;
3257 3258
3258 3259 nxge_dma_mem_free(dmap);
3259 3260
3260 3261 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma"));
3261 3262 }
3262 3263
3263 3264 /*
3264 3265 * nxge_free_tx_mem_pool
3265 3266 *
3266 3267 * This function frees all of the per-port TDC control data structures.
3267 3268 * The per-channel (TDC) data structures are freed when the channel
3268 3269 * is stopped.
3269 3270 *
3270 3271 * Arguments:
3271 3272 * nxgep
3272 3273 *
3273 3274 * Notes:
3274 3275 *
3275 3276 * Context:
3276 3277 * Any domain
3277 3278 */
3278 3279 static void
3279 3280 nxge_free_tx_mem_pool(p_nxge_t nxgep)
3280 3281 {
3281 3282 int tdc_max = NXGE_MAX_TDCS;
3282 3283
3283 3284 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool"));
3284 3285
3285 3286 if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) {
3286 3287 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3287 3288 "<== nxge_free_tx_mem_pool "
3288 3289 	    "(null tx buf pool or buf not allocated)"));
3289 3290 return;
3290 3291 }
3291 3292 if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) {
3292 3293 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3293 3294 "<== nxge_free_tx_mem_pool "
3294 3295 "(null tx cntl buf pool or cntl buf not allocated)"));
3295 3296 return;
3296 3297 }
3297 3298
3298 3299 /* 1. Free the mailboxes. */
3299 3300 KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p,
3300 3301 sizeof (p_tx_mbox_t) * tdc_max);
3301 3302 KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t));
3302 3303
3303 3304 nxgep->tx_mbox_areas_p = 0;
3304 3305
3305 3306 /* 2. Free the transmit ring arrays. */
3306 3307 KMEM_FREE(nxgep->tx_rings->rings,
3307 3308 sizeof (p_tx_ring_t) * tdc_max);
3308 3309 KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t));
3309 3310
3310 3311 nxgep->tx_rings = 0;
3311 3312
3312 3313 /* 3. Free the completion ring data structures. */
3313 3314 KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p,
3314 3315 sizeof (p_nxge_dma_common_t) * tdc_max);
3315 3316 KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t));
3316 3317
3317 3318 nxgep->tx_cntl_pool_p = 0;
3318 3319
3319 3320 /* 4. Free the data ring data structures. */
3320 3321 KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks,
3321 3322 sizeof (uint32_t) * tdc_max);
3322 3323 KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p,
3323 3324 sizeof (p_nxge_dma_common_t) * tdc_max);
3324 3325 KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t));
3325 3326
3326 3327 nxgep->tx_buf_pool_p = 0;
3327 3328
3328 3329 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool"));
3329 3330 }
3330 3331
3331 3332 /*ARGSUSED*/
3332 3333 static nxge_status_t
3333 3334 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method,
3334 3335 struct ddi_dma_attr *dma_attrp,
3335 3336 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
3336 3337 p_nxge_dma_common_t dma_p)
3337 3338 {
3338 3339 caddr_t kaddrp;
3339 3340 int ddi_status = DDI_SUCCESS;
3340 3341 boolean_t contig_alloc_type;
3341 3342 boolean_t kmem_alloc_type;
3342 3343
3343 3344 contig_alloc_type = dma_p->contig_alloc_type;
3344 3345
3345 3346 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) {
3346 3347 /*
3347 3348 * contig_alloc_type (contiguous memory) is only allowed
3348 3349 * for N2/NIU.
3349 3350 */
3350 3351 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3351 3352 "nxge_dma_mem_alloc: alloc type not allowed (%d)",
3352 3353 dma_p->contig_alloc_type));
3353 3354 return (NXGE_ERROR | NXGE_DDI_FAILED);
3354 3355 }
3355 3356
3356 3357 dma_p->dma_handle = NULL;
3357 3358 dma_p->acc_handle = NULL;
3358 3359 dma_p->kaddrp = dma_p->last_kaddrp = NULL;
3359 3360 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL;
3360 3361 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp,
3361 3362 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
3362 3363 if (ddi_status != DDI_SUCCESS) {
3363 3364 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3364 3365 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
3365 3366 return (NXGE_ERROR | NXGE_DDI_FAILED);
3366 3367 }
3367 3368
3368 3369 kmem_alloc_type = dma_p->kmem_alloc_type;
3369 3370
3370 3371 switch (contig_alloc_type) {
3371 3372 case B_FALSE:
3372 3373 switch (kmem_alloc_type) {
3373 3374 case B_FALSE:
3374 3375 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle,
3375 3376 length,
3376 3377 acc_attr_p,
3377 3378 xfer_flags,
3378 3379 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
3379 3380 &dma_p->acc_handle);
3380 3381 if (ddi_status != DDI_SUCCESS) {
3381 3382 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3382 3383 "nxge_dma_mem_alloc: "
3383 3384 "ddi_dma_mem_alloc failed"));
3384 3385 ddi_dma_free_handle(&dma_p->dma_handle);
3385 3386 dma_p->dma_handle = NULL;
3386 3387 return (NXGE_ERROR | NXGE_DDI_FAILED);
3387 3388 }
3388 3389 if (dma_p->alength < length) {
3389 3390 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3390 3391 "nxge_dma_mem_alloc: ddi_dma_mem_alloc "
3391 3392 "returned alength < length."));
3392 3393 ddi_dma_mem_free(&dma_p->acc_handle);
3393 3394 ddi_dma_free_handle(&dma_p->dma_handle);
3394 3395 dma_p->acc_handle = NULL;
3395 3396 dma_p->dma_handle = NULL;
3396 3397 return (NXGE_ERROR);
3397 3398 }
3398 3399
3399 3400 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle,
3400 3401 NULL,
3401 3402 kaddrp, dma_p->alength, xfer_flags,
3402 3403 DDI_DMA_DONTWAIT,
3403 3404 0, &dma_p->dma_cookie, &dma_p->ncookies);
3404 3405 if (ddi_status != DDI_DMA_MAPPED) {
3405 3406 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3406 3407 "nxge_dma_mem_alloc: ddi_dma_addr_bind "
3407 3408 "failed "
3408 3409 "(status 0x%x ncookies %d.)", ddi_status,
3409 3410 dma_p->ncookies));
3410 3411 if (dma_p->acc_handle) {
3411 3412 ddi_dma_mem_free(&dma_p->acc_handle);
3412 3413 dma_p->acc_handle = NULL;
3413 3414 }
3414 3415 ddi_dma_free_handle(&dma_p->dma_handle);
3415 3416 dma_p->dma_handle = NULL;
3416 3417 return (NXGE_ERROR | NXGE_DDI_FAILED);
3417 3418 }
3418 3419
3419 3420 if (dma_p->ncookies != 1) {
3420 3421 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3421 3422 "nxge_dma_mem_alloc:ddi_dma_addr_bind "
3422 3423 "> 1 cookie "
3423 3424 "(status 0x%x ncookies %d.)", ddi_status,
3424 3425 dma_p->ncookies));
3425 3426 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3426 3427 if (dma_p->acc_handle) {
3427 3428 ddi_dma_mem_free(&dma_p->acc_handle);
3428 3429 dma_p->acc_handle = NULL;
3429 3430 }
3430 3431 ddi_dma_free_handle(&dma_p->dma_handle);
3431 3432 dma_p->dma_handle = NULL;
3432 3433 dma_p->acc_handle = NULL;
3433 3434 return (NXGE_ERROR);
3434 3435 }
3435 3436 break;
3436 3437
3437 3438 case B_TRUE:
3438 3439 kaddrp = KMEM_ALLOC(length, KM_NOSLEEP);
3439 3440 if (kaddrp == NULL) {
3440 3441 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3441 3442 "nxge_dma_mem_alloc: "
3442 3443 "kmem_alloc failed"));
3443 3444 return (NXGE_ERROR);
3444 3445 }
3445 3446
3446 3447 dma_p->alength = length;
3447 3448 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle,
3448 3449 NULL, kaddrp, dma_p->alength, xfer_flags,
3449 3450 DDI_DMA_DONTWAIT, 0,
3450 3451 &dma_p->dma_cookie, &dma_p->ncookies);
3451 3452 if (ddi_status != DDI_DMA_MAPPED) {
3452 3453 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3453 3454 "nxge_dma_mem_alloc:ddi_dma_addr_bind: "
3454 3455 "(kmem_alloc) failed kaddrp $%p length %d "
3455 3456 "(status 0x%x (%d) ncookies %d.)",
3456 3457 kaddrp, length,
3457 3458 ddi_status, ddi_status, dma_p->ncookies));
3458 3459 KMEM_FREE(kaddrp, length);
3459 3460 dma_p->acc_handle = NULL;
3460 3461 ddi_dma_free_handle(&dma_p->dma_handle);
3461 3462 dma_p->dma_handle = NULL;
3462 3463 dma_p->kaddrp = NULL;
3463 3464 return (NXGE_ERROR | NXGE_DDI_FAILED);
3464 3465 }
3465 3466
3466 3467 if (dma_p->ncookies != 1) {
3467 3468 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3468 3469 "nxge_dma_mem_alloc:ddi_dma_addr_bind "
3469 3470 "(kmem_alloc) > 1 cookie "
3470 3471 "(status 0x%x ncookies %d.)", ddi_status,
3471 3472 dma_p->ncookies));
3472 3473 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3473 3474 KMEM_FREE(kaddrp, length);
3474 3475 ddi_dma_free_handle(&dma_p->dma_handle);
3475 3476 dma_p->dma_handle = NULL;
3476 3477 dma_p->acc_handle = NULL;
3477 3478 dma_p->kaddrp = NULL;
3478 3479 return (NXGE_ERROR);
3479 3480 }
3480 3481
3481 3482 dma_p->kaddrp = kaddrp;
3482 3483
3483 3484 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
3484 3485 "nxge_dma_mem_alloc: kmem_alloc dmap $%p "
3485 3486 "kaddr $%p alength %d",
3486 3487 dma_p,
3487 3488 kaddrp,
3488 3489 dma_p->alength));
3489 3490 break;
3490 3491 }
3491 3492 break;
3492 3493
3493 3494 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3494 3495 case B_TRUE:
3495 3496 kaddrp = (caddr_t)contig_mem_alloc(length);
3496 3497 if (kaddrp == NULL) {
3497 3498 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3498 3499 "nxge_dma_mem_alloc:contig_mem_alloc failed."));
3499 3500 ddi_dma_free_handle(&dma_p->dma_handle);
3500 3501 return (NXGE_ERROR | NXGE_DDI_FAILED);
3501 3502 }
3502 3503
3503 3504 dma_p->alength = length;
3504 3505 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
3505 3506 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
3506 3507 &dma_p->dma_cookie, &dma_p->ncookies);
3507 3508 if (ddi_status != DDI_DMA_MAPPED) {
3508 3509 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3509 3510 "nxge_dma_mem_alloc: ddi_dma_addr_bind failed "
3510 3511 "(status 0x%x ncookies %d.)", ddi_status,
3511 3512 dma_p->ncookies));
3512 3513
3513 3514 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3514 3515 "==> nxge_dma_mem_alloc: (not mapped)"
3515 3516 "length %lu (0x%x) "
3516 3517 "free contig kaddrp $%p "
3517 3518 "va_to_pa $%p",
3518 3519 length, length,
3519 3520 kaddrp,
3520 3521 va_to_pa(kaddrp)));
3521 3522
3522 3523
3523 3524 contig_mem_free((void *)kaddrp, length);
3524 3525 ddi_dma_free_handle(&dma_p->dma_handle);
3525 3526
3526 3527 dma_p->dma_handle = NULL;
3527 3528 dma_p->acc_handle = NULL;
3528 3529 dma_p->alength = NULL;
3529 3530 dma_p->kaddrp = NULL;
3530 3531
3531 3532 return (NXGE_ERROR | NXGE_DDI_FAILED);
3532 3533 }
3533 3534
3534 3535 if (dma_p->ncookies != 1 ||
3535 3536 (dma_p->dma_cookie.dmac_laddress == NULL)) {
3536 3537 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3537 3538 "nxge_dma_mem_alloc: ddi_dma_addr_bind > 1 "
3538 3539 "cookie or "
3539 3540 "dmac_laddress is NULL $%p size %d "
3540 3541 " (status 0x%x ncookies %d.)",
3541 3542 ddi_status,
3542 3543 dma_p->dma_cookie.dmac_laddress,
3543 3544 dma_p->dma_cookie.dmac_size,
3544 3545 dma_p->ncookies));
3545 3546
3546 3547 contig_mem_free((void *)kaddrp, length);
3547 3548 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3548 3549 ddi_dma_free_handle(&dma_p->dma_handle);
3549 3550
3550 3551 dma_p->alength = 0;
3551 3552 dma_p->dma_handle = NULL;
3552 3553 dma_p->acc_handle = NULL;
3553 3554 dma_p->kaddrp = NULL;
3554 3555
3555 3556 return (NXGE_ERROR | NXGE_DDI_FAILED);
3556 3557 }
3557 3558 break;
3558 3559
3559 3560 #else
3560 3561 case B_TRUE:
3561 3562 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3562 3563 "nxge_dma_mem_alloc: invalid alloc type for !sun4v"));
3563 3564 return (NXGE_ERROR | NXGE_DDI_FAILED);
3564 3565 #endif
3565 3566 }
3566 3567
3567 3568 dma_p->kaddrp = kaddrp;
3568 3569 dma_p->last_kaddrp = (unsigned char *)kaddrp +
3569 3570 dma_p->alength - RXBUF_64B_ALIGNED;
3570 3571 #if defined(__i386)
3571 3572 dma_p->ioaddr_pp =
3572 3573 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
3573 3574 #else
3574 3575 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress;
3575 3576 #endif
3576 3577 dma_p->last_ioaddr_pp =
3577 3578 #if defined(__i386)
3578 3579 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress +
3579 3580 #else
3580 3581 (unsigned char *)dma_p->dma_cookie.dmac_laddress +
3581 3582 #endif
3582 3583 dma_p->alength - RXBUF_64B_ALIGNED;
3583 3584
3584 3585 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
3585 3586
3586 3587 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3587 3588 dma_p->orig_ioaddr_pp =
3588 3589 (unsigned char *)dma_p->dma_cookie.dmac_laddress;
3589 3590 dma_p->orig_alength = length;
3590 3591 dma_p->orig_kaddrp = kaddrp;
3591 3592 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp);
3592 3593 #endif
3593 3594
3594 3595 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: "
3595 3596 "dma buffer allocated: dma_p $%p "
3596 3597 "return dmac_laddress from cookie $%p cookie dmac_size %d "
3597 3598 "dma_p->ioaddr_p $%p "
3598 3599 "dma_p->orig_ioaddr_p $%p "
3599 3600 "orig_vatopa $%p "
3600 3601 "alength %d (0x%x) "
3601 3602 "kaddrp $%p "
3602 3603 "length %d (0x%x)",
3603 3604 dma_p,
3604 3605 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size,
3605 3606 dma_p->ioaddr_pp,
3606 3607 dma_p->orig_ioaddr_pp,
3607 3608 dma_p->orig_vatopa,
3608 3609 dma_p->alength, dma_p->alength,
3609 3610 kaddrp,
3610 3611 length, length));
3611 3612
3612 3613 return (NXGE_OK);
3613 3614 }
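
/*
 * Illustrative sketch (editorial, not part of the reviewed change):
 * nxge_dma_mem_alloc() above unbinds and fails whenever a binding yields
 * more than one DMA cookie.  The usual way a caller guarantees a single
 * cookie is to set dma_attr_sgllen to 1 in the ddi_dma_attr_t it passes
 * in.  The attribute values below are hypothetical, not the driver's
 * real nxge_desc_dma_attr.
 */
#if 0	/* sketch only */
#include <sys/ddi.h>
#include <sys/sunddi.h>

static ddi_dma_attr_t example_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	0x0ULL,				/* dma_attr_addr_lo */
	0xffffffffffffffffULL,		/* dma_attr_addr_hi */
	0xffffffffULL,			/* dma_attr_count_max */
	0x1000,				/* dma_attr_align (assumed 4K) */
	0xfc00fc,			/* dma_attr_burstsizes (assumed) */
	0x1,				/* dma_attr_minxfer */
	0xffffffffULL,			/* dma_attr_maxxfer */
	0xffffffffULL,			/* dma_attr_seg */
	1,				/* dma_attr_sgllen: one cookie only */
	1,				/* dma_attr_granular */
	0				/* dma_attr_flags */
};
#endif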
3614 3615
3615 3616 static void
3616 3617 nxge_dma_mem_free(p_nxge_dma_common_t dma_p)
3617 3618 {
3618 3619 if (dma_p->dma_handle != NULL) {
3619 3620 if (dma_p->ncookies) {
3620 3621 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3621 3622 dma_p->ncookies = 0;
3622 3623 }
3623 3624 ddi_dma_free_handle(&dma_p->dma_handle);
3624 3625 dma_p->dma_handle = NULL;
3625 3626 }
3626 3627
3627 3628 if (dma_p->acc_handle != NULL) {
3628 3629 ddi_dma_mem_free(&dma_p->acc_handle);
3629 3630 dma_p->acc_handle = NULL;
3630 3631 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
3631 3632 }
3632 3633
3633 3634 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3634 3635 if (dma_p->contig_alloc_type &&
3635 3636 dma_p->orig_kaddrp && dma_p->orig_alength) {
3636 3637 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: "
3637 3638 "kaddrp $%p (orig_kaddrp $%p)"
3638 3639 " mem type %d "
3639 3640 "orig_alength %d "
3640 3641 "alength 0x%x (%d)",
3641 3642 dma_p->kaddrp,
3642 3643 dma_p->orig_kaddrp,
3643 3644 dma_p->contig_alloc_type,
3644 3645 dma_p->orig_alength,
3645 3646 dma_p->alength, dma_p->alength));
3646 3647
3647 3648 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength);
3648 3649 dma_p->orig_alength = NULL;
3649 3650 dma_p->orig_kaddrp = NULL;
3650 3651 dma_p->contig_alloc_type = B_FALSE;
3651 3652 }
3652 3653 #endif
3653 3654 dma_p->kaddrp = NULL;
3654 3655 dma_p->alength = NULL;
3655 3656 }
3656 3657
3657 3658 static void
3658 3659 nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p)
3659 3660 {
3660 3661 uint64_t kaddr;
3661 3662 uint32_t buf_size;
3662 3663
3663 3664 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf"));
3664 3665
3665 3666 if (dma_p->dma_handle != NULL) {
3666 3667 if (dma_p->ncookies) {
3667 3668 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3668 3669 dma_p->ncookies = 0;
3669 3670 }
3670 3671 ddi_dma_free_handle(&dma_p->dma_handle);
3671 3672 dma_p->dma_handle = NULL;
3672 3673 }
3673 3674
3674 3675 if (dma_p->acc_handle != NULL) {
3675 3676 ddi_dma_mem_free(&dma_p->acc_handle);
3676 3677 dma_p->acc_handle = NULL;
3677 3678 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
3678 3679 }
3679 3680
3680 3681 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3681 3682 "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d",
3682 3683 dma_p,
3683 3684 dma_p->buf_alloc_state));
3684 3685
3685 3686 if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) {
3686 3687 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3687 3688 "<== nxge_dma_free_rx_data_buf: "
3688 3689 "outstanding data buffers"));
3689 3690 return;
3690 3691 }
3691 3692
3692 3693 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3693 3694 if (dma_p->contig_alloc_type &&
3694 3695 dma_p->orig_kaddrp && dma_p->orig_alength) {
3695 3696 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: "
3696 3697 "kaddrp $%p (orig_kaddrp $%p)"
3697 3698 " mem type %d "
3698 3699 "orig_alength %d "
3699 3700 "alength 0x%x (%d)",
3700 3701 dma_p->kaddrp,
3701 3702 dma_p->orig_kaddrp,
3702 3703 dma_p->contig_alloc_type,
3703 3704 dma_p->orig_alength,
3704 3705 dma_p->alength, dma_p->alength));
3705 3706
3706 3707 kaddr = (uint64_t)dma_p->orig_kaddrp;
3707 3708 buf_size = dma_p->orig_alength;
3708 3709 nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size);
3709 3710 dma_p->orig_alength = NULL;
3710 3711 dma_p->orig_kaddrp = NULL;
3711 3712 dma_p->contig_alloc_type = B_FALSE;
3712 3713 dma_p->kaddrp = NULL;
3713 3714 dma_p->alength = NULL;
3714 3715 return;
3715 3716 }
3716 3717 #endif
3717 3718
3718 3719 if (dma_p->kmem_alloc_type) {
3719 3720 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3720 3721 "nxge_dma_free_rx_data_buf: free kmem "
3721 3722 "kaddrp $%p (orig_kaddrp $%p)"
3722 3723 "alloc type %d "
3723 3724 "orig_alength %d "
3724 3725 "alength 0x%x (%d)",
3725 3726 dma_p->kaddrp,
3726 3727 dma_p->orig_kaddrp,
3727 3728 dma_p->kmem_alloc_type,
3728 3729 dma_p->orig_alength,
3729 3730 dma_p->alength, dma_p->alength));
3730 3731 #if defined(__i386)
3731 3732 kaddr = (uint64_t)(uint32_t)dma_p->kaddrp;
3732 3733 #else
3733 3734 kaddr = (uint64_t)dma_p->kaddrp;
3734 3735 #endif
3735 3736 buf_size = dma_p->orig_alength;
3736 3737 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3737 3738 "nxge_dma_free_rx_data_buf: free dmap $%p "
3738 3739 "kaddr $%p buf_size %d",
3739 3740 dma_p,
3740 3741 kaddr, buf_size));
3741 3742 nxge_free_buf(KMEM_ALLOC, kaddr, buf_size);
3742 3743 dma_p->alength = 0;
3743 3744 dma_p->orig_alength = 0;
3744 3745 dma_p->kaddrp = NULL;
3745 3746 dma_p->kmem_alloc_type = B_FALSE;
3746 3747 }
3747 3748
3748 3749 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf"));
3749 3750 }
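
/*
 * Illustrative sketch (editorial, not part of the reviewed change): the
 * early return above, keyed off BUF_ALLOCATED_WAIT_FREE, defers freeing a
 * receive buffer that still appears to be outstanding.  Assuming the
 * receive path loans buffers to the stack with desballoc(), the generic
 * deferred-free pattern looks roughly like this; all names here are
 * hypothetical, not the driver's own structures.
 */
#if 0	/* sketch only */
#include <sys/stream.h>
#include <sys/atomic.h>
#include <sys/kmem.h>

typedef struct ex_rx_buf {
	frtn_t		exb_frtn;	/* free routine handed to desballoc() */
	caddr_t		exb_base;	/* backing buffer */
	size_t		exb_size;
	uint32_t	exb_loaned;	/* mblks currently held by the stack */
	boolean_t	exb_teardown;	/* owner asked for the pool to go away */
} ex_rx_buf_t;

static void
ex_rx_buf_free_cb(caddr_t arg)
{
	ex_rx_buf_t *bp = (ex_rx_buf_t *)arg;

	/* Runs when the stack finally frees a loaned mblk. */
	if (atomic_dec_32_nv(&bp->exb_loaned) == 0 && bp->exb_teardown)
		kmem_free(bp->exb_base, bp->exb_size);
}
#endif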
3750 3751
3751 3752 /*
3752 3753 * nxge_m_start() -- start transmitting and receiving.
3753 3754 *
3754 3755 * This function is called by the MAC layer when the first
3755 3756 * stream is opened, to prepare the hardware for sending
3756 3757 * and receiving packets.
3757 3758 */
3758 3759 static int
3759 3760 nxge_m_start(void *arg)
3760 3761 {
3761 3762 p_nxge_t nxgep = (p_nxge_t)arg;
3762 3763
3763 3764 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));
3764 3765
3765 3766 /*
3766 3767 * Are we already started?
3767 3768 */
3768 3769 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
3769 3770 return (0);
3770 3771 }
3771 3772
3772 3773 if (nxge_peu_reset_enable && !nxgep->nxge_link_poll_timerid) {
3773 3774 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
3774 3775 }
3775 3776
3776 3777 /*
3777 3778 * Make sure RX MAC is disabled while we initialize.
3778 3779 */
3779 3780 if (!isLDOMguest(nxgep)) {
3780 3781 (void) nxge_rx_mac_disable(nxgep);
3781 3782 }
3782 3783
3783 3784 /*
3784 3785 * Grab the global lock.
3785 3786 */
3786 3787 MUTEX_ENTER(nxgep->genlock);
3787 3788
3788 3789 /*
3789 3790 * Initialize the driver and hardware.
3790 3791 */
3791 3792 if (nxge_init(nxgep) != NXGE_OK) {
3792 3793 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3793 3794 "<== nxge_m_start: initialization failed"));
3794 3795 MUTEX_EXIT(nxgep->genlock);
3795 3796 return (EIO);
3796 3797 }
3797 3798
3798 3799 /*
3799 3800 * Start the timer to check for system errors and tx hangs.
3800 3801 */
3801 3802 if (!isLDOMguest(nxgep))
3802 3803 nxgep->nxge_timerid = nxge_start_timer(nxgep,
3803 3804 nxge_check_hw_state, NXGE_CHECK_TIMER);
3804 3805 #if defined(sun4v)
3805 3806 else
3806 3807 nxge_hio_start_timer(nxgep);
3807 3808 #endif
3808 3809
3809 3810 nxgep->link_notify = B_TRUE;
3810 3811 nxgep->link_check_count = 0;
3811 3812 nxgep->nxge_mac_state = NXGE_MAC_STARTED;
3812 3813
3813 3814 /*
3814 3815 * Let the global lock go, since we are initialized.
3815 3816 */
3816 3817 MUTEX_EXIT(nxgep->genlock);
3817 3818
3818 3819 /*
3819 3820 * Let the MAC start receiving packets, now that
3820 3821 * we are initialized.
3821 3822 */
3822 3823 if (!isLDOMguest(nxgep)) {
3823 3824 if (nxge_rx_mac_enable(nxgep) != NXGE_OK) {
3824 3825 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3825 3826 "<== nxge_m_start: enable of RX mac failed"));
3826 3827 return (EIO);
3827 3828 }
3828 3829
3829 3830 /*
3830 3831 * Enable hardware interrupts.
3831 3832 */
3832 3833 nxge_intr_hw_enable(nxgep);
3833 3834 }
3834 3835 #if defined(sun4v)
3835 3836 else {
3836 3837 /*
3837 3838 * In guest domain we enable RDCs and their interrupts as
3838 3839 * the last step.
3839 3840 */
3840 3841 if (nxge_hio_rdc_enable(nxgep) != NXGE_OK) {
3841 3842 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3842 3843 "<== nxge_m_start: enable of RDCs failed"));
3843 3844 return (EIO);
3844 3845 }
3845 3846
3846 3847 if (nxge_hio_rdc_intr_arm(nxgep, B_TRUE) != NXGE_OK) {
3847 3848 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3848 3849 "<== nxge_m_start: intrs enable for RDCs failed"));
3849 3850 return (EIO);
3850 3851 }
3851 3852 }
3852 3853 #endif
3853 3854 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
3854 3855 return (0);
3855 3856 }
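
/*
 * Illustrative sketch (editorial, not part of the reviewed change): entry
 * points such as nxge_m_start() and nxge_m_stop() reach the driver through
 * a GLDv3 mac_callbacks_t that is handed to mac_register() elsewhere in
 * the driver.  The table below is a hedged approximation using designated
 * initializers; nxge_m_stat is assumed to be the stats entry point defined
 * outside this excerpt, and the real driver's table may differ.
 */
#if 0	/* sketch only */
#include <sys/mac_provider.h>

static mac_callbacks_t example_m_callbacks = {
	.mc_callbacks	= MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP |
	    MC_PROPINFO,
	.mc_getstat	= nxge_m_stat,		/* assumed name */
	.mc_start	= nxge_m_start,
	.mc_stop	= nxge_m_stop,
	.mc_setpromisc	= nxge_m_promisc,
	.mc_multicst	= nxge_m_multicst,
	.mc_ioctl	= nxge_m_ioctl,
	.mc_getcapab	= nxge_m_getcapab,
	.mc_setprop	= nxge_m_setprop,
	.mc_getprop	= nxge_m_getprop,
	.mc_propinfo	= nxge_m_propinfo
};
#endif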
3856 3857
3857 3858 static boolean_t
3858 3859 nxge_check_groups_stopped(p_nxge_t nxgep)
3859 3860 {
3860 3861 int i;
3861 3862
3862 3863 for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
3863 3864 if (nxgep->rx_hio_groups[i].started)
3864 3865 return (B_FALSE);
3865 3866 }
3866 3867
3867 3868 return (B_TRUE);
3868 3869 }
3869 3870
3870 3871 /*
3871 3872 * nxge_m_stop(): stop transmitting and receiving.
3872 3873 */
3873 3874 static void
3874 3875 nxge_m_stop(void *arg)
3875 3876 {
3876 3877 p_nxge_t nxgep = (p_nxge_t)arg;
3877 3878 boolean_t groups_stopped;
3878 3879
3879 3880 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop"));
3880 3881
3881 3882 /*
3882 3883 * Are the groups stopped?
3883 3884 */
3884 3885 groups_stopped = nxge_check_groups_stopped(nxgep);
3885 3886 ASSERT(groups_stopped == B_TRUE);
3886 3887 if (!groups_stopped) {
3887 3888 cmn_err(CE_WARN, "nxge(%d): groups are not stopped!\n",
3888 3889 nxgep->instance);
3889 3890 return;
3890 3891 }
3891 3892
3892 3893 if (!isLDOMguest(nxgep)) {
3893 3894 /*
3894 3895 * Disable the RX mac.
3895 3896 */
3896 3897 (void) nxge_rx_mac_disable(nxgep);
3897 3898
3898 3899 /*
3899 3900 * Wait for the IPP to drain.
3900 3901 */
3901 3902 (void) nxge_ipp_drain(nxgep);
3902 3903
3903 3904 /*
3904 3905 * Disable hardware interrupts.
3905 3906 */
3906 3907 nxge_intr_hw_disable(nxgep);
3907 3908 }
3908 3909 #if defined(sun4v)
3909 3910 else {
3910 3911 (void) nxge_hio_rdc_intr_arm(nxgep, B_FALSE);
3911 3912 }
3912 3913 #endif
3913 3914
3914 3915 /*
3915 3916 * Grab the global lock.
3916 3917 */
3917 3918 MUTEX_ENTER(nxgep->genlock);
3918 3919
3919 3920 nxgep->nxge_mac_state = NXGE_MAC_STOPPING;
3920 3921 if (nxgep->nxge_timerid) {
3921 3922 nxge_stop_timer(nxgep, nxgep->nxge_timerid);
3922 3923 nxgep->nxge_timerid = 0;
3923 3924 }
3924 3925
3925 3926 /*
3926 3927 * Clean up.
3927 3928 */
3928 3929 nxge_uninit(nxgep);
3929 3930
3930 3931 nxgep->nxge_mac_state = NXGE_MAC_STOPPED;
3931 3932
3932 3933 /*
3933 3934 * Let go of the global lock.
3934 3935 */
3935 3936 MUTEX_EXIT(nxgep->genlock);
3936 3937 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop"));
3937 3938 }
3938 3939
3939 3940 static int
3940 3941 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
3941 3942 {
3942 3943 p_nxge_t nxgep = (p_nxge_t)arg;
3943 3944 struct ether_addr addrp;
3944 3945
3945 3946 NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3946 3947 "==> nxge_m_multicst: add %d", add));
3947 3948
3948 3949 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
3949 3950 if (add) {
3950 3951 if (nxge_add_mcast_addr(nxgep, &addrp)) {
3951 3952 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3952 3953 "<== nxge_m_multicst: add multicast failed"));
3953 3954 return (EINVAL);
3954 3955 }
3955 3956 } else {
3956 3957 if (nxge_del_mcast_addr(nxgep, &addrp)) {
3957 3958 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3958 3959 "<== nxge_m_multicst: del multicast failed"));
3959 3960 return (EINVAL);
3960 3961 }
3961 3962 }
3962 3963
3963 3964 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst"));
3964 3965
3965 3966 return (0);
3966 3967 }
3967 3968
3968 3969 static int
3969 3970 nxge_m_promisc(void *arg, boolean_t on)
3970 3971 {
3971 3972 p_nxge_t nxgep = (p_nxge_t)arg;
3972 3973
3973 3974 NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3974 3975 "==> nxge_m_promisc: on %d", on));
3975 3976
3976 3977 if (nxge_set_promisc(nxgep, on)) {
3977 3978 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3978 3979 "<== nxge_m_promisc: set promisc failed"));
3979 3980 return (EINVAL);
3980 3981 }
3981 3982
3982 3983 NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3983 3984 "<== nxge_m_promisc: on %d", on));
3984 3985
3985 3986 return (0);
3986 3987 }
3987 3988
3988 3989 static void
3989 3990 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
3990 3991 {
3991 3992 p_nxge_t nxgep = (p_nxge_t)arg;
3992 3993 struct iocblk *iocp;
3993 3994 boolean_t need_privilege;
3994 3995 int err;
3995 3996 int cmd;
3996 3997
3997 3998 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl"));
3998 3999
3999 4000 iocp = (struct iocblk *)mp->b_rptr;
4000 4001 iocp->ioc_error = 0;
4001 4002 need_privilege = B_TRUE;
4002 4003 cmd = iocp->ioc_cmd;
4003 4004 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd));
4004 4005 switch (cmd) {
4005 4006 default:
4006 4007 miocnak(wq, mp, 0, EINVAL);
4007 4008 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid"));
4008 4009 return;
4009 4010
4010 4011 case LB_GET_INFO_SIZE:
4011 4012 case LB_GET_INFO:
4012 4013 case LB_GET_MODE:
4013 4014 need_privilege = B_FALSE;
4014 4015 break;
4015 4016 case LB_SET_MODE:
4016 4017 break;
4017 4018
4018 4019
4019 4020 case NXGE_GET_MII:
4020 4021 case NXGE_PUT_MII:
4021 4022 case NXGE_GET64:
4022 4023 case NXGE_PUT64:
4023 4024 case NXGE_GET_TX_RING_SZ:
4024 4025 case NXGE_GET_TX_DESC:
4025 4026 case NXGE_TX_SIDE_RESET:
4026 4027 case NXGE_RX_SIDE_RESET:
4027 4028 case NXGE_GLOBAL_RESET:
4028 4029 case NXGE_RESET_MAC:
4029 4030 case NXGE_TX_REGS_DUMP:
4030 4031 case NXGE_RX_REGS_DUMP:
4031 4032 case NXGE_INT_REGS_DUMP:
4032 4033 case NXGE_VIR_INT_REGS_DUMP:
4033 4034 case NXGE_PUT_TCAM:
4034 4035 case NXGE_GET_TCAM:
4035 4036 case NXGE_RTRACE:
4036 4037 case NXGE_RDUMP:
4037 4038 case NXGE_RX_CLASS:
4038 4039 case NXGE_RX_HASH:
4039 4040
4040 4041 need_privilege = B_FALSE;
4041 4042 break;
4042 4043 case NXGE_INJECT_ERR:
4043 4044 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n");
4044 4045 nxge_err_inject(nxgep, wq, mp);
4045 4046 break;
4046 4047 }
4047 4048
4048 4049 if (need_privilege) {
4049 4050 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
4050 4051 if (err != 0) {
4051 4052 miocnak(wq, mp, 0, err);
4052 4053 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4053 4054 "<== nxge_m_ioctl: no priv"));
4054 4055 return;
4055 4056 }
4056 4057 }
4057 4058
4058 4059 switch (cmd) {
4059 4060
4060 4061 case LB_GET_MODE:
4061 4062 case LB_SET_MODE:
4062 4063 case LB_GET_INFO_SIZE:
4063 4064 case LB_GET_INFO:
4064 4065 nxge_loopback_ioctl(nxgep, wq, mp, iocp);
4065 4066 break;
4066 4067
4067 4068 case NXGE_GET_MII:
4068 4069 case NXGE_PUT_MII:
4069 4070 case NXGE_PUT_TCAM:
4070 4071 case NXGE_GET_TCAM:
4071 4072 case NXGE_GET64:
4072 4073 case NXGE_PUT64:
4073 4074 case NXGE_GET_TX_RING_SZ:
4074 4075 case NXGE_GET_TX_DESC:
4075 4076 case NXGE_TX_SIDE_RESET:
4076 4077 case NXGE_RX_SIDE_RESET:
4077 4078 case NXGE_GLOBAL_RESET:
4078 4079 case NXGE_RESET_MAC:
4079 4080 case NXGE_TX_REGS_DUMP:
4080 4081 case NXGE_RX_REGS_DUMP:
4081 4082 case NXGE_INT_REGS_DUMP:
4082 4083 case NXGE_VIR_INT_REGS_DUMP:
4083 4084 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4084 4085 "==> nxge_m_ioctl: cmd 0x%x", cmd));
4085 4086 nxge_hw_ioctl(nxgep, wq, mp, iocp);
4086 4087 break;
4087 4088 case NXGE_RX_CLASS:
4088 4089 if (nxge_rxclass_ioctl(nxgep, wq, mp->b_cont) < 0)
4089 4090 miocnak(wq, mp, 0, EINVAL);
4090 4091 else
4091 4092 miocack(wq, mp, sizeof (rx_class_cfg_t), 0);
4092 4093 break;
4093 4094 case NXGE_RX_HASH:
4094 4095
4095 4096 if (nxge_rxhash_ioctl(nxgep, wq, mp->b_cont) < 0)
4096 4097 miocnak(wq, mp, 0, EINVAL);
4097 4098 else
4098 4099 miocack(wq, mp, sizeof (cfg_cmd_t), 0);
4099 4100 break;
4100 4101 }
4101 4102
4102 4103 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl"));
4103 4104 }
4104 4105
4105 4106 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);
4106 4107
4107 4108 void
4108 4109 nxge_mmac_kstat_update(p_nxge_t nxgep, int slot, boolean_t factory)
4109 4110 {
4110 4111 p_nxge_mmac_stats_t mmac_stats;
4111 4112 int i;
4112 4113 nxge_mmac_t *mmac_info;
4113 4114
4114 4115 mmac_info = &nxgep->nxge_mmac_info;
4115 4116
4116 4117 mmac_stats = &nxgep->statsp->mmac_stats;
4117 4118 mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
4118 4119 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;
4119 4120
4120 4121 for (i = 0; i < ETHERADDRL; i++) {
4121 4122 if (factory) {
4122 4123 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
4123 4124 = mmac_info->factory_mac_pool[slot][
4124 4125 (ETHERADDRL-1) - i];
4125 4126 } else {
4126 4127 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
4127 4128 = mmac_info->mac_pool[slot].addr[
4128 4129 (ETHERADDRL - 1) - i];
4129 4130 }
4130 4131 }
4131 4132 }
4132 4133
4133 4134 /*
4134 4135 * nxge_altmac_set() -- Set an alternate MAC address
4135 4136 */
4136 4137 static int
4137 4138 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, int slot,
4138 4139 int rdctbl, boolean_t usetbl)
4139 4140 {
4140 4141 uint8_t addrn;
4141 4142 uint8_t portn;
4142 4143 npi_mac_addr_t altmac;
4143 4144 hostinfo_t mac_rdc;
4144 4145 p_nxge_class_pt_cfg_t clscfgp;
4145 4146
4146 4147
4147 4148 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff);
4148 4149 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff);
4149 4150 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff);
4150 4151
4151 4152 portn = nxgep->mac.portnum;
4152 4153 addrn = (uint8_t)slot - 1;
4153 4154
4154 4155 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET,
4155 4156 nxgep->function_num, addrn, &altmac) != NPI_SUCCESS)
4156 4157 return (EIO);
4157 4158
4158 4159 /*
4159 4160 * Set the rdc table number for the host info entry
4160 4161 * for this mac address slot.
4161 4162 */
4162 4163 clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
4163 4164 mac_rdc.value = 0;
4164 4165 if (usetbl)
4165 4166 mac_rdc.bits.w0.rdc_tbl_num = rdctbl;
4166 4167 else
4167 4168 mac_rdc.bits.w0.rdc_tbl_num =
4168 4169 clscfgp->mac_host_info[addrn].rdctbl;
4169 4170 mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr;
4170 4171
4171 4172 if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET,
4172 4173 nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) {
4173 4174 return (EIO);
4174 4175 }
4175 4176
4176 4177 /*
4177 4178 * Enable comparison with the alternate MAC address.
4178 4179 * While the first alternate addr is enabled by bit 1 of register
4179 4180 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register
4180 4181 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn
4181 4182 * accordingly before calling npi_mac_altaddr_enable.
4182 4183 */
4183 4184 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
4184 4185 addrn = (uint8_t)slot - 1;
4185 4186 else
4186 4187 addrn = (uint8_t)slot;
4187 4188
4188 4189 if (npi_mac_altaddr_enable(nxgep->npi_handle,
4189 4190 nxgep->function_num, addrn) != NPI_SUCCESS) {
4190 4191 return (EIO);
4191 4192 }
4192 4193
4193 4194 return (0);
4194 4195 }
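
/*
 * Illustrative sketch (editorial, not part of the reviewed change): a
 * worked example of the word packing done at the top of nxge_altmac_set().
 * For a purely hypothetical address 00:14:4f:a8:12:34 the three 16-bit
 * words are w2 = 0x0014, w1 = 0x4fa8 and w0 = 0x1234; the enable call then
 * uses addrn = slot - 1 on XMAC ports and addrn = slot on the others.
 */
#if 0	/* sketch only */
uint8_t ex_mac[6] = { 0x00, 0x14, 0x4f, 0xa8, 0x12, 0x34 };
uint16_t ex_w2 = ((uint16_t)ex_mac[0] << 8) | ex_mac[1];	/* 0x0014 */
uint16_t ex_w1 = ((uint16_t)ex_mac[2] << 8) | ex_mac[3];	/* 0x4fa8 */
uint16_t ex_w0 = ((uint16_t)ex_mac[4] << 8) | ex_mac[5];	/* 0x1234 */
#endif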
4195 4196
4196 4197 /*
4197 4198 * nxge_m_mmac_add_g() - find an unused address slot, set the address
4198 4199 * value to the one specified, enable the port to start filtering on
4199 4200 * the new MAC address. Returns 0 on success.
4200 4201 */
4201 4202 int
4202 4203 nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl,
4203 4204 boolean_t usetbl)
4204 4205 {
4205 4206 p_nxge_t nxgep = arg;
4206 4207 int slot;
4207 4208 nxge_mmac_t *mmac_info;
4208 4209 int err;
4209 4210 nxge_status_t status;
4210 4211
4211 4212 mutex_enter(nxgep->genlock);
4212 4213
4213 4214 /*
4214 4215 * Make sure that nxge is initialized if _start() has
4215 4216 * not been called yet.
4216 4217 */
4217 4218 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
4218 4219 status = nxge_init(nxgep);
4219 4220 if (status != NXGE_OK) {
4220 4221 mutex_exit(nxgep->genlock);
4221 4222 return (ENXIO);
4222 4223 }
4223 4224 }
4224 4225
4225 4226 mmac_info = &nxgep->nxge_mmac_info;
4226 4227 if (mmac_info->naddrfree == 0) {
4227 4228 mutex_exit(nxgep->genlock);
4228 4229 return (ENOSPC);
4229 4230 }
4230 4231
4231 4232 /*
4232 4233 * Search for the first available slot. Because naddrfree
4233 4234 * is not zero, we are guaranteed to find one.
4234 4235 * Each of the first two ports of Neptune has 16 alternate
4235 4236 * MAC slots but only the first 7 (of 15) slots have assigned factory
4236 4237 * MAC addresses. We first search among the slots without bundled
4237 4238 * factory MACs. If we fail to find one in that range, then we
4238 4239 * search the slots with bundled factory MACs. A factory MAC
4239 4240 * will be wasted while the slot is used with a user MAC address.
4240 4241 * But the slot could be used by factory MAC again after calling
4241 4242 * nxge_m_mmac_remove and nxge_m_mmac_reserve.
4242 4243 */
4243 4244 for (slot = 0; slot <= mmac_info->num_mmac; slot++) {
4244 4245 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
4245 4246 break;
4246 4247 }
4247 4248
4248 4249 ASSERT(slot <= mmac_info->num_mmac);
4249 4250
4250 4251 if ((err = nxge_altmac_set(nxgep, (uint8_t *)maddr, slot, rdctbl,
4251 4252 usetbl)) != 0) {
4252 4253 mutex_exit(nxgep->genlock);
4253 4254 return (err);
4254 4255 }
4255 4256
4256 4257 bcopy(maddr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
4257 4258 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
4258 4259 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
4259 4260 mmac_info->naddrfree--;
4260 4261 nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
4261 4262
4262 4263 mutex_exit(nxgep->genlock);
4263 4264 return (0);
4264 4265 }
4265 4266
4266 4267 /*
4267 4268 * Remove the specified mac address and update the HW not to filter
4268 4269 * the mac address anymore.
4269 4270 */
4270 4271 int
4271 4272 nxge_m_mmac_remove(void *arg, int slot)
4272 4273 {
4273 4274 p_nxge_t nxgep = arg;
4274 4275 nxge_mmac_t *mmac_info;
4275 4276 uint8_t addrn;
4276 4277 uint8_t portn;
4277 4278 int err = 0;
4278 4279 nxge_status_t status;
4279 4280
4280 4281 mutex_enter(nxgep->genlock);
4281 4282
4282 4283 /*
4283 4284 * Make sure that nxge is initialized if _start() has
4284 4285 * not been called yet.
4285 4286 */
4286 4287 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
4287 4288 status = nxge_init(nxgep);
4288 4289 if (status != NXGE_OK) {
4289 4290 mutex_exit(nxgep->genlock);
4290 4291 return (ENXIO);
4291 4292 }
4292 4293 }
4293 4294
4294 4295 mmac_info = &nxgep->nxge_mmac_info;
4295 4296 if (slot < 1 || slot > mmac_info->num_mmac) {
4296 4297 mutex_exit(nxgep->genlock);
4297 4298 return (EINVAL);
4298 4299 }
4299 4300
4300 4301 portn = nxgep->mac.portnum;
4301 4302 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
4302 4303 addrn = (uint8_t)slot - 1;
4303 4304 else
4304 4305 addrn = (uint8_t)slot;
4305 4306
4306 4307 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
4307 4308 if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
4308 4309 == NPI_SUCCESS) {
4309 4310 mmac_info->naddrfree++;
4310 4311 mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
4311 4312 /*
4312 4313 * Regardless of whether the MAC we just stopped filtering
4313 4314 * is a user addr or a factory addr, we must set
4314 4315 * the MMAC_VENDOR_ADDR flag if this slot has an
4315 4316 * associated factory MAC to indicate that a factory
4316 4317 * MAC is available.
4317 4318 */
4318 4319 if (slot <= mmac_info->num_factory_mmac) {
4319 4320 mmac_info->mac_pool[slot].flags
4320 4321 |= MMAC_VENDOR_ADDR;
4321 4322 }
4322 4323 /*
4323 4324 * Clear mac_pool[slot].addr so that kstat shows 0
4324 4325 * alternate MAC address if the slot is not used.
4325 4326 * (But nxge_m_mmac_get returns the factory MAC even
4326 4327 * when the slot is not used!)
4327 4328 */
4328 4329 bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
4329 4330 nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
4330 4331 } else {
4331 4332 err = EIO;
4332 4333 }
4333 4334 } else {
4334 4335 err = EINVAL;
4335 4336 }
4336 4337
4337 4338 mutex_exit(nxgep->genlock);
4338 4339 return (err);
4339 4340 }
4340 4341
4341 4342 /*
4342 4343 * The callback to query all the factory addresses. naddr must be the same as
4343 4344 * the number of factory addresses (returned by MAC_CAPAB_MULTIFACTADDR), and
4344 4345 * mcm_addr is the space allocated for keeping all the addresses; its size is
4345 4346 * naddr * MAXMACADDRLEN.
4346 4347 */
4347 4348 static void
4348 4349 nxge_m_getfactaddr(void *arg, uint_t naddr, uint8_t *addr)
4349 4350 {
4350 4351 nxge_t *nxgep = arg;
4351 4352 nxge_mmac_t *mmac_info;
4352 4353 int i;
4353 4354
4354 4355 mutex_enter(nxgep->genlock);
4355 4356
4356 4357 mmac_info = &nxgep->nxge_mmac_info;
4357 4358 ASSERT(naddr == mmac_info->num_factory_mmac);
4358 4359
4359 4360 for (i = 0; i < naddr; i++) {
4360 4361 bcopy(mmac_info->factory_mac_pool[i + 1],
4361 4362 addr + i * MAXMACADDRLEN, ETHERADDRL);
4362 4363 }
4363 4364
4364 4365 mutex_exit(nxgep->genlock);
4365 4366 }
4366 4367
4367 4368
4368 4369 static boolean_t
4369 4370 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
4370 4371 {
4371 4372 nxge_t *nxgep = arg;
4372 4373 uint32_t *txflags = cap_data;
4373 4374
4374 4375 switch (cap) {
4375 4376 case MAC_CAPAB_HCKSUM:
4376 4377 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4377 4378 "==> nxge_m_getcapab: checksum %d", nxge_cksum_offload));
4378 4379 if (nxge_cksum_offload <= 1) {
4379 4380 *txflags = HCKSUM_INET_PARTIAL;
4380 4381 }
4381 4382 break;
4382 4383
4383 4384 case MAC_CAPAB_MULTIFACTADDR: {
4384 4385 mac_capab_multifactaddr_t *mfacp = cap_data;
4385 4386
4386 4387 if (!isLDOMguest(nxgep)) {
4387 4388 mutex_enter(nxgep->genlock);
4388 4389 mfacp->mcm_naddr =
4389 4390 nxgep->nxge_mmac_info.num_factory_mmac;
4390 4391 mfacp->mcm_getaddr = nxge_m_getfactaddr;
4391 4392 mutex_exit(nxgep->genlock);
4392 4393 }
4393 4394 break;
4394 4395 }
4395 4396
4396 4397 case MAC_CAPAB_LSO: {
4397 4398 mac_capab_lso_t *cap_lso = cap_data;
4398 4399
4399 4400 if (nxgep->soft_lso_enable) {
4400 4401 if (nxge_cksum_offload <= 1) {
4401 4402 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
4402 4403 if (nxge_lso_max > NXGE_LSO_MAXLEN) {
4403 4404 nxge_lso_max = NXGE_LSO_MAXLEN;
4404 4405 }
4405 4406 cap_lso->lso_basic_tcp_ipv4.lso_max =
4406 4407 nxge_lso_max;
4407 4408 }
4408 4409 break;
4409 4410 } else {
4410 4411 return (B_FALSE);
4411 4412 }
4412 4413 }
4413 4414
4414 4415 case MAC_CAPAB_RINGS: {
4415 4416 mac_capab_rings_t *cap_rings = cap_data;
4416 4417 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config;
4417 4418
4418 4419 mutex_enter(nxgep->genlock);
4419 4420 if (cap_rings->mr_type == MAC_RING_TYPE_RX) {
4420 4421 if (isLDOMguest(nxgep)) {
4421 4422 cap_rings->mr_group_type =
4422 4423 MAC_GROUP_TYPE_STATIC;
4423 4424 cap_rings->mr_rnum =
4424 4425 NXGE_HIO_SHARE_MAX_CHANNELS;
4425 4426 cap_rings->mr_rget = nxge_fill_ring;
4426 4427 cap_rings->mr_gnum = 1;
4427 4428 cap_rings->mr_gget = nxge_hio_group_get;
4428 4429 cap_rings->mr_gaddring = NULL;
4429 4430 cap_rings->mr_gremring = NULL;
4430 4431 } else {
4431 4432 /*
4432 4433 * Service Domain.
4433 4434 */
4434 4435 cap_rings->mr_group_type =
4435 4436 MAC_GROUP_TYPE_DYNAMIC;
4436 4437 cap_rings->mr_rnum = p_cfgp->max_rdcs;
4437 4438 cap_rings->mr_rget = nxge_fill_ring;
4438 4439 cap_rings->mr_gnum = p_cfgp->max_rdc_grpids;
4439 4440 cap_rings->mr_gget = nxge_hio_group_get;
4440 4441 cap_rings->mr_gaddring = nxge_group_add_ring;
4441 4442 cap_rings->mr_gremring = nxge_group_rem_ring;
4442 4443 }
4443 4444
4444 4445 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4445 4446 "==> nxge_m_getcapab: rx nrings[%d] ngroups[%d]",
4446 4447 p_cfgp->max_rdcs, p_cfgp->max_rdc_grpids));
4447 4448 } else {
4448 4449 /*
4449 4450 * TX Rings.
4450 4451 */
4451 4452 if (isLDOMguest(nxgep)) {
4452 4453 cap_rings->mr_group_type =
4453 4454 MAC_GROUP_TYPE_STATIC;
4454 4455 cap_rings->mr_rnum =
4455 4456 NXGE_HIO_SHARE_MAX_CHANNELS;
4456 4457 cap_rings->mr_rget = nxge_fill_ring;
4457 4458 cap_rings->mr_gnum = 0;
4458 4459 cap_rings->mr_gget = NULL;
4459 4460 cap_rings->mr_gaddring = NULL;
4460 4461 cap_rings->mr_gremring = NULL;
4461 4462 } else {
4462 4463 /*
4463 4464 * Service Domain.
4464 4465 */
4465 4466 cap_rings->mr_group_type =
4466 4467 MAC_GROUP_TYPE_DYNAMIC;
4467 4468 cap_rings->mr_rnum = p_cfgp->tdc.count;
4468 4469 cap_rings->mr_rget = nxge_fill_ring;
4469 4470
4470 4471 /*
4471 4472 * Share capable.
4472 4473 *
4473 4474 * Do not report the default group: hence -1
4474 4475 */
4475 4476 cap_rings->mr_gnum =
4476 4477 NXGE_MAX_TDC_GROUPS / nxgep->nports - 1;
4477 4478 cap_rings->mr_gget = nxge_hio_group_get;
4478 4479 cap_rings->mr_gaddring = nxge_group_add_ring;
4479 4480 cap_rings->mr_gremring = nxge_group_rem_ring;
4480 4481 }
4481 4482
4482 4483 NXGE_DEBUG_MSG((nxgep, TX_CTL,
4483 4484 "==> nxge_m_getcapab: tx rings # of rings %d",
4484 4485 p_cfgp->tdc.count));
4485 4486 }
4486 4487 mutex_exit(nxgep->genlock);
4487 4488 break;
4488 4489 }
4489 4490
4490 4491 #if defined(sun4v)
4491 4492 case MAC_CAPAB_SHARES: {
4492 4493 mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data;
4493 4494
4494 4495 /*
4495 4496 * Only the service domain driver responds to
4496 4497 * this capability request.
4497 4498 */
4498 4499 mutex_enter(nxgep->genlock);
4499 4500 if (isLDOMservice(nxgep)) {
4500 4501 mshares->ms_snum = 3;
4501 4502 mshares->ms_handle = (void *)nxgep;
4502 4503 mshares->ms_salloc = nxge_hio_share_alloc;
4503 4504 mshares->ms_sfree = nxge_hio_share_free;
4504 4505 mshares->ms_sadd = nxge_hio_share_add_group;
4505 4506 mshares->ms_sremove = nxge_hio_share_rem_group;
4506 4507 mshares->ms_squery = nxge_hio_share_query;
4507 4508 mshares->ms_sbind = nxge_hio_share_bind;
4508 4509 mshares->ms_sunbind = nxge_hio_share_unbind;
4509 4510 mutex_exit(nxgep->genlock);
4510 4511 } else {
4511 4512 mutex_exit(nxgep->genlock);
4512 4513 return (B_FALSE);
4513 4514 }
4514 4515 break;
4515 4516 }
4516 4517 #endif
4517 4518 default:
4518 4519 return (B_FALSE);
4519 4520 }
4520 4521 return (B_TRUE);
4521 4522 }
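
/*
 * Illustrative sketch (editorial, not part of the reviewed change): in the
 * service-domain TX branch of MAC_CAPAB_RINGS above, the group count is
 * NXGE_MAX_TDC_GROUPS / nports - 1 so the default group is not advertised.
 * With purely hypothetical constants the arithmetic works out as follows.
 */
#if 0	/* sketch only -- these values are assumptions, not the real ones */
#define	EX_MAX_TDC_GROUPS	8	/* hypothetical number of TDC groups */
int ex_nports = 4;			/* hypothetical port count */
int ex_mr_gnum = EX_MAX_TDC_GROUPS / ex_nports - 1;	/* 8 / 4 - 1 = 1 */
#endif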
4522 4523
4523 4524 static boolean_t
4524 4525 nxge_param_locked(mac_prop_id_t pr_num)
4525 4526 {
4526 4527 /*
4527 4528 * All adv_* parameters are locked (read-only) while
4528 4529 * the device is in any sort of loopback mode ...
4529 4530 */
4530 4531 switch (pr_num) {
4531 4532 case MAC_PROP_ADV_1000FDX_CAP:
4532 4533 case MAC_PROP_EN_1000FDX_CAP:
4533 4534 case MAC_PROP_ADV_1000HDX_CAP:
4534 4535 case MAC_PROP_EN_1000HDX_CAP:
4535 4536 case MAC_PROP_ADV_100FDX_CAP:
4536 4537 case MAC_PROP_EN_100FDX_CAP:
4537 4538 case MAC_PROP_ADV_100HDX_CAP:
4538 4539 case MAC_PROP_EN_100HDX_CAP:
4539 4540 case MAC_PROP_ADV_10FDX_CAP:
4540 4541 case MAC_PROP_EN_10FDX_CAP:
4541 4542 case MAC_PROP_ADV_10HDX_CAP:
4542 4543 case MAC_PROP_EN_10HDX_CAP:
4543 4544 case MAC_PROP_AUTONEG:
4544 4545 case MAC_PROP_FLOWCTRL:
4545 4546 return (B_TRUE);
4546 4547 }
4547 4548 return (B_FALSE);
4548 4549 }
4549 4550
4550 4551 /*
4551 4552 * callback functions for set/get of properties
4552 4553 */
4553 4554 static int
4554 4555 nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
4555 4556 uint_t pr_valsize, const void *pr_val)
4556 4557 {
4557 4558 nxge_t *nxgep = barg;
4558 4559 p_nxge_param_t param_arr = nxgep->param_arr;
4559 4560 p_nxge_stats_t statsp = nxgep->statsp;
4560 4561 int err = 0;
4561 4562
4562 4563 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop"));
4563 4564
4564 4565 mutex_enter(nxgep->genlock);
4565 4566 if (statsp->port_stats.lb_mode != nxge_lb_normal &&
4566 4567 nxge_param_locked(pr_num)) {
4567 4568 /*
4568 4569 * All adv_* parameters are locked (read-only)
4569 4570 * while the device is in any sort of loopback mode.
4570 4571 */
4571 4572 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4572 4573 "==> nxge_m_setprop: loopback mode: read only"));
4573 4574 mutex_exit(nxgep->genlock);
4574 4575 return (EBUSY);
4575 4576 }
4576 4577
4577 4578 switch (pr_num) {
4578 4579 case MAC_PROP_EN_1000FDX_CAP:
4579 4580 nxgep->param_en_1000fdx =
4580 4581 param_arr[param_anar_1000fdx].value = *(uint8_t *)pr_val;
4581 4582 goto reprogram;
4582 4583
4583 4584 case MAC_PROP_EN_100FDX_CAP:
4584 4585 nxgep->param_en_100fdx =
4585 4586 param_arr[param_anar_100fdx].value = *(uint8_t *)pr_val;
4586 4587 goto reprogram;
4587 4588
4588 4589 case MAC_PROP_EN_10FDX_CAP:
4589 4590 nxgep->param_en_10fdx =
4590 4591 param_arr[param_anar_10fdx].value = *(uint8_t *)pr_val;
4591 4592 goto reprogram;
4592 4593
4593 4594 case MAC_PROP_AUTONEG:
4594 4595 param_arr[param_autoneg].value = *(uint8_t *)pr_val;
4595 4596 goto reprogram;
4596 4597
4597 4598 case MAC_PROP_MTU: {
4598 4599 uint32_t cur_mtu, new_mtu, old_framesize;
4599 4600
4600 4601 cur_mtu = nxgep->mac.default_mtu;
4601 4602 ASSERT(pr_valsize >= sizeof (new_mtu));
4602 4603 bcopy(pr_val, &new_mtu, sizeof (new_mtu));
4603 4604
4604 4605 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4605 4606 "==> nxge_m_setprop: set MTU: %d is_jumbo %d",
4606 4607 new_mtu, nxgep->mac.is_jumbo));
4607 4608
4608 4609 if (new_mtu == cur_mtu) {
4609 4610 err = 0;
4610 4611 break;
4611 4612 }
4612 4613
4613 4614 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
4614 4615 err = EBUSY;
4615 4616 break;
4616 4617 }
4617 4618
4618 4619 if ((new_mtu < NXGE_DEFAULT_MTU) ||
4619 4620 (new_mtu > NXGE_MAXIMUM_MTU)) {
4620 4621 err = EINVAL;
4621 4622 break;
4622 4623 }
4623 4624
4624 4625 old_framesize = (uint32_t)nxgep->mac.maxframesize;
4625 4626 nxgep->mac.maxframesize = (uint16_t)
4626 4627 (new_mtu + NXGE_EHEADER_VLAN_CRC);
4627 4628 if (nxge_mac_set_framesize(nxgep)) {
4628 4629 nxgep->mac.maxframesize =
4629 4630 (uint16_t)old_framesize;
4630 4631 err = EINVAL;
4631 4632 break;
4632 4633 }
4633 4634
4634 4635 nxgep->mac.default_mtu = new_mtu;
4635 4636 nxgep->mac.is_jumbo = (new_mtu > NXGE_DEFAULT_MTU);
4636 4637
4637 4638 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4638 4639 "==> nxge_m_setprop: set MTU: %d maxframe %d",
4639 4640 new_mtu, nxgep->mac.maxframesize));
4640 4641 break;
4641 4642 }
4642 4643
4643 4644 case MAC_PROP_FLOWCTRL: {
4644 4645 link_flowctrl_t fl;
4645 4646
4646 4647 ASSERT(pr_valsize >= sizeof (fl));
4647 4648 bcopy(pr_val, &fl, sizeof (fl));
4648 4649
4649 4650 switch (fl) {
4650 4651 case LINK_FLOWCTRL_NONE:
4651 4652 param_arr[param_anar_pause].value = 0;
4652 4653 break;
4653 4654
4654 4655 case LINK_FLOWCTRL_RX:
4655 4656 param_arr[param_anar_pause].value = 1;
4656 4657 break;
4657 4658
4658 4659 case LINK_FLOWCTRL_TX:
4659 4660 case LINK_FLOWCTRL_BI:
4660 4661 err = EINVAL;
4661 4662 break;
4662 4663 default:
4663 4664 err = EINVAL;
4664 4665 break;
4665 4666 }
4666 4667 reprogram:
4667 4668 if ((err == 0) && !isLDOMguest(nxgep)) {
4668 4669 if (!nxge_param_link_update(nxgep)) {
4669 4670 err = EINVAL;
4670 4671 }
4671 4672 } else {
4672 4673 err = EINVAL;
4673 4674 }
4674 4675 break;
4675 4676 }
4676 4677
4677 4678 case MAC_PROP_PRIVATE:
4678 4679 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4679 4680 "==> nxge_m_setprop: private property"));
4680 4681 err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize, pr_val);
4681 4682 break;
4682 4683
4683 4684 default:
4684 4685 err = ENOTSUP;
4685 4686 break;
4686 4687 }
4687 4688
4688 4689 mutex_exit(nxgep->genlock);
4689 4690
4690 4691 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4691 4692 "<== nxge_m_setprop (return %d)", err));
4692 4693 return (err);
4693 4694 }
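
/*
 * Illustrative sketch (editorial, not part of the reviewed change): the
 * MAC_PROP_MTU case above derives the hardware frame size by adding
 * NXGE_EHEADER_VLAN_CRC to the requested MTU.  Assuming that constant
 * covers a 14-byte Ethernet header, a 4-byte VLAN tag and the 4-byte FCS
 * (22 bytes total -- an assumption, not taken from this file), the
 * arithmetic for a hypothetical jumbo MTU is:
 */
#if 0	/* sketch only */
#define	EX_EHEADER_VLAN_CRC	(14 + 4 + 4)	/* assumed: hdr + VLAN + FCS */

uint32_t ex_new_mtu = 9000;				/* hypothetical MTU */
uint16_t ex_maxframesize = (uint16_t)(ex_new_mtu + EX_EHEADER_VLAN_CRC);
							/* 9000 + 22 = 9022 */
#endif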
4694 4695
4695 4696 static int
4696 4697 nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
4697 4698 uint_t pr_valsize, void *pr_val)
4698 4699 {
4699 4700 nxge_t *nxgep = barg;
4700 4701 p_nxge_param_t param_arr = nxgep->param_arr;
4701 4702 p_nxge_stats_t statsp = nxgep->statsp;
4702 4703
4703 4704 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4704 4705 "==> nxge_m_getprop: pr_num %d", pr_num));
4705 4706
4706 4707 switch (pr_num) {
4707 4708 case MAC_PROP_DUPLEX:
4708 4709 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
4709 4710 break;
4710 4711
4711 4712 case MAC_PROP_SPEED: {
4712 4713 uint64_t val = statsp->mac_stats.link_speed * 1000000ull;
4713 4714
4714 4715 ASSERT(pr_valsize >= sizeof (val));
4715 4716 bcopy(&val, pr_val, sizeof (val));
4716 4717 break;
4717 4718 }
4718 4719
4719 4720 case MAC_PROP_STATUS: {
4720 4721 link_state_t state = statsp->mac_stats.link_up ?
4721 4722 LINK_STATE_UP : LINK_STATE_DOWN;
4722 4723
4723 4724 ASSERT(pr_valsize >= sizeof (state));
4724 4725 bcopy(&state, pr_val, sizeof (state));
4725 4726 break;
4726 4727 }
4727 4728
4728 4729 case MAC_PROP_AUTONEG:
4729 4730 *(uint8_t *)pr_val = param_arr[param_autoneg].value;
4730 4731 break;
4731 4732
4732 4733 case MAC_PROP_FLOWCTRL: {
4733 4734 link_flowctrl_t fl = param_arr[param_anar_pause].value != 0 ?
4734 4735 LINK_FLOWCTRL_RX : LINK_FLOWCTRL_NONE;
4735 4736
4736 4737 ASSERT(pr_valsize >= sizeof (fl));
4737 4738 bcopy(&fl, pr_val, sizeof (fl));
4738 4739 break;
4739 4740 }
4740 4741
4741 4742 case MAC_PROP_ADV_1000FDX_CAP:
4742 4743 *(uint8_t *)pr_val = param_arr[param_anar_1000fdx].value;
4743 4744 break;
4744 4745
4745 4746 case MAC_PROP_EN_1000FDX_CAP:
4746 4747 *(uint8_t *)pr_val = nxgep->param_en_1000fdx;
4747 4748 break;
4748 4749
4749 4750 case MAC_PROP_ADV_100FDX_CAP:
4750 4751 *(uint8_t *)pr_val = param_arr[param_anar_100fdx].value;
4751 4752 break;
4752 4753
4753 4754 case MAC_PROP_EN_100FDX_CAP:
4754 4755 *(uint8_t *)pr_val = nxgep->param_en_100fdx;
4755 4756 break;
4756 4757
4757 4758 case MAC_PROP_ADV_10FDX_CAP:
4758 4759 *(uint8_t *)pr_val = param_arr[param_anar_10fdx].value;
4759 4760 break;
4760 4761
4761 4762 case MAC_PROP_EN_10FDX_CAP:
4762 4763 *(uint8_t *)pr_val = nxgep->param_en_10fdx;
4763 4764 break;
4764 4765
4765 4766 case MAC_PROP_PRIVATE:
4766 4767 return (nxge_get_priv_prop(nxgep, pr_name, pr_valsize,
4767 4768 pr_val));
4768 4769
4769 4770 default:
4770 4771 return (ENOTSUP);
4771 4772 }
4772 4773
4773 4774 return (0);
4774 4775 }
4775 4776
4776 4777 static void
4777 4778 nxge_m_propinfo(void *barg, const char *pr_name, mac_prop_id_t pr_num,
4778 4779 mac_prop_info_handle_t prh)
4779 4780 {
4780 4781 nxge_t *nxgep = barg;
4781 4782 p_nxge_stats_t statsp = nxgep->statsp;
4782 4783
4783 4784 /*
4784 4785 * By default permissions are read/write unless specified
4785 4786 * otherwise by the driver.
4786 4787 */
4787 4788
4788 4789 switch (pr_num) {
4789 4790 case MAC_PROP_DUPLEX:
4790 4791 case MAC_PROP_SPEED:
4791 4792 case MAC_PROP_STATUS:
4792 4793 case MAC_PROP_EN_1000HDX_CAP:
4793 4794 case MAC_PROP_EN_100HDX_CAP:
4794 4795 case MAC_PROP_EN_10HDX_CAP:
4795 4796 case MAC_PROP_ADV_1000FDX_CAP:
4796 4797 case MAC_PROP_ADV_1000HDX_CAP:
4797 4798 case MAC_PROP_ADV_100FDX_CAP:
4798 4799 case MAC_PROP_ADV_100HDX_CAP:
4799 4800 case MAC_PROP_ADV_10FDX_CAP:
4800 4801 case MAC_PROP_ADV_10HDX_CAP:
4801 4802 /*
4802 4803 * Note that read-only properties don't need to
4803 4804 * provide default values since they cannot be
4804 4805 * changed by the administrator.
4805 4806 */
4806 4807 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
4807 4808 break;
4808 4809
4809 4810 case MAC_PROP_EN_1000FDX_CAP:
4810 4811 case MAC_PROP_EN_100FDX_CAP:
4811 4812 case MAC_PROP_EN_10FDX_CAP:
4812 4813 mac_prop_info_set_default_uint8(prh, 1);
4813 4814 break;
4814 4815
4815 4816 case MAC_PROP_AUTONEG:
4816 4817 mac_prop_info_set_default_uint8(prh, 1);
4817 4818 break;
4818 4819
4819 4820 case MAC_PROP_FLOWCTRL:
4820 4821 mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_RX);
4821 4822 break;
4822 4823
4823 4824 case MAC_PROP_MTU:
4824 4825 mac_prop_info_set_range_uint32(prh,
4825 4826 NXGE_DEFAULT_MTU, NXGE_MAXIMUM_MTU);
4826 4827 break;
4827 4828
4828 4829 case MAC_PROP_PRIVATE:
4829 4830 nxge_priv_propinfo(pr_name, prh);
4830 4831 break;
4831 4832 }
4832 4833
4833 4834 mutex_enter(nxgep->genlock);
4834 4835 if (statsp->port_stats.lb_mode != nxge_lb_normal &&
4835 4836 nxge_param_locked(pr_num)) {
4836 4837 /*
4837 4838 * Some properties are locked (read-only) while the
4838 4839 * device is in any sort of loopback mode.
4839 4840 */
4840 4841 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
4841 4842 }
4842 4843 mutex_exit(nxgep->genlock);
4843 4844 }
4844 4845
4845 4846 static void
4846 4847 nxge_priv_propinfo(const char *pr_name, mac_prop_info_handle_t prh)
4847 4848 {
4848 4849 char valstr[64];
4849 4850
4850 4851 bzero(valstr, sizeof (valstr));
4851 4852
4852 4853 if (strcmp(pr_name, "_function_number") == 0 ||
4853 4854 strcmp(pr_name, "_fw_version") == 0 ||
4854 4855 strcmp(pr_name, "_port_mode") == 0 ||
4855 4856 strcmp(pr_name, "_hot_swap_phy") == 0) {
4856 4857 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
4857 4858
4858 4859 } else if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
4859 4860 (void) snprintf(valstr, sizeof (valstr),
4860 4861 "%d", RXDMA_RCR_TO_DEFAULT);
4861 4862
4862 4863 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
4863 4864 (void) snprintf(valstr, sizeof (valstr),
4864 4865 "%d", RXDMA_RCR_PTHRES_DEFAULT);
4865 4866
4866 4867 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0 ||
4867 4868 strcmp(pr_name, "_class_opt_ipv4_udp") == 0 ||
4868 4869 strcmp(pr_name, "_class_opt_ipv4_ah") == 0 ||
4869 4870 strcmp(pr_name, "_class_opt_ipv4_sctp") == 0 ||
4870 4871 strcmp(pr_name, "_class_opt_ipv6_tcp") == 0 ||
4871 4872 strcmp(pr_name, "_class_opt_ipv6_udp") == 0 ||
4872 4873 strcmp(pr_name, "_class_opt_ipv6_ah") == 0 ||
4873 4874 strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
4874 4875 (void) snprintf(valstr, sizeof (valstr), "%x",
4875 4876 NXGE_CLASS_FLOW_GEN_SERVER);
4876 4877
4877 4878 } else if (strcmp(pr_name, "_soft_lso_enable") == 0) {
4878 4879 (void) snprintf(valstr, sizeof (valstr), "%d", 0);
4879 4880
4880 4881 } else if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
4881 4882 (void) snprintf(valstr, sizeof (valstr), "%d", 1);
4882 4883
4883 4884 } else if (strcmp(pr_name, "_adv_pause_cap") == 0) {
4884 4885 (void) snprintf(valstr, sizeof (valstr), "%d", 1);
4885 4886 }
4886 4887
4887 4888 if (strlen(valstr) > 0)
4888 4889 mac_prop_info_set_default_str(prh, valstr);
4889 4890 }
4890 4891
4891 4892 /* ARGSUSED */
4892 4893 static int
4893 4894 nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize,
4894 4895 const void *pr_val)
4895 4896 {
4896 4897 p_nxge_param_t param_arr = nxgep->param_arr;
4897 4898 int err = 0;
4898 4899 long result;
4899 4900
4900 4901 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4901 4902 "==> nxge_set_priv_prop: name %s", pr_name));
4902 4903
4903 4904 /* Blanking */
4904 4905 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
4905 4906 err = nxge_param_rx_intr_time(nxgep, NULL, NULL,
4906 4907 (char *)pr_val,
4907 4908 (caddr_t)¶m_arr[param_rxdma_intr_time]);
4908 4909 if (err) {
4909 4910 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4910 4911 "<== nxge_set_priv_prop: "
4911 4912 "unable to set (%s)", pr_name));
4912 4913 err = EINVAL;
4913 4914 } else {
4914 4915 err = 0;
4915 4916 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4916 4917 "<== nxge_set_priv_prop: "
4917 4918 "set (%s)", pr_name));
4918 4919 }
4919 4920
4920 4921 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4921 4922 "<== nxge_set_priv_prop: name %s (value %d)",
4922 4923 pr_name, result));
4923 4924
4924 4925 return (err);
4925 4926 }
4926 4927
4927 4928 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
4928 4929 err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL,
4929 4930 (char *)pr_val,
4930 4931 (caddr_t)¶m_arr[param_rxdma_intr_pkts]);
4931 4932 if (err) {
4932 4933 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4933 4934 "<== nxge_set_priv_prop: "
4934 4935 "unable to set (%s)", pr_name));
4935 4936 err = EINVAL;
4936 4937 } else {
4937 4938 err = 0;
4938 4939 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4939 4940 "<== nxge_set_priv_prop: "
4940 4941 "set (%s)", pr_name));
4941 4942 }
4942 4943
4943 4944 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4944 4945 "<== nxge_set_priv_prop: name %s (value %d)",
4945 4946 pr_name, result));
4946 4947
4947 4948 return (err);
4948 4949 }
4949 4950
4950 4951 /* Classification */
4951 4952 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
4952 4953 if (pr_val == NULL) {
4953 4954 err = EINVAL;
4954 4955 return (err);
4955 4956 }
4956 4957 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4957 4958
4958 4959 err = nxge_param_set_ip_opt(nxgep, NULL,
4959 4960 NULL, (char *)pr_val,
4960 4961 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]);
4961 4962
4962 4963 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4963 4964 "<== nxge_set_priv_prop: name %s (value 0x%x)",
4964 4965 pr_name, result));
4965 4966
4966 4967 return (err);
4967 4968 }
4968 4969
4969 4970 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
4970 4971 if (pr_val == NULL) {
4971 4972 err = EINVAL;
4972 4973 return (err);
4973 4974 }
4974 4975 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4975 4976
4976 4977 err = nxge_param_set_ip_opt(nxgep, NULL,
4977 4978 NULL, (char *)pr_val,
4978 4979 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]);
4979 4980
4980 4981 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4981 4982 "<== nxge_set_priv_prop: name %s (value 0x%x)",
4982 4983 pr_name, result));
4983 4984
4984 4985 return (err);
4985 4986 }
4986 4987 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
4987 4988 if (pr_val == NULL) {
4988 4989 err = EINVAL;
4989 4990 return (err);
4990 4991 }
4991 4992 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4992 4993
4993 4994 err = nxge_param_set_ip_opt(nxgep, NULL,
4994 4995 NULL, (char *)pr_val,
4995 4996 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]);
4996 4997
4997 4998 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4998 4999 "<== nxge_set_priv_prop: name %s (value 0x%x)",
4999 5000 pr_name, result));
5000 5001
5001 5002 return (err);
5002 5003 }
5003 5004 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
5004 5005 if (pr_val == NULL) {
5005 5006 err = EINVAL;
5006 5007 return (err);
5007 5008 }
5008 5009 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5009 5010
5010 5011 err = nxge_param_set_ip_opt(nxgep, NULL,
5011 5012 NULL, (char *)pr_val,
5012 5013 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]);
5013 5014
5014 5015 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5015 5016 "<== nxge_set_priv_prop: name %s (value 0x%x)",
5016 5017 pr_name, result));
5017 5018
5018 5019 return (err);
5019 5020 }
5020 5021
5021 5022 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
5022 5023 if (pr_val == NULL) {
5023 5024 err = EINVAL;
5024 5025 return (err);
5025 5026 }
5026 5027 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5027 5028
5028 5029 err = nxge_param_set_ip_opt(nxgep, NULL,
5029 5030 NULL, (char *)pr_val,
5030 5031 		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
5031 5032
5032 5033 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5033 5034 "<== nxge_set_priv_prop: name %s (value 0x%x)",
5034 5035 pr_name, result));
5035 5036
5036 5037 return (err);
5037 5038 }
5038 5039
5039 5040 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
5040 5041 if (pr_val == NULL) {
5041 5042 err = EINVAL;
5042 5043 return (err);
5043 5044 }
5044 5045 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5045 5046
5046 5047 err = nxge_param_set_ip_opt(nxgep, NULL,
5047 5048 NULL, (char *)pr_val,
5048 5049 		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
5049 5050
5050 5051 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5051 5052 "<== nxge_set_priv_prop: name %s (value 0x%x)",
5052 5053 pr_name, result));
5053 5054
5054 5055 return (err);
5055 5056 }
5056 5057 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
5057 5058 if (pr_val == NULL) {
5058 5059 err = EINVAL;
5059 5060 return (err);
5060 5061 }
5061 5062 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5062 5063
5063 5064 err = nxge_param_set_ip_opt(nxgep, NULL,
5064 5065 NULL, (char *)pr_val,
5065 5066 		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
5066 5067
5067 5068 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5068 5069 "<== nxge_set_priv_prop: name %s (value 0x%x)",
5069 5070 pr_name, result));
5070 5071
5071 5072 return (err);
5072 5073 }
5073 5074 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
5074 5075 if (pr_val == NULL) {
5075 5076 err = EINVAL;
5076 5077 return (err);
5077 5078 }
5078 5079 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5079 5080
5080 5081 err = nxge_param_set_ip_opt(nxgep, NULL,
5081 5082 NULL, (char *)pr_val,
5082 5083 		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
5083 5084
5084 5085 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5085 5086 "<== nxge_set_priv_prop: name %s (value 0x%x)",
5086 5087 pr_name, result));
5087 5088
5088 5089 return (err);
5089 5090 }
5090 5091
5091 5092 if (strcmp(pr_name, "_soft_lso_enable") == 0) {
5092 5093 if (pr_val == NULL) {
5093 5094 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5094 5095 "==> nxge_set_priv_prop: name %s (null)", pr_name));
5095 5096 err = EINVAL;
5096 5097 return (err);
5097 5098 }
5098 5099
5099 5100 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5100 5101 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5101 5102 "<== nxge_set_priv_prop: name %s "
5102 5103 "(lso %d pr_val %s value %d)",
5103 5104 pr_name, nxgep->soft_lso_enable, pr_val, result));
5104 5105
5105 5106 if (result > 1 || result < 0) {
5106 5107 			return (EINVAL);
5107 5108 } else {
5108 5109 if (nxgep->soft_lso_enable == (uint32_t)result) {
5109 5110 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5110 5111 "no change (%d %d)",
5111 5112 nxgep->soft_lso_enable, result));
5112 5113 return (0);
5113 5114 }
5114 5115 }
5115 5116
5116 5117 nxgep->soft_lso_enable = (int)result;
5117 5118
5118 5119 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5119 5120 "<== nxge_set_priv_prop: name %s (value %d)",
5120 5121 pr_name, result));
5121 5122
5122 5123 return (err);
5123 5124 }
5124 5125 /*
5125 5126 * Commands like "ndd -set /dev/nxge0 adv_10gfdx_cap 1" cause the
5126 5127 * following code to be executed.
5127 5128 */
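	/*
	 * A usage sketch (names are illustrative, assuming the port is
	 * plumbed as link nxge0): the same private properties can also be
	 * administered through dladm, e.g.
	 *
	 *	# dladm set-linkprop -p _adv_10gfdx_cap=1 nxge0
	 *	# dladm show-linkprop -p _adv_pause_cap nxge0
	 */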
5128 5129 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
5129 5130 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
5130 5131 		    (caddr_t)&param_arr[param_anar_10gfdx]);
5131 5132 return (err);
5132 5133 }
5133 5134 if (strcmp(pr_name, "_adv_pause_cap") == 0) {
5134 5135 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
5135 5136 		    (caddr_t)&param_arr[param_anar_pause]);
5136 5137 return (err);
5137 5138 }
5138 5139
5139 - return (EINVAL);
5140 + return (ENOTSUP);
5140 5141 }
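/*
 * A minimal sketch of the error contract adopted above (hypothetical helper,
 * not part of the driver): an unrecognized private property reports ENOTSUP,
 * while a recognized property with a malformed or out-of-range value reports
 * EINVAL, so callers can tell "no such property" from "bad value".
 *
 *	static int
 *	example_set_priv_prop(const char *pr_name, const char *pr_val)
 *	{
 *		long val;
 *
 *		if (strcmp(pr_name, "_example_prop") != 0)
 *			return (ENOTSUP);
 *		if (ddi_strtol(pr_val, NULL, 0, &val) != 0 ||
 *		    val < 0 || val > 1)
 *			return (EINVAL);
 *		return (0);
 *	}
 */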
5141 5142
5142 5143 static int
5143 5144 nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize,
5144 5145 void *pr_val)
5145 5146 {
5146 5147 p_nxge_param_t param_arr = nxgep->param_arr;
5147 5148 char valstr[MAXNAMELEN];
5148 - int err = EINVAL;
5149 + int err = ENOTSUP;
5149 5150 uint_t strsize;
5150 5151
5151 5152 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5152 5153 "==> nxge_get_priv_prop: property %s", pr_name));
5153 5154
5154 5155 /* function number */
5155 5156 if (strcmp(pr_name, "_function_number") == 0) {
5156 5157 (void) snprintf(valstr, sizeof (valstr), "%d",
5157 5158 nxgep->function_num);
5158 5159 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5159 5160 "==> nxge_get_priv_prop: name %s "
5160 5161 "(value %d valstr %s)",
5161 5162 pr_name, nxgep->function_num, valstr));
5162 5163
5163 5164 err = 0;
5164 5165 goto done;
5165 5166 }
5166 5167
5167 5168 /* Neptune firmware version */
5168 5169 if (strcmp(pr_name, "_fw_version") == 0) {
5169 5170 (void) snprintf(valstr, sizeof (valstr), "%s",
5170 5171 nxgep->vpd_info.ver);
5171 5172 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5172 5173 "==> nxge_get_priv_prop: name %s "
5173 5174 "(value %d valstr %s)",
5174 5175 pr_name, nxgep->vpd_info.ver, valstr));
5175 5176
5176 5177 err = 0;
5177 5178 goto done;
5178 5179 }
5179 5180
5180 5181 /* port PHY mode */
5181 5182 if (strcmp(pr_name, "_port_mode") == 0) {
5182 5183 switch (nxgep->mac.portmode) {
5183 5184 case PORT_1G_COPPER:
5184 5185 (void) snprintf(valstr, sizeof (valstr), "1G copper %s",
5185 5186 nxgep->hot_swappable_phy ?
5186 5187 "[Hot Swappable]" : "");
5187 5188 break;
5188 5189 case PORT_1G_FIBER:
5189 5190 (void) snprintf(valstr, sizeof (valstr), "1G fiber %s",
5190 5191 nxgep->hot_swappable_phy ?
5191 5192 "[hot swappable]" : "");
5192 5193 break;
5193 5194 case PORT_10G_COPPER:
5194 5195 (void) snprintf(valstr, sizeof (valstr),
5195 5196 "10G copper %s",
5196 5197 nxgep->hot_swappable_phy ?
5197 5198 "[hot swappable]" : "");
5198 5199 break;
5199 5200 case PORT_10G_FIBER:
5200 5201 (void) snprintf(valstr, sizeof (valstr), "10G fiber %s",
5201 5202 nxgep->hot_swappable_phy ?
5202 5203 "[hot swappable]" : "");
5203 5204 break;
5204 5205 case PORT_10G_SERDES:
5205 5206 (void) snprintf(valstr, sizeof (valstr),
5206 5207 "10G serdes %s", nxgep->hot_swappable_phy ?
5207 5208 "[hot swappable]" : "");
5208 5209 break;
5209 5210 case PORT_1G_SERDES:
5210 5211 (void) snprintf(valstr, sizeof (valstr), "1G serdes %s",
5211 5212 nxgep->hot_swappable_phy ?
5212 5213 "[hot swappable]" : "");
5213 5214 break;
5214 5215 case PORT_1G_TN1010:
5215 5216 (void) snprintf(valstr, sizeof (valstr),
5216 5217 "1G TN1010 copper %s", nxgep->hot_swappable_phy ?
5217 5218 "[hot swappable]" : "");
5218 5219 break;
5219 5220 case PORT_10G_TN1010:
5220 5221 (void) snprintf(valstr, sizeof (valstr),
5221 5222 "10G TN1010 copper %s", nxgep->hot_swappable_phy ?
5222 5223 "[hot swappable]" : "");
5223 5224 break;
5224 5225 case PORT_1G_RGMII_FIBER:
5225 5226 (void) snprintf(valstr, sizeof (valstr),
5226 5227 "1G rgmii fiber %s", nxgep->hot_swappable_phy ?
5227 5228 "[hot swappable]" : "");
5228 5229 break;
5229 5230 case PORT_HSP_MODE:
5230 5231 (void) snprintf(valstr, sizeof (valstr),
5231 5232 "phy not present[hot swappable]");
5232 5233 break;
5233 5234 default:
5234 5235 (void) snprintf(valstr, sizeof (valstr), "unknown %s",
5235 5236 nxgep->hot_swappable_phy ?
5236 5237 "[hot swappable]" : "");
5237 5238 break;
5238 5239 }
5239 5240
5240 5241 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5241 5242 "==> nxge_get_priv_prop: name %s (value %s)",
5242 5243 pr_name, valstr));
5243 5244
5244 5245 err = 0;
5245 5246 goto done;
5246 5247 }
5247 5248
5248 5249 /* Hot swappable PHY */
5249 5250 if (strcmp(pr_name, "_hot_swap_phy") == 0) {
5250 5251 (void) snprintf(valstr, sizeof (valstr), "%s",
5251 5252 nxgep->hot_swappable_phy ?
5252 5253 "yes" : "no");
5253 5254
5254 5255 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5255 5256 "==> nxge_get_priv_prop: name %s "
5256 5257 "(value %d valstr %s)",
5257 5258 pr_name, nxgep->hot_swappable_phy, valstr));
5258 5259
5259 5260 err = 0;
5260 5261 goto done;
5261 5262 }
5262 5263
5263 5264
5264 5265 /* Receive Interrupt Blanking Parameters */
5265 5266 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
5266 5267 err = 0;
5267 5268 (void) snprintf(valstr, sizeof (valstr), "%d",
5268 5269 nxgep->intr_timeout);
5269 5270 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5270 5271 "==> nxge_get_priv_prop: name %s (value %d)",
5271 5272 pr_name,
5272 5273 (uint32_t)nxgep->intr_timeout));
5273 5274 goto done;
5274 5275 }
5275 5276
5276 5277 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
5277 5278 err = 0;
5278 5279 (void) snprintf(valstr, sizeof (valstr), "%d",
5279 5280 nxgep->intr_threshold);
5280 5281 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5281 5282 "==> nxge_get_priv_prop: name %s (value %d)",
5282 5283 pr_name, (uint32_t)nxgep->intr_threshold));
5283 5284
5284 5285 goto done;
5285 5286 }
5286 5287
5287 5288 /* Classification and Load Distribution Configuration */
5288 5289 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
5289 5290 err = nxge_dld_get_ip_opt(nxgep,
5290 5291 		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
5291 5292
5292 5293 (void) snprintf(valstr, sizeof (valstr), "%x",
5293 5294 (int)param_arr[param_class_opt_ipv4_tcp].value);
5294 5295
5295 5296 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5296 5297 "==> nxge_get_priv_prop: %s", valstr));
5297 5298 goto done;
5298 5299 }
5299 5300
5300 5301 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
5301 5302 err = nxge_dld_get_ip_opt(nxgep,
5302 5303 		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
5303 5304
5304 5305 (void) snprintf(valstr, sizeof (valstr), "%x",
5305 5306 (int)param_arr[param_class_opt_ipv4_udp].value);
5306 5307
5307 5308 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5308 5309 "==> nxge_get_priv_prop: %s", valstr));
5309 5310 goto done;
5310 5311 }
5311 5312 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
5312 5313 err = nxge_dld_get_ip_opt(nxgep,
5313 5314 		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
5314 5315
5315 5316 (void) snprintf(valstr, sizeof (valstr), "%x",
5316 5317 (int)param_arr[param_class_opt_ipv4_ah].value);
5317 5318
5318 5319 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5319 5320 "==> nxge_get_priv_prop: %s", valstr));
5320 5321 goto done;
5321 5322 }
5322 5323
5323 5324 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
5324 5325 err = nxge_dld_get_ip_opt(nxgep,
5325 5326 		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
5326 5327
5327 5328 (void) snprintf(valstr, sizeof (valstr), "%x",
5328 5329 (int)param_arr[param_class_opt_ipv4_sctp].value);
5329 5330
5330 5331 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5331 5332 "==> nxge_get_priv_prop: %s", valstr));
5332 5333 goto done;
5333 5334 }
5334 5335
5335 5336 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
5336 5337 err = nxge_dld_get_ip_opt(nxgep,
5337 5338 		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
5338 5339
5339 5340 (void) snprintf(valstr, sizeof (valstr), "%x",
5340 5341 (int)param_arr[param_class_opt_ipv6_tcp].value);
5341 5342
5342 5343 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5343 5344 "==> nxge_get_priv_prop: %s", valstr));
5344 5345 goto done;
5345 5346 }
5346 5347
5347 5348 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
5348 5349 err = nxge_dld_get_ip_opt(nxgep,
5349 5350 		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
5350 5351
5351 5352 (void) snprintf(valstr, sizeof (valstr), "%x",
5352 5353 (int)param_arr[param_class_opt_ipv6_udp].value);
5353 5354
5354 5355 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5355 5356 "==> nxge_get_priv_prop: %s", valstr));
5356 5357 goto done;
5357 5358 }
5358 5359
5359 5360 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
5360 5361 err = nxge_dld_get_ip_opt(nxgep,
5361 5362 		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
5362 5363
5363 5364 (void) snprintf(valstr, sizeof (valstr), "%x",
5364 5365 (int)param_arr[param_class_opt_ipv6_ah].value);
5365 5366
5366 5367 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5367 5368 "==> nxge_get_priv_prop: %s", valstr));
5368 5369 goto done;
5369 5370 }
5370 5371
5371 5372 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
5372 5373 err = nxge_dld_get_ip_opt(nxgep,
5373 5374 		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
5374 5375
5375 5376 (void) snprintf(valstr, sizeof (valstr), "%x",
5376 5377 (int)param_arr[param_class_opt_ipv6_sctp].value);
5377 5378
5378 5379 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5379 5380 "==> nxge_get_priv_prop: %s", valstr));
5380 5381 goto done;
5381 5382 }
5382 5383
5383 5384 /* Software LSO */
5384 5385 if (strcmp(pr_name, "_soft_lso_enable") == 0) {
5385 5386 (void) snprintf(valstr, sizeof (valstr),
5386 5387 "%d", nxgep->soft_lso_enable);
5387 5388 err = 0;
5388 5389 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5389 5390 "==> nxge_get_priv_prop: name %s (value %d)",
5390 5391 pr_name, nxgep->soft_lso_enable));
5391 5392
5392 5393 goto done;
5393 5394 }
5394 5395 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
5395 5396 err = 0;
5396 5397 if (nxgep->param_arr[param_anar_10gfdx].value != 0) {
5397 5398 (void) snprintf(valstr, sizeof (valstr), "%d", 1);
5398 5399 goto done;
5399 5400 } else {
5400 5401 (void) snprintf(valstr, sizeof (valstr), "%d", 0);
5401 5402 goto done;
5402 5403 }
5403 5404 }
5404 5405 if (strcmp(pr_name, "_adv_pause_cap") == 0) {
5405 5406 err = 0;
5406 5407 if (nxgep->param_arr[param_anar_pause].value != 0) {
5407 5408 (void) snprintf(valstr, sizeof (valstr), "%d", 1);
5408 5409 goto done;
5409 5410 } else {
5410 5411 (void) snprintf(valstr, sizeof (valstr), "%d", 0);
5411 5412 goto done;
5412 5413 }
5413 5414 }
5414 5415
5415 5416 done:
5416 5417 if (err == 0) {
5417 5418 strsize = (uint_t)strlen(valstr);
5418 5419 if (pr_valsize < strsize) {
5419 5420 err = ENOBUFS;
5420 5421 } else {
5421 5422 (void) strlcpy(pr_val, valstr, pr_valsize);
5422 5423 }
5423 5424 }
5424 5425
5425 5426 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5426 5427 "<== nxge_get_priv_prop: return %d", err));
5427 5428 return (err);
5428 5429 }
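/*
 * A worked example of the done: path above (values are illustrative): for
 * "_port_mode" on a 10G fiber port, valstr becomes "10G fiber " with a
 * strlen() of 10, so a caller supplying pr_valsize of 8 gets ENOBUFS while
 * a caller supplying pr_valsize of 16 gets the string copied out and err 0.
 */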
5429 5430
5430 5431 /*
5431 5432 * Module loading and removing entry points.
5432 5433 */
5433 5434
5434 5435 DDI_DEFINE_STREAM_OPS(nxge_dev_ops, nulldev, nulldev, nxge_attach, nxge_detach,
5435 5436 nodev, NULL, D_MP, NULL, nxge_quiesce);
5436 5437
5437 5438 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet"
5438 5439
5439 5440 /*
5440 5441 * Module linkage information for the kernel.
5441 5442 */
5442 5443 static struct modldrv nxge_modldrv = {
5443 5444 &mod_driverops,
5444 5445 NXGE_DESC_VER,
5445 5446 &nxge_dev_ops
5446 5447 };
5447 5448
5448 5449 static struct modlinkage modlinkage = {
5449 5450 MODREV_1, (void *) &nxge_modldrv, NULL
5450 5451 };
5451 5452
5452 5453 int
5453 5454 _init(void)
5454 5455 {
5455 5456 int status;
5456 5457
5457 5458 MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
5458 5459
5459 5460 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
5460 5461
5461 5462 mac_init_ops(&nxge_dev_ops, "nxge");
5462 5463
5463 5464 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0);
5464 5465 if (status != 0) {
5465 5466 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
5466 5467 "failed to init device soft state"));
5467 5468 goto _init_exit;
5468 5469 }
5469 5470
5470 5471 status = mod_install(&modlinkage);
5471 5472 if (status != 0) {
5472 5473 ddi_soft_state_fini(&nxge_list);
5473 5474 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed"));
5474 5475 goto _init_exit;
5475 5476 }
5476 5477
5477 5478 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL);
5478 5479
5479 5480 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _init status = 0x%X", status));
5480 5481 return (status);
5481 5482
5482 5483 _init_exit:
5483 5484 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _init status = 0x%X", status));
5484 5485 MUTEX_DESTROY(&nxgedebuglock);
5485 5486 return (status);
5486 5487 }
5487 5488
5488 5489 int
5489 5490 _fini(void)
5490 5491 {
5491 5492 int status;
5492 5493
5493 5494 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
5494 5495 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
5495 5496
5496 5497 if (nxge_mblks_pending)
5497 5498 return (EBUSY);
5498 5499
5499 5500 status = mod_remove(&modlinkage);
5500 5501 if (status != DDI_SUCCESS) {
5501 5502 NXGE_DEBUG_MSG((NULL, MOD_CTL,
5502 5503 "Module removal failed 0x%08x",
5503 5504 status));
5504 5505 goto _fini_exit;
5505 5506 }
5506 5507
5507 5508 mac_fini_ops(&nxge_dev_ops);
5508 5509
5509 5510 ddi_soft_state_fini(&nxge_list);
5510 5511
5511 5512 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _fini status = 0x%08x", status));
5512 5513
5513 5514 MUTEX_DESTROY(&nxge_common_lock);
5514 5515 MUTEX_DESTROY(&nxgedebuglock);
5515 5516 return (status);
5516 5517
5517 5518 _fini_exit:
5518 5519 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _fini status = 0x%08x", status));
5519 5520 return (status);
5520 5521 }
5521 5522
5522 5523 int
5523 5524 _info(struct modinfo *modinfop)
5524 5525 {
5525 5526 int status;
5526 5527
5527 5528 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
5528 5529 status = mod_info(&modlinkage, modinfop);
5529 5530 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
5530 5531
5531 5532 return (status);
5532 5533 }
5533 5534
5534 5535 /*ARGSUSED*/
5535 5536 static int
5536 5537 nxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
5537 5538 {
5538 5539 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
5539 5540 p_nxge_t nxgep = rhp->nxgep;
5540 5541 uint32_t channel;
5541 5542 p_tx_ring_t ring;
5542 5543
5543 5544 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index;
5544 5545 ring = nxgep->tx_rings->rings[channel];
5545 5546
5546 5547 MUTEX_ENTER(&ring->lock);
5547 5548 ASSERT(ring->tx_ring_handle == NULL);
5548 5549 ring->tx_ring_handle = rhp->ring_handle;
5549 5550 MUTEX_EXIT(&ring->lock);
5550 5551
5551 5552 return (0);
5552 5553 }
5553 5554
5554 5555 static void
5555 5556 nxge_tx_ring_stop(mac_ring_driver_t rdriver)
5556 5557 {
5557 5558 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
5558 5559 p_nxge_t nxgep = rhp->nxgep;
5559 5560 uint32_t channel;
5560 5561 p_tx_ring_t ring;
5561 5562
5562 5563 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index;
5563 5564 ring = nxgep->tx_rings->rings[channel];
5564 5565
5565 5566 MUTEX_ENTER(&ring->lock);
5566 5567 ASSERT(ring->tx_ring_handle != NULL);
5567 5568 ring->tx_ring_handle = (mac_ring_handle_t)NULL;
5568 5569 MUTEX_EXIT(&ring->lock);
5569 5570 }
5570 5571
5571 5572 int
5572 5573 nxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
5573 5574 {
5574 5575 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
5575 5576 p_nxge_t nxgep = rhp->nxgep;
5576 5577 uint32_t channel;
5577 5578 p_rx_rcr_ring_t ring;
5578 5579 int i;
5579 5580
5580 5581 channel = nxgep->pt_config.hw_config.start_rdc + rhp->index;
5581 5582 ring = nxgep->rx_rcr_rings->rcr_rings[channel];
5582 5583
5583 5584 MUTEX_ENTER(&ring->lock);
5584 5585
5585 5586 if (ring->started) {
5586 5587 ASSERT(ring->started == B_FALSE);
5587 5588 MUTEX_EXIT(&ring->lock);
5588 5589 return (0);
5589 5590 }
5590 5591
5591 5592 /* set rcr_ring */
5592 5593 for (i = 0; i < nxgep->ldgvp->maxldvs; i++) {
5593 5594 if ((nxgep->ldgvp->ldvp[i].is_rxdma) &&
5594 5595 (nxgep->ldgvp->ldvp[i].channel == channel)) {
5595 5596 ring->ldvp = &nxgep->ldgvp->ldvp[i];
5596 5597 ring->ldgp = nxgep->ldgvp->ldvp[i].ldgp;
5597 5598 }
5598 5599 }
5599 5600
5600 5601 ring->rcr_mac_handle = rhp->ring_handle;
5601 5602 ring->rcr_gen_num = mr_gen_num;
5602 5603 ring->started = B_TRUE;
5603 5604 rhp->ring_gen_num = mr_gen_num;
5604 5605 MUTEX_EXIT(&ring->lock);
5605 5606
5606 5607 return (0);
5607 5608 }
5608 5609
5609 5610 static void
5610 5611 nxge_rx_ring_stop(mac_ring_driver_t rdriver)
5611 5612 {
5612 5613 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
5613 5614 p_nxge_t nxgep = rhp->nxgep;
5614 5615 uint32_t channel;
5615 5616 p_rx_rcr_ring_t ring;
5616 5617
5617 5618 channel = nxgep->pt_config.hw_config.start_rdc + rhp->index;
5618 5619 ring = nxgep->rx_rcr_rings->rcr_rings[channel];
5619 5620
5620 5621 MUTEX_ENTER(&ring->lock);
5621 5622 ASSERT(ring->started == B_TRUE);
5622 5623 ring->rcr_mac_handle = NULL;
5623 5624 ring->ldvp = NULL;
5624 5625 ring->ldgp = NULL;
5625 5626 ring->started = B_FALSE;
5626 5627 MUTEX_EXIT(&ring->lock);
5627 5628 }
5628 5629
5629 5630 static int
5630 5631 nxge_ring_get_htable_idx(p_nxge_t nxgep, mac_ring_type_t type, uint32_t channel)
5631 5632 {
5632 5633 int i;
5633 5634
5634 5635 #if defined(sun4v)
5635 5636 if (isLDOMguest(nxgep)) {
5636 5637 return (nxge_hio_get_dc_htable_idx(nxgep,
5637 5638 (type == MAC_RING_TYPE_TX) ? VP_BOUND_TX : VP_BOUND_RX,
5638 5639 channel));
5639 5640 }
5640 5641 #endif
5641 5642
5642 5643 ASSERT(nxgep->ldgvp != NULL);
5643 5644
5644 5645 switch (type) {
5645 5646 case MAC_RING_TYPE_TX:
5646 5647 for (i = 0; i < nxgep->ldgvp->maxldvs; i++) {
5647 5648 if ((nxgep->ldgvp->ldvp[i].is_txdma) &&
5648 5649 (nxgep->ldgvp->ldvp[i].channel == channel)) {
5649 5650 return ((int)
5650 5651 nxgep->ldgvp->ldvp[i].ldgp->htable_idx);
5651 5652 }
5652 5653 }
5653 5654 break;
5654 5655
5655 5656 case MAC_RING_TYPE_RX:
5656 5657 for (i = 0; i < nxgep->ldgvp->maxldvs; i++) {
5657 5658 if ((nxgep->ldgvp->ldvp[i].is_rxdma) &&
5658 5659 (nxgep->ldgvp->ldvp[i].channel == channel)) {
5659 5660 return ((int)
5660 5661 nxgep->ldgvp->ldvp[i].ldgp->htable_idx);
5661 5662 }
5662 5663 }
5663 5664 }
5664 5665
5665 5666 return (-1);
5666 5667 }
5667 5668
5668 5669 /*
5669 5670  * Callback function for the MAC layer to register all rings.
5670 5671 */
5671 5672 static void
5672 5673 nxge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
5673 5674 const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
5674 5675 {
5675 5676 p_nxge_t nxgep = (p_nxge_t)arg;
5676 5677 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config;
5677 5678 p_nxge_intr_t intrp;
5678 5679 uint32_t channel;
5679 5680 int htable_idx;
5680 5681 p_nxge_ring_handle_t rhandlep;
5681 5682
5682 5683 ASSERT(nxgep != NULL);
5683 5684 ASSERT(p_cfgp != NULL);
5684 5685 ASSERT(infop != NULL);
5685 5686
5686 5687 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
5687 5688 "==> nxge_fill_ring 0x%x index %d", rtype, index));
5688 5689
5689 5690
5690 5691 switch (rtype) {
5691 5692 case MAC_RING_TYPE_TX: {
5692 5693 mac_intr_t *mintr = &infop->mri_intr;
5693 5694
5694 5695 NXGE_DEBUG_MSG((nxgep, TX_CTL,
5695 5696 "==> nxge_fill_ring (TX) 0x%x index %d ntdcs %d",
5696 5697 rtype, index, p_cfgp->tdc.count));
5697 5698
5698 5699 ASSERT((index >= 0) && (index < p_cfgp->tdc.count));
5699 5700 rhandlep = &nxgep->tx_ring_handles[index];
5700 5701 rhandlep->nxgep = nxgep;
5701 5702 rhandlep->index = index;
5702 5703 rhandlep->ring_handle = rh;
5703 5704
5704 5705 channel = nxgep->pt_config.hw_config.tdc.start + index;
5705 5706 rhandlep->channel = channel;
5706 5707 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5707 5708 htable_idx = nxge_ring_get_htable_idx(nxgep, rtype,
5708 5709 channel);
5709 5710 if (htable_idx >= 0)
5710 5711 mintr->mi_ddi_handle = intrp->htable[htable_idx];
5711 5712 else
5712 5713 mintr->mi_ddi_handle = NULL;
5713 5714
5714 5715 infop->mri_driver = (mac_ring_driver_t)rhandlep;
5715 5716 infop->mri_start = nxge_tx_ring_start;
5716 5717 infop->mri_stop = nxge_tx_ring_stop;
5717 5718 infop->mri_tx = nxge_tx_ring_send;
5718 5719 infop->mri_stat = nxge_tx_ring_stat;
5719 5720 infop->mri_flags = MAC_RING_TX_SERIALIZE;
5720 5721 break;
5721 5722 }
5722 5723
5723 5724 case MAC_RING_TYPE_RX: {
5724 5725 mac_intr_t nxge_mac_intr;
5725 5726 int nxge_rindex;
5726 5727 p_nxge_intr_t intrp;
5727 5728
5728 5729 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5729 5730
5730 5731 NXGE_DEBUG_MSG((nxgep, RX_CTL,
5731 5732 "==> nxge_fill_ring (RX) 0x%x index %d nrdcs %d",
5732 5733 rtype, index, p_cfgp->max_rdcs));
5733 5734
5734 5735 /*
5735 5736 * 'index' is the ring index within the group.
5736 5737 * Find the ring index in the nxge instance.
5737 5738 */
5738 5739 nxge_rindex = nxge_get_rxring_index(nxgep, rg_index, index);
5739 5740 channel = nxgep->pt_config.hw_config.start_rdc + index;
5740 5741 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5741 5742
5742 5743 ASSERT((nxge_rindex >= 0) && (nxge_rindex < p_cfgp->max_rdcs));
5743 5744 rhandlep = &nxgep->rx_ring_handles[nxge_rindex];
5744 5745 rhandlep->nxgep = nxgep;
5745 5746 rhandlep->index = nxge_rindex;
5746 5747 rhandlep->ring_handle = rh;
5747 5748 rhandlep->channel = channel;
5748 5749
5749 5750 /*
5750 5751 * Entrypoint to enable interrupt (disable poll) and
5751 5752 * disable interrupt (enable poll).
5752 5753 */
5753 5754 bzero(&nxge_mac_intr, sizeof (nxge_mac_intr));
5754 5755 nxge_mac_intr.mi_handle = (mac_intr_handle_t)rhandlep;
5755 5756 nxge_mac_intr.mi_enable = (mac_intr_enable_t)nxge_disable_poll;
5756 5757 nxge_mac_intr.mi_disable = (mac_intr_disable_t)nxge_enable_poll;
5757 5758
5758 5759 htable_idx = nxge_ring_get_htable_idx(nxgep, rtype,
5759 5760 channel);
5760 5761 if (htable_idx >= 0)
5761 5762 nxge_mac_intr.mi_ddi_handle = intrp->htable[htable_idx];
5762 5763 else
5763 5764 nxge_mac_intr.mi_ddi_handle = NULL;
5764 5765
5765 5766 infop->mri_driver = (mac_ring_driver_t)rhandlep;
5766 5767 infop->mri_start = nxge_rx_ring_start;
5767 5768 infop->mri_stop = nxge_rx_ring_stop;
5768 5769 infop->mri_intr = nxge_mac_intr;
5769 5770 infop->mri_poll = nxge_rx_poll;
5770 5771 infop->mri_stat = nxge_rx_ring_stat;
5771 5772 infop->mri_flags = MAC_RING_RX_ENQUEUE;
5772 5773 break;
5773 5774 }
5774 5775
5775 5776 default:
5776 5777 break;
5777 5778 }
5778 5779
5779 5780 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_fill_ring 0x%x", rtype));
5780 5781 }
5781 5782
5782 5783 static void
5783 5784 nxge_group_add_ring(mac_group_driver_t gh, mac_ring_driver_t rh,
5784 5785 mac_ring_type_t type)
5785 5786 {
5786 5787 nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh;
5787 5788 nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh;
5788 5789 nxge_t *nxge;
5789 5790 nxge_grp_t *grp;
5790 5791 nxge_rdc_grp_t *rdc_grp;
5791 5792 uint16_t channel; /* device-wise ring id */
5792 5793 int dev_gindex;
5793 5794 int rv;
5794 5795
5795 5796 nxge = rgroup->nxgep;
5796 5797
5797 5798 switch (type) {
5798 5799 case MAC_RING_TYPE_TX:
5799 5800 /*
5800 5801 * nxge_grp_dc_add takes a channel number which is a
5801 5802 		 * "device" ring ID.
5802 5803 */
5803 5804 channel = nxge->pt_config.hw_config.tdc.start + rhandle->index;
5804 5805
5805 5806 /*
5806 5807 * Remove the ring from the default group
5807 5808 */
5808 5809 if (rgroup->gindex != 0) {
5809 5810 (void) nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);
5810 5811 }
5811 5812
5812 5813 /*
5813 5814 * nxge->tx_set.group[] is an array of groups indexed by
5814 5815 * a "port" group ID.
5815 5816 */
5816 5817 grp = nxge->tx_set.group[rgroup->gindex];
5817 5818 rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel);
5818 5819 if (rv != 0) {
5819 5820 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
5820 5821 "nxge_group_add_ring: nxge_grp_dc_add failed"));
5821 5822 }
5822 5823 break;
5823 5824
5824 5825 case MAC_RING_TYPE_RX:
5825 5826 /*
5826 5827 * nxge->rx_set.group[] is an array of groups indexed by
5827 5828 * a "port" group ID.
5828 5829 */
5829 5830 grp = nxge->rx_set.group[rgroup->gindex];
5830 5831
5831 5832 dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid +
5832 5833 rgroup->gindex;
5833 5834 rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex];
5834 5835
5835 5836 /*
5836 5837 * nxge_grp_dc_add takes a channel number which is a
5837 5838 		 * "device" ring ID.
5838 5839 */
5839 5840 channel = nxge->pt_config.hw_config.start_rdc + rhandle->index;
5840 5841 rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_RX, channel);
5841 5842 if (rv != 0) {
5842 5843 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
5843 5844 "nxge_group_add_ring: nxge_grp_dc_add failed"));
5844 5845 }
5845 5846
5846 5847 rdc_grp->map |= (1 << channel);
5847 5848 rdc_grp->max_rdcs++;
5848 5849
5849 5850 (void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl);
5850 5851 break;
5851 5852 }
5852 5853 }
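/*
 * A worked example of the RX case above (numbers are illustrative): with
 * start_rdc = 4 and a ring handle whose group-relative index is 1, the
 * device-wide channel is 4 + 1 = 5, so the group's RDC bitmap gains bit
 * (1 << 5) = 0x20 and max_rdcs is incremented before the RDC table is
 * reprogrammed via nxge_init_fzc_rdc_tbl().
 */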
5853 5854
5854 5855 static void
5855 5856 nxge_group_rem_ring(mac_group_driver_t gh, mac_ring_driver_t rh,
5856 5857 mac_ring_type_t type)
5857 5858 {
5858 5859 nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh;
5859 5860 nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh;
5860 5861 nxge_t *nxge;
5861 5862 uint16_t channel; /* device-wise ring id */
5862 5863 nxge_rdc_grp_t *rdc_grp;
5863 5864 int dev_gindex;
5864 5865
5865 5866 nxge = rgroup->nxgep;
5866 5867
5867 5868 switch (type) {
5868 5869 case MAC_RING_TYPE_TX:
5869 5870 dev_gindex = nxge->pt_config.hw_config.def_mac_txdma_grpid +
5870 5871 rgroup->gindex;
5871 5872 channel = nxge->pt_config.hw_config.tdc.start + rhandle->index;
5872 5873 nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);
5873 5874
5874 5875 /*
5875 5876 * Add the ring back to the default group
5876 5877 */
5877 5878 if (rgroup->gindex != 0) {
5878 5879 nxge_grp_t *grp;
5879 5880 grp = nxge->tx_set.group[0];
5880 5881 (void) nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel);
5881 5882 }
5882 5883 break;
5883 5884
5884 5885 case MAC_RING_TYPE_RX:
5885 5886 dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid +
5886 5887 rgroup->gindex;
5887 5888 rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex];
5888 5889 channel = rdc_grp->start_rdc + rhandle->index;
5889 5890 nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel);
5890 5891
5891 5892 rdc_grp->map &= ~(1 << channel);
5892 5893 rdc_grp->max_rdcs--;
5893 5894
5894 5895 (void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl);
5895 5896 break;
5896 5897 }
5897 5898 }
5898 5899
5899 5900
5900 5901 /*ARGSUSED*/
5901 5902 static nxge_status_t
5902 5903 nxge_add_intrs(p_nxge_t nxgep)
5903 5904 {
5904 5905
5905 5906 int intr_types;
5906 5907 int type = 0;
5907 5908 int ddi_status = DDI_SUCCESS;
5908 5909 nxge_status_t status = NXGE_OK;
5909 5910
5910 5911 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs"));
5911 5912
5912 5913 nxgep->nxge_intr_type.intr_registered = B_FALSE;
5913 5914 nxgep->nxge_intr_type.intr_enabled = B_FALSE;
5914 5915 nxgep->nxge_intr_type.msi_intx_cnt = 0;
5915 5916 nxgep->nxge_intr_type.intr_added = 0;
5916 5917 nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
5917 5918 nxgep->nxge_intr_type.intr_type = 0;
5918 5919
5919 5920 if (nxgep->niu_type == N2_NIU) {
5920 5921 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
5921 5922 } else if (nxge_msi_enable) {
5922 5923 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
5923 5924 }
5924 5925
5925 5926 /* Get the supported interrupt types */
5926 5927 if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types))
5927 5928 != DDI_SUCCESS) {
5928 5929 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
5929 5930 "ddi_intr_get_supported_types failed: status 0x%08x",
5930 5931 ddi_status));
5931 5932 return (NXGE_ERROR | NXGE_DDI_FAILED);
5932 5933 }
5933 5934 nxgep->nxge_intr_type.intr_types = intr_types;
5934 5935
5935 5936 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5936 5937 "ddi_intr_get_supported_types: 0x%08x", intr_types));
5937 5938
5938 5939 /*
5939 5940 	 * Solaris MSI-X is not supported yet; use MSI for now.
5940 5941 * nxge_msi_enable (1):
5941 5942 * 1 - MSI 2 - MSI-X others - FIXED
5942 5943 */
5943 5944 switch (nxge_msi_enable) {
5944 5945 default:
5945 5946 type = DDI_INTR_TYPE_FIXED;
5946 5947 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5947 5948 "use fixed (intx emulation) type %08x",
5948 5949 type));
5949 5950 break;
5950 5951
5951 5952 case 2:
5952 5953 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5953 5954 "ddi_intr_get_supported_types: 0x%08x", intr_types));
5954 5955 if (intr_types & DDI_INTR_TYPE_MSIX) {
5955 5956 type = DDI_INTR_TYPE_MSIX;
5956 5957 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5957 5958 "ddi_intr_get_supported_types: MSIX 0x%08x",
5958 5959 type));
5959 5960 } else if (intr_types & DDI_INTR_TYPE_MSI) {
5960 5961 type = DDI_INTR_TYPE_MSI;
5961 5962 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5962 5963 "ddi_intr_get_supported_types: MSI 0x%08x",
5963 5964 type));
5964 5965 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
5965 5966 type = DDI_INTR_TYPE_FIXED;
5966 5967 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5967 5968 			    "ddi_intr_get_supported_types: FIXED 0x%08x",
5968 5969 type));
5969 5970 }
5970 5971 break;
5971 5972
5972 5973 case 1:
5973 5974 if (intr_types & DDI_INTR_TYPE_MSI) {
5974 5975 type = DDI_INTR_TYPE_MSI;
5975 5976 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5976 5977 "ddi_intr_get_supported_types: MSI 0x%08x",
5977 5978 type));
5978 5979 } else if (intr_types & DDI_INTR_TYPE_MSIX) {
5979 5980 type = DDI_INTR_TYPE_MSIX;
5980 5981 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5981 5982 "ddi_intr_get_supported_types: MSIX 0x%08x",
5982 5983 type));
5983 5984 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
5984 5985 type = DDI_INTR_TYPE_FIXED;
5985 5986 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5986 5987 			    "ddi_intr_get_supported_types: FIXED 0x%08x",
5987 5988 type));
5988 5989 }
5989 5990 }
5990 5991
5991 5992 nxgep->nxge_intr_type.intr_type = type;
5992 5993 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
5993 5994 type == DDI_INTR_TYPE_FIXED) &&
5994 5995 nxgep->nxge_intr_type.niu_msi_enable) {
5995 5996 if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
5996 5997 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5997 5998 " nxge_add_intrs: "
5998 5999 " nxge_add_intrs_adv failed: status 0x%08x",
5999 6000 status));
6000 6001 return (status);
6001 6002 } else {
6002 6003 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
6003 6004 "interrupts registered : type %d", type));
6004 6005 nxgep->nxge_intr_type.intr_registered = B_TRUE;
6005 6006
6006 6007 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
6007 6008 "\nAdded advanced nxge add_intr_adv "
6008 6009 "intr type 0x%x\n", type));
6009 6010
6010 6011 return (status);
6011 6012 }
6012 6013 }
6013 6014
6014 6015 if (!nxgep->nxge_intr_type.intr_registered) {
6015 6016 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
6016 6017 "failed to register interrupts"));
6017 6018 return (NXGE_ERROR | NXGE_DDI_FAILED);
6018 6019 }
6019 6020
6020 6021 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
6021 6022 return (status);
6022 6023 }
6023 6024
6024 6025 static nxge_status_t
6025 6026 nxge_add_intrs_adv(p_nxge_t nxgep)
6026 6027 {
6027 6028 int intr_type;
6028 6029 p_nxge_intr_t intrp;
6029 6030
6030 6031 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));
6031 6032
6032 6033 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6033 6034 intr_type = intrp->intr_type;
6034 6035 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
6035 6036 intr_type));
6036 6037
6037 6038 switch (intr_type) {
6038 6039 case DDI_INTR_TYPE_MSI: /* 0x2 */
6039 6040 case DDI_INTR_TYPE_MSIX: /* 0x4 */
6040 6041 return (nxge_add_intrs_adv_type(nxgep, intr_type));
6041 6042
6042 6043 case DDI_INTR_TYPE_FIXED: /* 0x1 */
6043 6044 return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));
6044 6045
6045 6046 default:
6046 6047 return (NXGE_ERROR);
6047 6048 }
6048 6049 }
6049 6050
6050 6051
6051 6052 /*ARGSUSED*/
6052 6053 static nxge_status_t
6053 6054 nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
6054 6055 {
6055 6056 dev_info_t *dip = nxgep->dip;
6056 6057 p_nxge_ldg_t ldgp;
6057 6058 p_nxge_intr_t intrp;
6058 6059 uint_t *inthandler;
6059 6060 void *arg1, *arg2;
6060 6061 int behavior;
6061 6062 int nintrs, navail, nrequest;
6062 6063 int nactual, nrequired;
6063 6064 int inum = 0;
6064 6065 int x, y;
6065 6066 int ddi_status = DDI_SUCCESS;
6066 6067 nxge_status_t status = NXGE_OK;
6067 6068
6068 6069 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
6069 6070 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6070 6071 intrp->start_inum = 0;
6071 6072
6072 6073 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
6073 6074 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
6074 6075 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6075 6076 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
6076 6077 "nintrs: %d", ddi_status, nintrs));
6077 6078 return (NXGE_ERROR | NXGE_DDI_FAILED);
6078 6079 }
6079 6080
6080 6081 ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
6081 6082 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
6082 6083 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6083 6084 		    "ddi_intr_get_navail() failed, status: 0x%x, "
6084 6085 		    "navail: %d", ddi_status, navail));
6085 6086 return (NXGE_ERROR | NXGE_DDI_FAILED);
6086 6087 }
6087 6088
6088 6089 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6089 6090 "ddi_intr_get_navail() returned: nintrs %d, navail %d",
6090 6091 nintrs, navail));
6091 6092
6092 6093 /* PSARC/2007/453 MSI-X interrupt limit override */
6093 6094 if (int_type == DDI_INTR_TYPE_MSIX) {
6094 6095 nrequest = nxge_create_msi_property(nxgep);
6095 6096 if (nrequest < navail) {
6096 6097 navail = nrequest;
6097 6098 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6098 6099 "nxge_add_intrs_adv_type: nintrs %d "
6099 6100 "navail %d (nrequest %d)",
6100 6101 nintrs, navail, nrequest));
6101 6102 }
6102 6103 }
6103 6104
6104 6105 if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
6105 6106 /* MSI must be power of 2 */
6106 6107 if ((navail & 16) == 16) {
6107 6108 navail = 16;
6108 6109 } else if ((navail & 8) == 8) {
6109 6110 navail = 8;
6110 6111 } else if ((navail & 4) == 4) {
6111 6112 navail = 4;
6112 6113 } else if ((navail & 2) == 2) {
6113 6114 navail = 2;
6114 6115 } else {
6115 6116 navail = 1;
6116 6117 }
6117 6118 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6118 6119 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
6119 6120 "navail %d", nintrs, navail));
6120 6121 }
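	/*
	 * A worked example of the rounding above (the value is illustrative):
	 * navail = 13 is binary 1101, so the (navail & 8) test matches and
	 * the request is rounded down to 8, the largest power of two not
	 * exceeding the 13 vectors available, as MSI allocation requires.
	 */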
6121 6122
6122 6123 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
6123 6124 DDI_INTR_ALLOC_NORMAL);
6124 6125 intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
6125 6126 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
6126 6127 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
6127 6128 navail, &nactual, behavior);
6128 6129 if (ddi_status != DDI_SUCCESS || nactual == 0) {
6129 6130 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6130 6131 " ddi_intr_alloc() failed: %d",
6131 6132 ddi_status));
6132 6133 kmem_free(intrp->htable, intrp->intr_size);
6133 6134 return (NXGE_ERROR | NXGE_DDI_FAILED);
6134 6135 }
6135 6136
6136 6137 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
6137 6138 (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
6138 6139 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6139 6140 " ddi_intr_get_pri() failed: %d",
6140 6141 ddi_status));
6141 6142 /* Free already allocated interrupts */
6142 6143 for (y = 0; y < nactual; y++) {
6143 6144 (void) ddi_intr_free(intrp->htable[y]);
6144 6145 }
6145 6146
6146 6147 kmem_free(intrp->htable, intrp->intr_size);
6147 6148 return (NXGE_ERROR | NXGE_DDI_FAILED);
6148 6149 }
6149 6150
6150 6151 nrequired = 0;
6151 6152 switch (nxgep->niu_type) {
6152 6153 default:
6153 6154 status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
6154 6155 break;
6155 6156
6156 6157 case N2_NIU:
6157 6158 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
6158 6159 break;
6159 6160 }
6160 6161
6161 6162 if (status != NXGE_OK) {
6162 6163 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6163 6164 		    "nxge_add_intrs_adv_type: nxge_ldgv_init "
6164 6165 "failed: 0x%x", status));
6165 6166 /* Free already allocated interrupts */
6166 6167 for (y = 0; y < nactual; y++) {
6167 6168 (void) ddi_intr_free(intrp->htable[y]);
6168 6169 }
6169 6170
6170 6171 kmem_free(intrp->htable, intrp->intr_size);
6171 6172 return (status);
6172 6173 }
6173 6174
6174 6175 ldgp = nxgep->ldgvp->ldgp;
6175 6176 for (x = 0; x < nrequired; x++, ldgp++) {
6176 6177 ldgp->vector = (uint8_t)x;
6177 6178 ldgp->intdata = SID_DATA(ldgp->func, x);
6178 6179 arg1 = ldgp->ldvp;
6179 6180 arg2 = nxgep;
6180 6181 if (ldgp->nldvs == 1) {
6181 6182 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
6182 6183 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6183 6184 "nxge_add_intrs_adv_type: "
6184 6185 "arg1 0x%x arg2 0x%x: "
6185 6186 "1-1 int handler (entry %d intdata 0x%x)\n",
6186 6187 arg1, arg2,
6187 6188 x, ldgp->intdata));
6188 6189 } else if (ldgp->nldvs > 1) {
6189 6190 inthandler = (uint_t *)ldgp->sys_intr_handler;
6190 6191 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6191 6192 "nxge_add_intrs_adv_type: "
6192 6193 "arg1 0x%x arg2 0x%x: "
6193 6194 "nldevs %d int handler "
6194 6195 "(entry %d intdata 0x%x)\n",
6195 6196 arg1, arg2,
6196 6197 ldgp->nldvs, x, ldgp->intdata));
6197 6198 }
6198 6199
6199 6200 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6200 6201 "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
6201 6202 "htable 0x%llx", x, intrp->htable[x]));
6202 6203
6203 6204 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
6204 6205 (ddi_intr_handler_t *)inthandler, arg1, arg2))
6205 6206 != DDI_SUCCESS) {
6206 6207 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6207 6208 "==> nxge_add_intrs_adv_type: failed #%d "
6208 6209 "status 0x%x", x, ddi_status));
6209 6210 for (y = 0; y < intrp->intr_added; y++) {
6210 6211 (void) ddi_intr_remove_handler(
6211 6212 intrp->htable[y]);
6212 6213 }
6213 6214 /* Free already allocated intr */
6214 6215 for (y = 0; y < nactual; y++) {
6215 6216 (void) ddi_intr_free(intrp->htable[y]);
6216 6217 }
6217 6218 kmem_free(intrp->htable, intrp->intr_size);
6218 6219
6219 6220 (void) nxge_ldgv_uninit(nxgep);
6220 6221
6221 6222 return (NXGE_ERROR | NXGE_DDI_FAILED);
6222 6223 }
6223 6224
6224 6225 ldgp->htable_idx = x;
6225 6226 intrp->intr_added++;
6226 6227 }
6227 6228
6228 6229 intrp->msi_intx_cnt = nactual;
6229 6230
6230 6231 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
6231 6232 "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
6232 6233 navail, nactual,
6233 6234 intrp->msi_intx_cnt,
6234 6235 intrp->intr_added));
6235 6236
6236 6237 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
6237 6238
6238 6239 (void) nxge_intr_ldgv_init(nxgep);
6239 6240
6240 6241 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));
6241 6242
6242 6243 return (status);
6243 6244 }
6244 6245
6245 6246 /*ARGSUSED*/
6246 6247 static nxge_status_t
6247 6248 nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
6248 6249 {
6249 6250 dev_info_t *dip = nxgep->dip;
6250 6251 p_nxge_ldg_t ldgp;
6251 6252 p_nxge_intr_t intrp;
6252 6253 uint_t *inthandler;
6253 6254 void *arg1, *arg2;
6254 6255 int behavior;
6255 6256 int nintrs, navail;
6256 6257 int nactual, nrequired;
6257 6258 int inum = 0;
6258 6259 int x, y;
6259 6260 int ddi_status = DDI_SUCCESS;
6260 6261 nxge_status_t status = NXGE_OK;
6261 6262
6262 6263 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
6263 6264 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6264 6265 intrp->start_inum = 0;
6265 6266
6266 6267 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
6267 6268 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
6268 6269 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6269 6270 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
6270 6271 		    "nintrs: %d", ddi_status, nintrs));
6271 6272 return (NXGE_ERROR | NXGE_DDI_FAILED);
6272 6273 }
6273 6274
6274 6275 ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
6275 6276 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
6276 6277 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6277 6278 		    "ddi_intr_get_navail() failed, status: 0x%x, "
6278 6279 		    "navail: %d", ddi_status, navail));
6279 6280 return (NXGE_ERROR | NXGE_DDI_FAILED);
6280 6281 }
6281 6282
6282 6283 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6283 6284 	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
6284 6285 nintrs, navail));
6285 6286
6286 6287 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
6287 6288 DDI_INTR_ALLOC_NORMAL);
6288 6289 intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
6289 6290 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
6290 6291 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
6291 6292 navail, &nactual, behavior);
6292 6293 if (ddi_status != DDI_SUCCESS || nactual == 0) {
6293 6294 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6294 6295 " ddi_intr_alloc() failed: %d",
6295 6296 ddi_status));
6296 6297 kmem_free(intrp->htable, intrp->intr_size);
6297 6298 return (NXGE_ERROR | NXGE_DDI_FAILED);
6298 6299 }
6299 6300
6300 6301 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
6301 6302 (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
6302 6303 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6303 6304 " ddi_intr_get_pri() failed: %d",
6304 6305 ddi_status));
6305 6306 /* Free already allocated interrupts */
6306 6307 for (y = 0; y < nactual; y++) {
6307 6308 (void) ddi_intr_free(intrp->htable[y]);
6308 6309 }
6309 6310
6310 6311 kmem_free(intrp->htable, intrp->intr_size);
6311 6312 return (NXGE_ERROR | NXGE_DDI_FAILED);
6312 6313 }
6313 6314
6314 6315 nrequired = 0;
6315 6316 switch (nxgep->niu_type) {
6316 6317 default:
6317 6318 status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
6318 6319 break;
6319 6320
6320 6321 case N2_NIU:
6321 6322 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
6322 6323 break;
6323 6324 }
6324 6325
6325 6326 if (status != NXGE_OK) {
6326 6327 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6327 6328 "nxge_add_intrs_adv_type_fix:nxge_ldgv_init "
6328 6329 "failed: 0x%x", status));
6329 6330 /* Free already allocated interrupts */
6330 6331 for (y = 0; y < nactual; y++) {
6331 6332 (void) ddi_intr_free(intrp->htable[y]);
6332 6333 }
6333 6334
6334 6335 kmem_free(intrp->htable, intrp->intr_size);
6335 6336 return (status);
6336 6337 }
6337 6338
6338 6339 ldgp = nxgep->ldgvp->ldgp;
6339 6340 for (x = 0; x < nrequired; x++, ldgp++) {
6340 6341 ldgp->vector = (uint8_t)x;
6341 6342 if (nxgep->niu_type != N2_NIU) {
6342 6343 ldgp->intdata = SID_DATA(ldgp->func, x);
6343 6344 }
6344 6345
6345 6346 arg1 = ldgp->ldvp;
6346 6347 arg2 = nxgep;
6347 6348 if (ldgp->nldvs == 1) {
6348 6349 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
6349 6350 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6350 6351 "nxge_add_intrs_adv_type_fix: "
6351 6352 "1-1 int handler(%d) ldg %d ldv %d "
6352 6353 "arg1 $%p arg2 $%p\n",
6353 6354 x, ldgp->ldg, ldgp->ldvp->ldv,
6354 6355 arg1, arg2));
6355 6356 } else if (ldgp->nldvs > 1) {
6356 6357 inthandler = (uint_t *)ldgp->sys_intr_handler;
6357 6358 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6358 6359 "nxge_add_intrs_adv_type_fix: "
6359 6360 "shared ldv %d int handler(%d) ldv %d ldg %d"
6360 6361 "arg1 0x%016llx arg2 0x%016llx\n",
6361 6362 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
6362 6363 arg1, arg2));
6363 6364 }
6364 6365
6365 6366 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
6366 6367 (ddi_intr_handler_t *)inthandler, arg1, arg2))
6367 6368 != DDI_SUCCESS) {
6368 6369 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6369 6370 "==> nxge_add_intrs_adv_type_fix: failed #%d "
6370 6371 "status 0x%x", x, ddi_status));
6371 6372 for (y = 0; y < intrp->intr_added; y++) {
6372 6373 (void) ddi_intr_remove_handler(
6373 6374 intrp->htable[y]);
6374 6375 }
6375 6376 for (y = 0; y < nactual; y++) {
6376 6377 (void) ddi_intr_free(intrp->htable[y]);
6377 6378 }
6378 6379 /* Free already allocated intr */
6379 6380 kmem_free(intrp->htable, intrp->intr_size);
6380 6381
6381 6382 (void) nxge_ldgv_uninit(nxgep);
6382 6383
6383 6384 return (NXGE_ERROR | NXGE_DDI_FAILED);
6384 6385 }
6385 6386
6386 6387 ldgp->htable_idx = x;
6387 6388 intrp->intr_added++;
6388 6389 }
6389 6390
6390 6391 intrp->msi_intx_cnt = nactual;
6391 6392
6392 6393 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
6393 6394
6394 6395 status = nxge_intr_ldgv_init(nxgep);
6395 6396 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));
6396 6397
6397 6398 return (status);
6398 6399 }
6399 6400
6400 6401 static void
6401 6402 nxge_remove_intrs(p_nxge_t nxgep)
6402 6403 {
6403 6404 int i, inum;
6404 6405 p_nxge_intr_t intrp;
6405 6406
6406 6407 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs"));
6407 6408 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6408 6409 if (!intrp->intr_registered) {
6409 6410 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6410 6411 "<== nxge_remove_intrs: interrupts not registered"));
6411 6412 return;
6412 6413 }
6413 6414
6414 6415 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced"));
6415 6416
6416 6417 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
6417 6418 (void) ddi_intr_block_disable(intrp->htable,
6418 6419 intrp->intr_added);
6419 6420 } else {
6420 6421 for (i = 0; i < intrp->intr_added; i++) {
6421 6422 (void) ddi_intr_disable(intrp->htable[i]);
6422 6423 }
6423 6424 }
6424 6425
6425 6426 for (inum = 0; inum < intrp->intr_added; inum++) {
6426 6427 if (intrp->htable[inum]) {
6427 6428 (void) ddi_intr_remove_handler(intrp->htable[inum]);
6428 6429 }
6429 6430 }
6430 6431
6431 6432 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
6432 6433 if (intrp->htable[inum]) {
6433 6434 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
6434 6435 "nxge_remove_intrs: ddi_intr_free inum %d "
6435 6436 "msi_intx_cnt %d intr_added %d",
6436 6437 inum,
6437 6438 intrp->msi_intx_cnt,
6438 6439 intrp->intr_added));
6439 6440
6440 6441 (void) ddi_intr_free(intrp->htable[inum]);
6441 6442 }
6442 6443 }
6443 6444
6444 6445 kmem_free(intrp->htable, intrp->intr_size);
6445 6446 intrp->intr_registered = B_FALSE;
6446 6447 intrp->intr_enabled = B_FALSE;
6447 6448 intrp->msi_intx_cnt = 0;
6448 6449 intrp->intr_added = 0;
6449 6450
6450 6451 (void) nxge_ldgv_uninit(nxgep);
6451 6452
6452 6453 (void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip,
6453 6454 "#msix-request");
6454 6455
6455 6456 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs"));
6456 6457 }
6457 6458
6458 6459 /*ARGSUSED*/
6459 6460 static void
6460 6461 nxge_intrs_enable(p_nxge_t nxgep)
6461 6462 {
6462 6463 p_nxge_intr_t intrp;
6463 6464 int i;
6464 6465 int status;
6465 6466
6466 6467 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));
6467 6468
6468 6469 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6469 6470
6470 6471 if (!intrp->intr_registered) {
6471 6472 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
6472 6473 "interrupts are not registered"));
6473 6474 return;
6474 6475 }
6475 6476
6476 6477 if (intrp->intr_enabled) {
6477 6478 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6478 6479 "<== nxge_intrs_enable: already enabled"));
6479 6480 return;
6480 6481 }
6481 6482
6482 6483 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
6483 6484 status = ddi_intr_block_enable(intrp->htable,
6484 6485 intrp->intr_added);
6485 6486 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
6486 6487 "block enable - status 0x%x total inums #%d\n",
6487 6488 status, intrp->intr_added));
6488 6489 } else {
6489 6490 for (i = 0; i < intrp->intr_added; i++) {
6490 6491 status = ddi_intr_enable(intrp->htable[i]);
6491 6492 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
6492 6493 "ddi_intr_enable:enable - status 0x%x "
6493 6494 "total inums %d enable inum #%d\n",
6494 6495 status, intrp->intr_added, i));
6495 6496 if (status == DDI_SUCCESS) {
6496 6497 intrp->intr_enabled = B_TRUE;
6497 6498 }
6498 6499 }
6499 6500 }
6500 6501
6501 6502 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
6502 6503 }
6503 6504
6504 6505 /*ARGSUSED*/
6505 6506 static void
6506 6507 nxge_intrs_disable(p_nxge_t nxgep)
6507 6508 {
6508 6509 p_nxge_intr_t intrp;
6509 6510 int i;
6510 6511
6511 6512 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));
6512 6513
6513 6514 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6514 6515
6515 6516 if (!intrp->intr_registered) {
6516 6517 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
6517 6518 "interrupts are not registered"));
6518 6519 return;
6519 6520 }
6520 6521
6521 6522 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
6522 6523 (void) ddi_intr_block_disable(intrp->htable,
6523 6524 intrp->intr_added);
6524 6525 } else {
6525 6526 for (i = 0; i < intrp->intr_added; i++) {
6526 6527 (void) ddi_intr_disable(intrp->htable[i]);
6527 6528 }
6528 6529 }
6529 6530
6530 6531 intrp->intr_enabled = B_FALSE;
6531 6532 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
6532 6533 }
6533 6534
6534 6535 nxge_status_t
6535 6536 nxge_mac_register(p_nxge_t nxgep)
6536 6537 {
6537 6538 mac_register_t *macp;
6538 6539 int status;
6539 6540
6540 6541 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));
6541 6542
6542 6543 if ((macp = mac_alloc(MAC_VERSION)) == NULL)
6543 6544 return (NXGE_ERROR);
6544 6545
6545 6546 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
6546 6547 macp->m_driver = nxgep;
6547 6548 macp->m_dip = nxgep->dip;
6548 6549 if (!isLDOMguest(nxgep)) {
6549 6550 macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
6550 6551 } else {
6551 6552 macp->m_src_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
6552 6553 macp->m_dst_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
6553 6554 		(void) memset(macp->m_src_addr, 0xff, MAXMACADDRLEN);
6554 6555 }
6555 6556 macp->m_callbacks = &nxge_m_callbacks;
6556 6557 macp->m_min_sdu = 0;
6557 6558 nxgep->mac.default_mtu = nxgep->mac.maxframesize -
6558 6559 NXGE_EHEADER_VLAN_CRC;
6559 6560 macp->m_max_sdu = nxgep->mac.default_mtu;
6560 6561 macp->m_margin = VLAN_TAGSZ;
6561 6562 macp->m_priv_props = nxge_priv_props;
6562 6563 if (isLDOMguest(nxgep))
6563 6564 macp->m_v12n = MAC_VIRT_LEVEL1;
6564 6565 else
6565 6566 macp->m_v12n = MAC_VIRT_HIO | MAC_VIRT_LEVEL1;
6566 6567
6567 6568 NXGE_DEBUG_MSG((nxgep, MAC_CTL,
6568 6569 "==> nxge_mac_register: instance %d "
6569 6570 "max_sdu %d margin %d maxframe %d (header %d)",
6570 6571 nxgep->instance,
6571 6572 macp->m_max_sdu, macp->m_margin,
6572 6573 nxgep->mac.maxframesize,
6573 6574 NXGE_EHEADER_VLAN_CRC));
6574 6575
6575 6576 status = mac_register(macp, &nxgep->mach);
6576 6577 if (isLDOMguest(nxgep)) {
6577 6578 KMEM_FREE(macp->m_src_addr, MAXMACADDRLEN);
6578 6579 KMEM_FREE(macp->m_dst_addr, MAXMACADDRLEN);
6579 6580 }
6580 6581 mac_free(macp);
6581 6582
6582 6583 if (status != 0) {
6583 6584 cmn_err(CE_WARN,
6584 6585 "!nxge_mac_register failed (status %d instance %d)",
6585 6586 status, nxgep->instance);
6586 6587 return (NXGE_ERROR);
6587 6588 }
6588 6589
6589 6590 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
6590 6591 "(instance %d)", nxgep->instance));
6591 6592
6592 6593 return (NXGE_OK);
6593 6594 }
6594 6595
6595 6596 void
6596 6597 nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
6597 6598 {
6598 6599 ssize_t size;
6599 6600 mblk_t *nmp;
6600 6601 uint8_t blk_id;
6601 6602 uint8_t chan;
6602 6603 uint32_t err_id;
6603 6604 err_inject_t *eip;
6604 6605
6605 6606 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));
6606 6607
6607 6608 size = 1024;
6608 6609 nmp = mp->b_cont;
6609 6610 eip = (err_inject_t *)nmp->b_rptr;
6610 6611 blk_id = eip->blk_id;
6611 6612 err_id = eip->err_id;
6612 6613 chan = eip->chan;
6613 6614 cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
6614 6615 cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
6615 6616 cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
6616 6617 switch (blk_id) {
6617 6618 case MAC_BLK_ID:
6618 6619 break;
6619 6620 case TXMAC_BLK_ID:
6620 6621 break;
6621 6622 case RXMAC_BLK_ID:
6622 6623 break;
6623 6624 case MIF_BLK_ID:
6624 6625 break;
6625 6626 case IPP_BLK_ID:
6626 6627 nxge_ipp_inject_err(nxgep, err_id);
6627 6628 break;
6628 6629 case TXC_BLK_ID:
6629 6630 nxge_txc_inject_err(nxgep, err_id);
6630 6631 break;
6631 6632 case TXDMA_BLK_ID:
6632 6633 nxge_txdma_inject_err(nxgep, err_id, chan);
6633 6634 break;
6634 6635 case RXDMA_BLK_ID:
6635 6636 nxge_rxdma_inject_err(nxgep, err_id, chan);
6636 6637 break;
6637 6638 case ZCP_BLK_ID:
6638 6639 nxge_zcp_inject_err(nxgep, err_id);
6639 6640 break;
6640 6641 case ESPC_BLK_ID:
6641 6642 break;
6642 6643 case FFLP_BLK_ID:
6643 6644 break;
6644 6645 case PHY_BLK_ID:
6645 6646 break;
6646 6647 case ETHER_SERDES_BLK_ID:
6647 6648 break;
6648 6649 case PCIE_SERDES_BLK_ID:
6649 6650 break;
6650 6651 case VIR_BLK_ID:
6651 6652 break;
6652 6653 }
6653 6654
6654 6655 nmp->b_wptr = nmp->b_rptr + size;
6655 6656 NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));
6656 6657
6657 6658 miocack(wq, mp, (int)size, 0);
6658 6659 }
6659 6660
6660 6661 static int
6661 6662 nxge_init_common_dev(p_nxge_t nxgep)
6662 6663 {
6663 6664 p_nxge_hw_list_t hw_p;
6664 6665 dev_info_t *p_dip;
6665 6666
6666 6667 ASSERT(nxgep != NULL);
6667 6668
6668 6669 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));
6669 6670
6670 6671 p_dip = nxgep->p_dip;
6671 6672 MUTEX_ENTER(&nxge_common_lock);
6672 6673 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6673 6674 "==> nxge_init_common_dev:func # %d",
6674 6675 nxgep->function_num));
6675 6676 /*
6676 6677 * Loop through existing per neptune hardware list.
6677 6678 */
6678 6679 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
6679 6680 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6680 6681 "==> nxge_init_common_device:func # %d "
6681 6682 "hw_p $%p parent dip $%p",
6682 6683 nxgep->function_num,
6683 6684 hw_p,
6684 6685 p_dip));
6685 6686 if (hw_p->parent_devp == p_dip) {
6686 6687 nxgep->nxge_hw_p = hw_p;
6687 6688 hw_p->ndevs++;
6688 6689 hw_p->nxge_p[nxgep->function_num] = nxgep;
6689 6690 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6690 6691 "==> nxge_init_common_device:func # %d "
6691 6692 "hw_p $%p parent dip $%p "
6692 6693 "ndevs %d (found)",
6693 6694 nxgep->function_num,
6694 6695 hw_p,
6695 6696 p_dip,
6696 6697 hw_p->ndevs));
6697 6698 break;
6698 6699 }
6699 6700 }
6700 6701
6701 6702 if (hw_p == NULL) {
6702 6703
6703 6704 char **prop_val;
6704 6705 uint_t prop_len;
6705 6706 int i;
6706 6707
6707 6708 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6708 6709 "==> nxge_init_common_device:func # %d "
6709 6710 "parent dip $%p (new)",
6710 6711 nxgep->function_num,
6711 6712 p_dip));
6712 6713 hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
6713 6714 hw_p->parent_devp = p_dip;
6714 6715 hw_p->magic = NXGE_NEPTUNE_MAGIC;
6715 6716 nxgep->nxge_hw_p = hw_p;
6716 6717 hw_p->ndevs++;
6717 6718 hw_p->nxge_p[nxgep->function_num] = nxgep;
6718 6719 hw_p->next = nxge_hw_list;
6719 6720 if (nxgep->niu_type == N2_NIU) {
6720 6721 hw_p->niu_type = N2_NIU;
6721 6722 hw_p->platform_type = P_NEPTUNE_NIU;
6722 6723 hw_p->tcam_size = TCAM_NIU_TCAM_MAX_ENTRY;
6723 6724 } else {
6724 6725 hw_p->niu_type = NIU_TYPE_NONE;
6725 6726 hw_p->platform_type = P_NEPTUNE_NONE;
6726 6727 hw_p->tcam_size = TCAM_NXGE_TCAM_MAX_ENTRY;
6727 6728 }
6728 6729
6729 6730 hw_p->tcam = KMEM_ZALLOC(sizeof (tcam_flow_spec_t) *
6730 6731 hw_p->tcam_size, KM_SLEEP);
6731 6732
6732 6733 MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
6733 6734 MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
6734 6735 MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
6735 6736 MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);
6736 6737
6737 6738 nxge_hw_list = hw_p;
6738 6739
6739 6740 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nxgep->dip, 0,
6740 6741 "compatible", &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
6741 6742 for (i = 0; i < prop_len; i++) {
6742 6743 if ((strcmp((caddr_t)prop_val[i],
6743 6744 NXGE_ROCK_COMPATIBLE) == 0)) {
6744 6745 hw_p->platform_type = P_NEPTUNE_ROCK;
6745 6746 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6746 6747 "ROCK hw_p->platform_type %d",
6747 6748 hw_p->platform_type));
6748 6749 break;
6749 6750 }
6750 6751 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6751 6752 "nxge_init_common_dev: read compatible"
6752 6753 " property[%d] val[%s]",
6753 6754 i, (caddr_t)prop_val[i]));
6754 6755 }
6755 6756 			ddi_prop_free(prop_val);
6756 6757 		}
6758 6759
6759 6760 (void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
6760 6761 }
6761 6762
6762 6763 MUTEX_EXIT(&nxge_common_lock);
6763 6764
6764 6765 nxgep->platform_type = hw_p->platform_type;
6765 6766 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxgep->platform_type %d",
6766 6767 nxgep->platform_type));
6767 6768 if (nxgep->niu_type != N2_NIU) {
6768 6769 nxgep->niu_type = hw_p->niu_type;
6769 6770 }
6770 6771
6771 6772 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6772 6773 "==> nxge_init_common_device (nxge_hw_list) $%p",
6773 6774 nxge_hw_list));
6774 6775 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));
6775 6776
6776 6777 return (NXGE_OK);
6777 6778 }
6778 6779
6779 6780 static void
6780 6781 nxge_uninit_common_dev(p_nxge_t nxgep)
6781 6782 {
6782 6783 p_nxge_hw_list_t hw_p, h_hw_p;
6783 6784 p_nxge_dma_pt_cfg_t p_dma_cfgp;
6784 6785 p_nxge_hw_pt_cfg_t p_cfgp;
6785 6786 dev_info_t *p_dip;
6786 6787
6787 6788 ASSERT(nxgep != NULL);
6788 6789
6789 6790 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
6790 6791 if (nxgep->nxge_hw_p == NULL) {
6791 6792 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6792 6793 "<== nxge_uninit_common_device (no common)"));
6793 6794 return;
6794 6795 }
6795 6796
6796 6797 MUTEX_ENTER(&nxge_common_lock);
6797 6798 h_hw_p = nxge_hw_list;
6798 6799 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
6799 6800 p_dip = hw_p->parent_devp;
6800 6801 if (nxgep->nxge_hw_p == hw_p &&
6801 6802 p_dip == nxgep->p_dip &&
6802 6803 nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
6803 6804 hw_p->magic == NXGE_NEPTUNE_MAGIC) {
6804 6805
6805 6806 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6806 6807 "==> nxge_uninit_common_device:func # %d "
6807 6808 "hw_p $%p parent dip $%p "
6808 6809 "ndevs %d (found)",
6809 6810 nxgep->function_num,
6810 6811 hw_p,
6811 6812 p_dip,
6812 6813 hw_p->ndevs));
6813 6814
6814 6815 /*
6815 6816 			 * Release the RDC table, a shared resource
6816 6817 * of the nxge hardware. The RDC table was
6817 6818 * assigned to this instance of nxge in
6818 6819 * nxge_use_cfg_dma_config().
6819 6820 */
6820 6821 if (!isLDOMguest(nxgep)) {
6821 6822 p_dma_cfgp =
6822 6823 (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
6823 6824 p_cfgp =
6824 6825 (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
6825 6826 (void) nxge_fzc_rdc_tbl_unbind(nxgep,
6826 6827 p_cfgp->def_mac_rxdma_grpid);
6827 6828
6828 6829 /* Cleanup any outstanding groups. */
6829 6830 nxge_grp_cleanup(nxgep);
6830 6831 }
6831 6832
6832 6833 if (hw_p->ndevs) {
6833 6834 hw_p->ndevs--;
6834 6835 }
6835 6836 hw_p->nxge_p[nxgep->function_num] = NULL;
6836 6837 if (!hw_p->ndevs) {
6837 6838 KMEM_FREE(hw_p->tcam,
6838 6839 sizeof (tcam_flow_spec_t) *
6839 6840 hw_p->tcam_size);
6840 6841 MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
6841 6842 MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
6842 6843 MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
6843 6844 MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
6844 6845 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6845 6846 "==> nxge_uninit_common_device: "
6846 6847 "func # %d "
6847 6848 "hw_p $%p parent dip $%p "
6848 6849 "ndevs %d (last)",
6849 6850 nxgep->function_num,
6850 6851 hw_p,
6851 6852 p_dip,
6852 6853 hw_p->ndevs));
6853 6854
6854 6855 nxge_hio_uninit(nxgep);
6855 6856
6856 6857 if (hw_p == nxge_hw_list) {
6857 6858 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6858 6859 "==> nxge_uninit_common_device:"
6859 6860 "remove head func # %d "
6860 6861 "hw_p $%p parent dip $%p "
6861 6862 "ndevs %d (head)",
6862 6863 nxgep->function_num,
6863 6864 hw_p,
6864 6865 p_dip,
6865 6866 hw_p->ndevs));
6866 6867 nxge_hw_list = hw_p->next;
6867 6868 } else {
6868 6869 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6869 6870 "==> nxge_uninit_common_device:"
6870 6871 "remove middle func # %d "
6871 6872 "hw_p $%p parent dip $%p "
6872 6873 "ndevs %d (middle)",
6873 6874 nxgep->function_num,
6874 6875 hw_p,
6875 6876 p_dip,
6876 6877 hw_p->ndevs));
6877 6878 h_hw_p->next = hw_p->next;
6878 6879 }
6879 6880
6880 6881 nxgep->nxge_hw_p = NULL;
6881 6882 KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
6882 6883 }
6883 6884 break;
6884 6885 } else {
6885 6886 h_hw_p = hw_p;
6886 6887 }
6887 6888 }
6888 6889
6889 6890 MUTEX_EXIT(&nxge_common_lock);
6890 6891 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6891 6892 "==> nxge_uninit_common_device (nxge_hw_list) $%p",
6892 6893 nxge_hw_list));
6893 6894
6894 6895 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
6895 6896 }
6896 6897
6897 6898 /*
6898 6899 * Determines the number of ports from the niu_type or the platform type.
6899 6900 * Returns the number of ports, or returns zero on failure.
6900 6901 */
6901 6902
6902 6903 int
6903 6904 nxge_get_nports(p_nxge_t nxgep)
6904 6905 {
6905 6906 int nports = 0;
6906 6907
6907 6908 switch (nxgep->niu_type) {
6908 6909 case N2_NIU:
6909 6910 case NEPTUNE_2_10GF:
6910 6911 nports = 2;
6911 6912 break;
6912 6913 case NEPTUNE_4_1GC:
6913 6914 case NEPTUNE_2_10GF_2_1GC:
6914 6915 case NEPTUNE_1_10GF_3_1GC:
6915 6916 case NEPTUNE_1_1GC_1_10GF_2_1GC:
6916 6917 case NEPTUNE_2_10GF_2_1GRF:
6917 6918 nports = 4;
6918 6919 break;
6919 6920 default:
6920 6921 switch (nxgep->platform_type) {
6921 6922 case P_NEPTUNE_NIU:
6922 6923 case P_NEPTUNE_ATLAS_2PORT:
6923 6924 nports = 2;
6924 6925 break;
6925 6926 case P_NEPTUNE_ATLAS_4PORT:
6926 6927 case P_NEPTUNE_MARAMBA_P0:
6927 6928 case P_NEPTUNE_MARAMBA_P1:
6928 6929 case P_NEPTUNE_ROCK:
6929 6930 case P_NEPTUNE_ALONSO:
6930 6931 nports = 4;
6931 6932 break;
6932 6933 default:
6933 6934 break;
6934 6935 }
6935 6936 break;
6936 6937 }
6937 6938
6938 6939 return (nports);
6939 6940 }
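
Because nxge_get_nports() signals failure by returning zero rather than a
negative value, callers have to test for that case explicitly. A minimal,
hypothetical caller (not taken from this driver):

	int	i, nports;

	if ((nports = nxge_get_nports(nxgep)) == 0)
		return (NXGE_ERROR);	/* unrecognized NIU/platform type */

	for (i = 0; i < nports; i++) {
		/* per-port setup would go here */
	}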
6940 6941
6941 6942 /*
6942 6943 * The following two functions are to support
6943 6944 * PSARC/2007/453 MSI-X interrupt limit override.
6944 6945 */
6945 6946 static int
6946 6947 nxge_create_msi_property(p_nxge_t nxgep)
6947 6948 {
6948 6949 int nmsi;
6949 6950 extern int ncpus;
6950 6951
6951 6952 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property"));
6952 6953
6953 6954 switch (nxgep->mac.portmode) {
6954 6955 case PORT_10G_COPPER:
6955 6956 case PORT_10G_FIBER:
6956 6957 case PORT_10G_TN1010:
6957 6958 (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
6958 6959 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
6959 6960 /*
6960 6961 		 * The maximum number of MSI-X vectors requested will be 8.
6961 6962 		 * If the # of CPUs is less than 8, the request will be
6962 6963 		 * scaled down to the # of CPUs (the default behavior).
6963 6964 */
6964 6965 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6965 6966 "==>nxge_create_msi_property (10G): nxge_msix_10g_intrs %d",
6966 6967 nxge_msix_10g_intrs));
6967 6968 if ((nxge_msix_10g_intrs == 0) ||
6968 6969 (nxge_msix_10g_intrs > NXGE_MSIX_MAX_ALLOWED)) {
6969 6970 nmsi = NXGE_MSIX_REQUEST_10G;
6970 6971 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6971 6972 "==>nxge_create_msi_property (10G): reset to 8"));
6972 6973 } else {
6973 6974 nmsi = nxge_msix_10g_intrs;
6974 6975 }
6975 6976
6976 6977 /*
6977 6978 		 * If the # of interrupts requested is 8 (the default),
6978 6979 		 * the check against the number of CPUs will still
6979 6980 		 * be applied.
6980 6981 */
6981 6982 if ((nmsi == NXGE_MSIX_REQUEST_10G) &&
6982 6983 (ncpus < nmsi)) {
6983 6984 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6984 6985 			    "==>nxge_create_msi_property (10G): reset to ncpus"));
6985 6986 nmsi = ncpus;
6986 6987 }
6987 6988 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6988 6989 "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
6989 6990 ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
6990 6991 DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
6991 6992 break;
6992 6993
6993 6994 default:
6994 6995 (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
6995 6996 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
6996 6997 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6997 6998 "==>nxge_create_msi_property (1G): nxge_msix_1g_intrs %d",
6998 6999 nxge_msix_1g_intrs));
6999 7000 if ((nxge_msix_1g_intrs == 0) ||
7000 7001 (nxge_msix_1g_intrs > NXGE_MSIX_MAX_ALLOWED)) {
7001 7002 nmsi = NXGE_MSIX_REQUEST_1G;
7002 7003 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
7003 7004 "==>nxge_create_msi_property (1G): reset to 2"));
7004 7005 } else {
7005 7006 nmsi = nxge_msix_1g_intrs;
7006 7007 }
7007 7008 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
7008 7009 "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
7009 7010 ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
7010 7011 DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
7011 7012 break;
7012 7013 }
7013 7014
7014 7015 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property"));
7015 7016 return (nmsi);
7016 7017 }
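
The request-size policy above reduces to: if the nxge_msix_10g_intrs (or
nxge_msix_1g_intrs) tunable is zero or larger than NXGE_MSIX_MAX_ALLOWED, fall
back to the per-speed default, and only the default 10G request of 8 is then
capped at the number of CPUs. A compressed sketch of the 10G branch of that
policy (the helper name is hypothetical; the real logic is inline above):

	/* Hypothetical helper restating the 10G MSI-X request policy above. */
	static int
	nxge_msix_request_10g_sketch(int tunable, int ncpus)
	{
		int	nmsi;

		if (tunable == 0 || tunable > NXGE_MSIX_MAX_ALLOWED)
			nmsi = NXGE_MSIX_REQUEST_10G;	/* fall back to 8 */
		else
			nmsi = tunable;

		/* Only the default request of 8 is scaled down to ncpus. */
		if (nmsi == NXGE_MSIX_REQUEST_10G && ncpus < nmsi)
			nmsi = ncpus;

		return (nmsi);
	}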
7017 7018
7018 7019 /*
7019 7020  * The following is a software workaround for the Neptune hardware's
7020 7021  * interrupt bug; the Neptune hardware may generate spurious interrupts after
7021 7022  * an interrupt handler has been removed.
7022 7023 */
7023 7024 #define NXGE_PCI_PORT_LOGIC_OFFSET 0x98
7024 7025 #define NXGE_PIM_RESET (1ULL << 29)
7025 7026 #define NXGE_GLU_RESET (1ULL << 30)
7026 7027 #define NXGE_NIU_RESET (1ULL << 31)
7027 7028 #define NXGE_PCI_RESET_ALL (NXGE_PIM_RESET | \
7028 7029 NXGE_GLU_RESET | \
7029 7030 NXGE_NIU_RESET)
7030 7031
7031 7032 #define NXGE_WAIT_QUITE_TIME 200000
7032 7033 #define NXGE_WAIT_QUITE_RETRY 40
7033 7034 #define NXGE_PCI_RESET_WAIT 1000000 /* one second */
7034 7035
7035 7036 static void
7036 7037 nxge_niu_peu_reset(p_nxge_t nxgep)
7037 7038 {
7038 7039 uint32_t rvalue;
7039 7040 p_nxge_hw_list_t hw_p;
7040 7041 p_nxge_t fnxgep;
7041 7042 int i, j;
7042 7043
7043 7044 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_niu_peu_reset"));
7044 7045 if ((hw_p = nxgep->nxge_hw_p) == NULL) {
7045 7046 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
7046 7047 "==> nxge_niu_peu_reset: NULL hardware pointer"));
7047 7048 return;
7048 7049 }
7049 7050
7050 7051 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
7051 7052 "==> nxge_niu_peu_reset: flags 0x%x link timer id %d timer id %d",
7052 7053 hw_p->flags, nxgep->nxge_link_poll_timerid,
7053 7054 nxgep->nxge_timerid));
7054 7055
7055 7056 MUTEX_ENTER(&hw_p->nxge_cfg_lock);
7056 7057 /*
7057 7058 * Make sure other instances from the same hardware
7058 7059 	 * stop sending PIOs and are in a quiescent state.
7059 7060 */
7060 7061 for (i = 0; i < NXGE_MAX_PORTS; i++) {
7061 7062 fnxgep = hw_p->nxge_p[i];
7062 7063 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
7063 7064 "==> nxge_niu_peu_reset: checking entry %d "
7064 7065 "nxgep $%p", i, fnxgep));
7065 7066 #ifdef NXGE_DEBUG
7066 7067 if (fnxgep) {
7067 7068 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
7068 7069 "==> nxge_niu_peu_reset: entry %d (function %d) "
7069 7070 "link timer id %d hw timer id %d",
7070 7071 i, fnxgep->function_num,
7071 7072 fnxgep->nxge_link_poll_timerid,
7072 7073 fnxgep->nxge_timerid));
7073 7074 }
7074 7075 #endif
7075 7076 if (fnxgep && fnxgep != nxgep &&
7076 7077 (fnxgep->nxge_timerid || fnxgep->nxge_link_poll_timerid)) {
7077 7078 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
7078 7079 "==> nxge_niu_peu_reset: checking $%p "
7079 7080 "(function %d) timer ids",
7080 7081 fnxgep, fnxgep->function_num));
7081 7082 for (j = 0; j < NXGE_WAIT_QUITE_RETRY; j++) {
7082 7083 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
7083 7084 "==> nxge_niu_peu_reset: waiting"));
7084 7085 NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
7085 7086 if (!fnxgep->nxge_timerid &&
7086 7087 !fnxgep->nxge_link_poll_timerid) {
7087 7088 break;
7088 7089 }
7089 7090 }
7090 7091 NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
7091 7092 if (fnxgep->nxge_timerid ||
7092 7093 fnxgep->nxge_link_poll_timerid) {
7093 7094 MUTEX_EXIT(&hw_p->nxge_cfg_lock);
7094 7095 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
7095 7096 "<== nxge_niu_peu_reset: cannot reset "
7096 7097 "hardware (devices are still in use)"));
7097 7098 return;
7098 7099 }
7099 7100 }
7100 7101 }
7101 7102
7102 7103 if ((hw_p->flags & COMMON_RESET_NIU_PCI) != COMMON_RESET_NIU_PCI) {
7103 7104 hw_p->flags |= COMMON_RESET_NIU_PCI;
7104 7105 rvalue = pci_config_get32(nxgep->dev_regs->nxge_pciregh,
7105 7106 NXGE_PCI_PORT_LOGIC_OFFSET);
7106 7107 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
7107 7108 "nxge_niu_peu_reset: read offset 0x%x (%d) "
7108 7109 "(data 0x%x)",
7109 7110 NXGE_PCI_PORT_LOGIC_OFFSET,
7110 7111 NXGE_PCI_PORT_LOGIC_OFFSET,
7111 7112 rvalue));
7112 7113
7113 7114 rvalue |= NXGE_PCI_RESET_ALL;
7114 7115 pci_config_put32(nxgep->dev_regs->nxge_pciregh,
7115 7116 NXGE_PCI_PORT_LOGIC_OFFSET, rvalue);
7116 7117 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
7117 7118 "nxge_niu_peu_reset: RESETTING NIU: write NIU reset 0x%x",
7118 7119 rvalue));
7119 7120
7120 7121 NXGE_DELAY(NXGE_PCI_RESET_WAIT);
7121 7122 }
7122 7123
7123 7124 MUTEX_EXIT(&hw_p->nxge_cfg_lock);
7124 7125 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_niu_peu_reset"));
7125 7126 }
7126 7127
7127 7128 static void
7128 7129 nxge_set_pci_replay_timeout(p_nxge_t nxgep)
7129 7130 {
7130 7131 p_dev_regs_t dev_regs;
7131 7132 uint32_t value;
7132 7133
7133 7134 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_pci_replay_timeout"));
7134 7135
7135 7136 if (!nxge_set_replay_timer) {
7136 7137 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
7137 7138 "==> nxge_set_pci_replay_timeout: will not change "
7138 7139 "the timeout"));
7139 7140 return;
7140 7141 }
7141 7142
7142 7143 dev_regs = nxgep->dev_regs;
7143 7144 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
7144 7145 "==> nxge_set_pci_replay_timeout: dev_regs 0x%p pcireg 0x%p",
7145 7146 dev_regs, dev_regs->nxge_pciregh));
7146 7147
7147 7148 if (dev_regs == NULL || (dev_regs->nxge_pciregh == NULL)) {
7148 7149 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
7149 7150 "==> nxge_set_pci_replay_timeout: NULL dev_regs $%p or "
7150 7151 "no PCI handle",
7151 7152 dev_regs));
7152 7153 return;
7153 7154 }
7154 7155 value = (pci_config_get32(dev_regs->nxge_pciregh,
7155 7156 PCI_REPLAY_TIMEOUT_CFG_OFFSET) |
7156 7157 (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT));
7157 7158
7158 7159 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
7159 7160 "nxge_set_pci_replay_timeout: replay timeout value before set 0x%x "
7160 7161 "(timeout value to set 0x%x at offset 0x%x) value 0x%x",
7161 7162 pci_config_get32(dev_regs->nxge_pciregh,
7162 7163 PCI_REPLAY_TIMEOUT_CFG_OFFSET), nxge_replay_timeout,
7163 7164 PCI_REPLAY_TIMEOUT_CFG_OFFSET, value));
7164 7165
7165 7166 pci_config_put32(dev_regs->nxge_pciregh, PCI_REPLAY_TIMEOUT_CFG_OFFSET,
7166 7167 value);
7167 7168
7168 7169 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
7169 7170 "nxge_set_pci_replay_timeout: replay timeout value after set 0x%x",
7170 7171 pci_config_get32(dev_regs->nxge_pciregh,
7171 7172 PCI_REPLAY_TIMEOUT_CFG_OFFSET)));
7172 7173
7173 7174 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_pci_replay_timeout"));
7174 7175 }
7175 7176
7176 7177 /*
7177 7178 * quiesce(9E) entry point.
7178 7179 *
7179 7180 * This function is called when the system is single-threaded at high
7180 7181 * PIL with preemption disabled. Therefore, this function must not be
7181 7182 * blocked.
7182 7183 *
7183 7184 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
7184 7185 * DDI_FAILURE indicates an error condition and should almost never happen.
7185 7186 */
7186 7187 static int
7187 7188 nxge_quiesce(dev_info_t *dip)
7188 7189 {
7189 7190 int instance = ddi_get_instance(dip);
7190 7191 p_nxge_t nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
7191 7192
7192 7193 if (nxgep == NULL)
7193 7194 return (DDI_FAILURE);
7194 7195
7195 7196 /* Turn off debugging */
7196 7197 nxge_debug_level = NO_DEBUG;
7197 7198 nxgep->nxge_debug_level = NO_DEBUG;
7198 7199 npi_debug_level = NO_DEBUG;
7199 7200
7200 7201 /*
7201 7202 * Stop link monitor only when linkchkmod is interrupt based
7202 7203 */
7203 7204 if (nxgep->mac.linkchkmode == LINKCHK_INTR) {
7204 7205 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
7205 7206 }
7206 7207
7207 7208 (void) nxge_intr_hw_disable(nxgep);
7208 7209
7209 7210 /*
7210 7211 * Reset the receive MAC side.
7211 7212 */
7212 7213 (void) nxge_rx_mac_disable(nxgep);
7213 7214
7214 7215 /* Disable and soft reset the IPP */
7215 7216 if (!isLDOMguest(nxgep))
7216 7217 (void) nxge_ipp_disable(nxgep);
7217 7218
7218 7219 /*
7219 7220 * Reset the transmit/receive DMA side.
7220 7221 */
7221 7222 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
7222 7223 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
7223 7224
7224 7225 /*
7225 7226 * Reset the transmit MAC side.
7226 7227 */
7227 7228 (void) nxge_tx_mac_disable(nxgep);
7228 7229
7229 7230 return (DDI_SUCCESS);
7230 7231 }
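
A quiesce(9E) routine such as the one above only takes effect once it is
registered in the driver's dev_ops; illumos GLD drivers normally do this with
the DDI_DEFINE_STREAM_OPS() macro. A hedged sketch of that registration (this
driver's real dev_ops declaration appears earlier in the file and is not
reproduced here; the ops name below is illustrative):

	/*
	 * Illustrative only: wiring a quiesce(9E) handler into dev_ops via
	 * DDI_DEFINE_STREAM_OPS(name, identify, probe, attach, detach,
	 *     reset, getinfo, flag, stream_tab, quiesce).
	 */
	DDI_DEFINE_STREAM_OPS(nxge_dev_ops_sketch, nulldev, nulldev,
	    nxge_attach, nxge_detach, nodev, NULL, D_MP, NULL, nxge_quiesce);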
↓ open down ↓ |
2072 lines elided |
↑ open up ↑ |