Print this page
6601 Various GLD drivers return EINVAL instead of ENOTSUP for unused mac_prop_id_t's
Reviewed by: Garrett D'Amore <garrett@damore.org>
Reviewed by: Dan McDonald <danmcd@omniti.com>
Reviewed by: Igor Kozhukhov <ikozhukhov@gmail.com>
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/hxge/hxge_main.c
+++ new/usr/src/uts/common/io/hxge/hxge_main.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
↓ open down ↓ |
14 lines elided |
↑ open up ↑ |
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 * Copyright 2012 Milan Jurik. All rights reserved.
25 + * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
25 26 */
26 27
27 28 /*
28 29 * SunOs MT STREAMS Hydra 10Gb Ethernet Device Driver.
29 30 */
30 31 #include <hxge_impl.h>
31 32 #include <hxge_pfc.h>
32 33
33 34 /*
34 35 * PSARC/2007/453 MSI-X interrupt limit override
35 36 * (This PSARC case is limited to MSI-X vectors
36 37 * and SPARC platforms only).
37 38 */
38 39 uint32_t hxge_msi_enable = 2;
39 40
40 41 /*
41 42 * Globals: tunable parameters (/etc/system or adb)
42 43 *
43 44 */
44 45 uint32_t hxge_rbr_size = HXGE_RBR_RBB_DEFAULT;
45 46 uint32_t hxge_rbr_spare_size = 0;
46 47 uint32_t hxge_rcr_size = HXGE_RCR_DEFAULT;
47 48 uint32_t hxge_tx_ring_size = HXGE_TX_RING_DEFAULT;
48 49 uint32_t hxge_bcopy_thresh = TX_BCOPY_MAX;
49 50 uint32_t hxge_dvma_thresh = TX_FASTDVMA_MIN;
50 51 uint32_t hxge_dma_stream_thresh = TX_STREAM_MIN;
51 52 uint32_t hxge_jumbo_frame_size = MAX_FRAME_SIZE;
52 53
53 54 static hxge_os_mutex_t hxgedebuglock;
54 55 static int hxge_debug_init = 0;
55 56
56 57 /*
57 58 * Debugging flags:
58 59 * hxge_no_tx_lb : transmit load balancing
59 60 * hxge_tx_lb_policy: 0 - TCP/UDP port (default)
60 61 * 1 - From the Stack
61 62 * 2 - Destination IP Address
62 63 */
63 64 uint32_t hxge_no_tx_lb = 0;
64 65 uint32_t hxge_tx_lb_policy = HXGE_TX_LB_TCPUDP;
65 66
66 67 /*
67 68 * Tunables to manage the receive buffer blocks.
68 69 *
69 70 * hxge_rx_threshold_hi: copy all buffers.
70 71 * hxge_rx_bcopy_size_type: receive buffer block size type.
71 72 * hxge_rx_threshold_lo: copy only up to tunable block size type.
72 73 */
73 74 #if defined(__sparc)
74 75 hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_6;
75 76 hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_4;
76 77 #else
77 78 hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_NONE;
78 79 hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_NONE;
79 80 #endif
80 81 hxge_rxbuf_type_t hxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
81 82
82 83 rtrace_t hpi_rtracebuf;
83 84
84 85 /*
85 86 * Function Prototypes
86 87 */
87 88 static int hxge_attach(dev_info_t *, ddi_attach_cmd_t);
88 89 static int hxge_detach(dev_info_t *, ddi_detach_cmd_t);
89 90 static void hxge_unattach(p_hxge_t);
90 91
91 92 static hxge_status_t hxge_setup_system_dma_pages(p_hxge_t);
92 93
93 94 static hxge_status_t hxge_setup_mutexes(p_hxge_t);
94 95 static void hxge_destroy_mutexes(p_hxge_t);
95 96
96 97 static hxge_status_t hxge_map_regs(p_hxge_t hxgep);
97 98 static void hxge_unmap_regs(p_hxge_t hxgep);
98 99
99 100 static hxge_status_t hxge_add_intrs(p_hxge_t hxgep);
100 101 static void hxge_remove_intrs(p_hxge_t hxgep);
101 102 static hxge_status_t hxge_add_intrs_adv(p_hxge_t hxgep);
102 103 static hxge_status_t hxge_add_intrs_adv_type(p_hxge_t, uint32_t);
103 104 static hxge_status_t hxge_add_intrs_adv_type_fix(p_hxge_t, uint32_t);
104 105 static void hxge_intrs_enable(p_hxge_t hxgep);
105 106 static void hxge_intrs_disable(p_hxge_t hxgep);
106 107 static void hxge_suspend(p_hxge_t);
107 108 static hxge_status_t hxge_resume(p_hxge_t);
108 109 static hxge_status_t hxge_setup_dev(p_hxge_t);
109 110 static void hxge_destroy_dev(p_hxge_t);
110 111 static hxge_status_t hxge_alloc_mem_pool(p_hxge_t);
111 112 static void hxge_free_mem_pool(p_hxge_t);
112 113 static hxge_status_t hxge_alloc_rx_mem_pool(p_hxge_t);
113 114 static void hxge_free_rx_mem_pool(p_hxge_t);
114 115 static hxge_status_t hxge_alloc_tx_mem_pool(p_hxge_t);
115 116 static void hxge_free_tx_mem_pool(p_hxge_t);
116 117 static hxge_status_t hxge_dma_mem_alloc(p_hxge_t, dma_method_t,
117 118 struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
118 119 p_hxge_dma_common_t);
119 120 static void hxge_dma_mem_free(p_hxge_dma_common_t);
120 121 static hxge_status_t hxge_alloc_rx_buf_dma(p_hxge_t, uint16_t,
121 122 p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
122 123 static void hxge_free_rx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
123 124 static hxge_status_t hxge_alloc_rx_cntl_dma(p_hxge_t, uint16_t,
124 125 p_hxge_dma_common_t *, struct ddi_dma_attr *, size_t);
125 126 static void hxge_free_rx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
126 127 static hxge_status_t hxge_alloc_tx_buf_dma(p_hxge_t, uint16_t,
127 128 p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
128 129 static void hxge_free_tx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
129 130 static hxge_status_t hxge_alloc_tx_cntl_dma(p_hxge_t, uint16_t,
130 131 p_hxge_dma_common_t *, size_t);
131 132 static void hxge_free_tx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
132 133 static int hxge_init_common_dev(p_hxge_t);
133 134 static void hxge_uninit_common_dev(p_hxge_t);
134 135
135 136 /*
136 137 * The next declarations are for the GLDv3 interface.
137 138 */
138 139 static int hxge_m_start(void *);
139 140 static void hxge_m_stop(void *);
140 141 static int hxge_m_multicst(void *, boolean_t, const uint8_t *);
141 142 static int hxge_m_promisc(void *, boolean_t);
142 143 static void hxge_m_ioctl(void *, queue_t *, mblk_t *);
143 144 static hxge_status_t hxge_mac_register(p_hxge_t hxgep);
144 145
145 146 static boolean_t hxge_m_getcapab(void *, mac_capab_t, void *);
146 147 static boolean_t hxge_param_locked(mac_prop_id_t pr_num);
147 148 static int hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
148 149 uint_t pr_valsize, const void *pr_val);
149 150 static int hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
150 151 uint_t pr_valsize, void *pr_val);
151 152 static void hxge_m_propinfo(void *barg, const char *pr_name,
152 153 mac_prop_id_t pr_num, mac_prop_info_handle_t mph);
153 154 static int hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name,
154 155 uint_t pr_valsize, const void *pr_val);
155 156 static int hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name,
156 157 uint_t pr_valsize, void *pr_val);
157 158 static void hxge_link_poll(void *arg);
158 159 static void hxge_link_update(p_hxge_t hxge, link_state_t state);
159 160 static void hxge_msix_init(p_hxge_t hxgep);
160 161
/*
 * Names of the driver-private properties exposed through the GLDv3
 * setprop/getprop/propinfo callbacks.  The list is NULL-terminated.
 */
char *hxge_priv_props[] = {
	"_rxdma_intr_time",
	"_rxdma_intr_pkts",
	"_class_opt_ipv4_tcp",
	"_class_opt_ipv4_udp",
	"_class_opt_ipv4_ah",
	"_class_opt_ipv4_sctp",
	"_class_opt_ipv6_tcp",
	"_class_opt_ipv6_udp",
	"_class_opt_ipv6_ah",
	"_class_opt_ipv6_sctp",
	NULL
};

/*
 * Number of entries in hxge_priv_props (including the terminating NULL).
 *
 * Bug fix: the divisor was sizeof (mac_priv_prop_t), a struct type
 * unrelated to the element type of hxge_priv_props (char *), so the
 * macro did not compute the element count.  Use the size of an actual
 * array element (the standard array-count idiom) instead.
 */
#define	HXGE_MAX_PRIV_PROPS	\
	(sizeof (hxge_priv_props) / sizeof (hxge_priv_props[0]))
177 178
178 179 #define HXGE_MAGIC 0x4E584745UL
179 180 #define MAX_DUMP_SZ 256
180 181
181 182 #define HXGE_M_CALLBACK_FLAGS \
182 183 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
183 184
184 185 extern hxge_status_t hxge_pfc_set_default_mac_addr(p_hxge_t hxgep);
185 186
/*
 * GLDv3 callback vector registered with the mac layer (see
 * hxge_mac_register()).  The initializer is positional, so the entry
 * order is fixed by mac_callbacks_t -- do not reorder.  The NULL slots
 * are optional callbacks this driver does not implement; the flags in
 * HXGE_M_CALLBACK_FLAGS advertise which optional entry points are
 * present.
 */
186 187 static mac_callbacks_t hxge_m_callbacks = {
187 188 HXGE_M_CALLBACK_FLAGS,
188 189 hxge_m_stat,
189 190 hxge_m_start,
190 191 hxge_m_stop,
191 192 hxge_m_promisc,
192 193 hxge_m_multicst,
193 194 NULL,
194 195 NULL,
195 196 NULL,
196 197 hxge_m_ioctl,
197 198 hxge_m_getcapab,
198 199 NULL,
199 200 NULL,
200 201 hxge_m_setprop,
201 202 hxge_m_getprop,
202 203 hxge_m_propinfo
203 204 };
204 205
205 206 /* PSARC/2007/453 MSI-X interrupt limit override. */
206 207 #define HXGE_MSIX_REQUEST_10G 8
207 208 static int hxge_create_msi_property(p_hxge_t);
208 209
209 210 /* Enable debug messages as necessary. */
210 211 uint64_t hxge_debug_level = 0;
211 212
212 213 /*
213 214 * This list contains the instance structures for the Hydra
214 215 * devices present in the system. The lock exists to guarantee
215 216 * mutually exclusive access to the list.
216 217 */
217 218 void *hxge_list = NULL;
218 219 void *hxge_hw_list = NULL;
219 220 hxge_os_mutex_t hxge_common_lock;
220 221
221 222 extern uint64_t hpi_debug_level;
222 223
223 224 extern hxge_status_t hxge_ldgv_init(p_hxge_t, int *, int *);
224 225 extern hxge_status_t hxge_ldgv_uninit(p_hxge_t);
225 226 extern hxge_status_t hxge_intr_ldgv_init(p_hxge_t);
226 227 extern void hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr,
227 228 ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr);
228 229 extern void hxge_fm_fini(p_hxge_t hxgep);
229 230
230 231 /*
231 232 * Count used to maintain the number of buffers being used
232 233 * by Hydra instances and loaned up to the upper layers.
233 234 */
234 235 uint32_t hxge_mblks_pending = 0;
235 236
236 237 /*
237 238 * Device register access attributes for PIO.
238 239 */
239 240 static ddi_device_acc_attr_t hxge_dev_reg_acc_attr = {
240 241 DDI_DEVICE_ATTR_V0,
241 242 DDI_STRUCTURE_LE_ACC,
242 243 DDI_STRICTORDER_ACC,
243 244 };
244 245
245 246 /*
246 247 * Device descriptor access attributes for DMA.
247 248 */
248 249 static ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr = {
249 250 DDI_DEVICE_ATTR_V0,
250 251 DDI_STRUCTURE_LE_ACC,
251 252 DDI_STRICTORDER_ACC
252 253 };
253 254
254 255 /*
255 256 * Device buffer access attributes for DMA.
256 257 */
257 258 static ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr = {
258 259 DDI_DEVICE_ATTR_V0,
259 260 DDI_STRUCTURE_BE_ACC,
260 261 DDI_STRICTORDER_ACC
261 262 };
262 263
/*
 * DMA attributes for the receive completion ring (RCR) descriptor
 * area: single contiguous segment, 0x80000-byte alignment.
 */
263 264 ddi_dma_attr_t hxge_rx_rcr_desc_dma_attr = {
264 265 DMA_ATTR_V0, /* version number. */
265 266 0, /* low address */
266 267 0xffffffffffffffff, /* high address */
267 268 0xffffffffffffffff, /* address counter max */
268 269 0x80000, /* alignment */
269 270 0xfc00fc, /* dlim_burstsizes */
270 271 0x1, /* minimum transfer size */
271 272 0xffffffffffffffff, /* maximum transfer size */
272 273 0xffffffffffffffff, /* maximum segment size */
273 274 1, /* scatter/gather list length */
274 275 (unsigned int)1, /* granularity */
275 276 0 /* attribute flags */
276 277 };
277 278
/*
 * DMA attributes for the transmit descriptor ring: single contiguous
 * segment, 0x100000-byte alignment.
 */
278 279 ddi_dma_attr_t hxge_tx_desc_dma_attr = {
279 280 DMA_ATTR_V0, /* version number. */
280 281 0, /* low address */
281 282 0xffffffffffffffff, /* high address */
282 283 0xffffffffffffffff, /* address counter max */
283 284 0x100000, /* alignment */
284 285 0xfc00fc, /* dlim_burstsizes */
285 286 0x1, /* minimum transfer size */
286 287 0xffffffffffffffff, /* maximum transfer size */
287 288 0xffffffffffffffff, /* maximum segment size */
288 289 1, /* scatter/gather list length */
289 290 (unsigned int)1, /* granularity */
290 291 0 /* attribute flags */
291 292 };
292 293
/*
 * DMA attributes for the receive block ring (RBR) descriptor area:
 * single contiguous segment, 0x40000-byte alignment.
 */
293 294 ddi_dma_attr_t hxge_rx_rbr_desc_dma_attr = {
294 295 DMA_ATTR_V0, /* version number. */
295 296 0, /* low address */
296 297 0xffffffffffffffff, /* high address */
297 298 0xffffffffffffffff, /* address counter max */
298 299 0x40000, /* alignment */
299 300 0xfc00fc, /* dlim_burstsizes */
300 301 0x1, /* minimum transfer size */
301 302 0xffffffffffffffff, /* maximum transfer size */
302 303 0xffffffffffffffff, /* maximum segment size */
303 304 1, /* scatter/gather list length */
304 305 (unsigned int)1, /* granularity */
305 306 0 /* attribute flags */
306 307 };
307 308
/*
 * DMA attributes for the receive mailbox area.  Alignment differs by
 * endianness (0x2000 big-endian, 0x1000 little-endian); up to a
 * 5-entry scatter/gather list is allowed.
 */
308 309 ddi_dma_attr_t hxge_rx_mbox_dma_attr = {
309 310 DMA_ATTR_V0, /* version number. */
310 311 0, /* low address */
311 312 0xffffffffffffffff, /* high address */
312 313 0xffffffffffffffff, /* address counter max */
313 314 #if defined(_BIG_ENDIAN)
314 315 0x2000, /* alignment */
315 316 #else
316 317 0x1000, /* alignment */
317 318 #endif
318 319 0xfc00fc, /* dlim_burstsizes */
319 320 0x1, /* minimum transfer size */
320 321 0xffffffffffffffff, /* maximum transfer size */
321 322 0xffffffffffffffff, /* maximum segment size */
322 323 5, /* scatter/gather list length */
323 324 (unsigned int)1, /* granularity */
324 325 0 /* attribute flags */
325 326 };
326 327
/*
 * DMA attributes for transmit data buffers.  Alignment differs by
 * endianness (0x2000 big-endian, 0x1000 little-endian); up to a
 * 5-entry scatter/gather list is allowed.
 */
327 328 ddi_dma_attr_t hxge_tx_dma_attr = {
328 329 DMA_ATTR_V0, /* version number. */
329 330 0, /* low address */
330 331 0xffffffffffffffff, /* high address */
331 332 0xffffffffffffffff, /* address counter max */
332 333 #if defined(_BIG_ENDIAN)
333 334 0x2000, /* alignment */
334 335 #else
335 336 0x1000, /* alignment */
336 337 #endif
337 338 0xfc00fc, /* dlim_burstsizes */
338 339 0x1, /* minimum transfer size */
339 340 0xffffffffffffffff, /* maximum transfer size */
340 341 0xffffffffffffffff, /* maximum segment size */
341 342 5, /* scatter/gather list length */
342 343 (unsigned int)1, /* granularity */
343 344 0 /* attribute flags */
344 345 };
345 346
/*
 * DMA attributes for receive data buffers: single contiguous segment,
 * 0x10000-byte alignment.  Relaxed ordering is enabled here (and only
 * here among these attribute sets) via DDI_DMA_RELAXED_ORDERING.
 */
346 347 ddi_dma_attr_t hxge_rx_dma_attr = {
347 348 DMA_ATTR_V0, /* version number. */
348 349 0, /* low address */
349 350 0xffffffffffffffff, /* high address */
350 351 0xffffffffffffffff, /* address counter max */
351 352 0x10000, /* alignment */
352 353 0xfc00fc, /* dlim_burstsizes */
353 354 0x1, /* minimum transfer size */
354 355 0xffffffffffffffff, /* maximum transfer size */
355 356 0xffffffffffffffff, /* maximum segment size */
356 357 1, /* scatter/gather list length */
357 358 (unsigned int)1, /* granularity */
358 359 DDI_DMA_RELAXED_ORDERING /* attribute flags */
359 360 };
360 361
/*
 * DDI DMA limit structure for this device: 32-bit address window with
 * the same burst sizes as the attribute sets above.
 */
361 362 ddi_dma_lim_t hxge_dma_limits = {
362 363 (uint_t)0, /* dlim_addr_lo */
363 364 (uint_t)0xffffffff, /* dlim_addr_hi */
364 365 (uint_t)0xffffffff, /* dlim_cntr_max */
365 366 (uint_t)0xfc00fc, /* dlim_burstsizes for 32 and 64 bit xfers */
366 367 0x1, /* dlim_minxfer */
367 368 1024 /* dlim_speed */
368 369 };
369 370
370 371 dma_method_t hxge_force_dma = DVMA;
371 372
372 373 /*
373 374 * dma chunk sizes.
374 375 *
375 376 * Try to allocate the largest possible size
376 377 * so that fewer number of dma chunks would be managed
377 378 */
/* Candidate chunk sizes: powers of two from 4 KB up to 16 MB. */
378 379 size_t alloc_sizes[] = {
379 380 0x1000, 0x2000, 0x4000, 0x8000,
380 381 0x10000, 0x20000, 0x40000, 0x80000,
381 382 0x100000, 0x200000, 0x400000, 0x800000, 0x1000000
382 383 };
383 384
384 385 /*
385 386 * Translate "dev_t" to a pointer to the associated "dev_info_t".
386 387 */
387 388 static int
388 389 hxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
389 390 {
390 391 p_hxge_t hxgep = NULL;
391 392 int instance;
392 393 int status = DDI_SUCCESS;
393 394 int i;
394 395
395 396 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_attach"));
396 397
397 398 /*
398 399 * Get the device instance since we'll need to setup or retrieve a soft
399 400 * state for this instance.
400 401 */
401 402 instance = ddi_get_instance(dip);
402 403
403 404 switch (cmd) {
404 405 case DDI_ATTACH:
405 406 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_ATTACH"));
406 407 break;
407 408
408 409 case DDI_RESUME:
409 410 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_RESUME"));
410 411 hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
411 412 if (hxgep == NULL) {
412 413 status = DDI_FAILURE;
413 414 break;
414 415 }
415 416 if (hxgep->dip != dip) {
416 417 status = DDI_FAILURE;
417 418 break;
418 419 }
419 420 if (hxgep->suspended == DDI_PM_SUSPEND) {
420 421 status = ddi_dev_is_needed(hxgep->dip, 0, 1);
421 422 } else {
422 423 (void) hxge_resume(hxgep);
423 424 }
424 425 goto hxge_attach_exit;
425 426
426 427 case DDI_PM_RESUME:
427 428 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_RESUME"));
428 429 hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
429 430 if (hxgep == NULL) {
430 431 status = DDI_FAILURE;
431 432 break;
432 433 }
433 434 if (hxgep->dip != dip) {
434 435 status = DDI_FAILURE;
435 436 break;
436 437 }
437 438 (void) hxge_resume(hxgep);
438 439 goto hxge_attach_exit;
439 440
440 441 default:
441 442 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing unknown"));
442 443 status = DDI_FAILURE;
443 444 goto hxge_attach_exit;
444 445 }
445 446
446 447 if (ddi_soft_state_zalloc(hxge_list, instance) == DDI_FAILURE) {
447 448 status = DDI_FAILURE;
448 449 HXGE_ERROR_MSG((hxgep, DDI_CTL,
449 450 "ddi_soft_state_zalloc failed"));
450 451 goto hxge_attach_exit;
451 452 }
452 453
453 454 hxgep = ddi_get_soft_state(hxge_list, instance);
454 455 if (hxgep == NULL) {
455 456 status = HXGE_ERROR;
456 457 HXGE_ERROR_MSG((hxgep, DDI_CTL,
457 458 "ddi_get_soft_state failed"));
458 459 goto hxge_attach_fail2;
459 460 }
460 461
461 462 hxgep->drv_state = 0;
462 463 hxgep->dip = dip;
463 464 hxgep->instance = instance;
464 465 hxgep->p_dip = ddi_get_parent(dip);
465 466 hxgep->hxge_debug_level = hxge_debug_level;
466 467 hpi_debug_level = hxge_debug_level;
467 468
468 469 /*
469 470 * Initialize MMAC struture.
470 471 */
471 472 (void) hxge_pfc_num_macs_get(hxgep, &hxgep->mmac.total);
472 473 hxgep->mmac.available = hxgep->mmac.total;
473 474 for (i = 0; i < hxgep->mmac.total; i++) {
474 475 hxgep->mmac.addrs[i].set = B_FALSE;
475 476 hxgep->mmac.addrs[i].primary = B_FALSE;
476 477 }
477 478
478 479 hxge_fm_init(hxgep, &hxge_dev_reg_acc_attr, &hxge_dev_desc_dma_acc_attr,
479 480 &hxge_rx_dma_attr);
480 481
481 482 status = hxge_map_regs(hxgep);
482 483 if (status != HXGE_OK) {
483 484 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_map_regs failed"));
484 485 goto hxge_attach_fail3;
485 486 }
486 487
487 488 status = hxge_init_common_dev(hxgep);
488 489 if (status != HXGE_OK) {
489 490 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
490 491 "hxge_init_common_dev failed"));
491 492 goto hxge_attach_fail4;
492 493 }
493 494
494 495 /*
495 496 * Setup the Ndd parameters for this instance.
496 497 */
497 498 hxge_init_param(hxgep);
498 499
499 500 /*
500 501 * Setup Register Tracing Buffer.
501 502 */
502 503 hpi_rtrace_buf_init((rtrace_t *)&hpi_rtracebuf);
503 504
504 505 /* init stats ptr */
505 506 hxge_init_statsp(hxgep);
506 507
507 508 status = hxge_setup_mutexes(hxgep);
508 509 if (status != HXGE_OK) {
509 510 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set mutex failed"));
510 511 goto hxge_attach_fail;
511 512 }
512 513
513 514 /* Scrub the MSI-X memory */
514 515 hxge_msix_init(hxgep);
515 516
516 517 status = hxge_get_config_properties(hxgep);
517 518 if (status != HXGE_OK) {
518 519 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "get_hw create failed"));
519 520 goto hxge_attach_fail;
520 521 }
521 522
522 523 /*
523 524 * Setup the Kstats for the driver.
524 525 */
525 526 hxge_setup_kstats(hxgep);
526 527 hxge_setup_param(hxgep);
527 528
528 529 status = hxge_setup_system_dma_pages(hxgep);
529 530 if (status != HXGE_OK) {
530 531 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "set dma page failed"));
531 532 goto hxge_attach_fail;
532 533 }
533 534
534 535 hxge_hw_id_init(hxgep);
535 536 hxge_hw_init_niu_common(hxgep);
536 537
537 538 status = hxge_setup_dev(hxgep);
538 539 if (status != DDI_SUCCESS) {
539 540 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set dev failed"));
540 541 goto hxge_attach_fail;
541 542 }
542 543
543 544 status = hxge_add_intrs(hxgep);
544 545 if (status != DDI_SUCCESS) {
545 546 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "add_intr failed"));
546 547 goto hxge_attach_fail;
547 548 }
548 549
549 550 /*
550 551 * Enable interrupts.
551 552 */
552 553 hxge_intrs_enable(hxgep);
553 554
554 555 if ((status = hxge_mac_register(hxgep)) != HXGE_OK) {
555 556 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
556 557 "unable to register to mac layer (%d)", status));
557 558 goto hxge_attach_fail;
558 559 }
559 560 mac_link_update(hxgep->mach, LINK_STATE_UNKNOWN);
560 561
561 562 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "registered to mac (instance %d)",
562 563 instance));
563 564
564 565 goto hxge_attach_exit;
565 566
566 567 hxge_attach_fail:
567 568 hxge_unattach(hxgep);
568 569 goto hxge_attach_fail1;
569 570
570 571 hxge_attach_fail5:
571 572 /*
572 573 * Tear down the ndd parameters setup.
573 574 */
574 575 hxge_destroy_param(hxgep);
575 576
576 577 /*
577 578 * Tear down the kstat setup.
578 579 */
579 580 hxge_destroy_kstats(hxgep);
580 581
581 582 hxge_attach_fail4:
582 583 if (hxgep->hxge_hw_p) {
583 584 hxge_uninit_common_dev(hxgep);
584 585 hxgep->hxge_hw_p = NULL;
585 586 }
586 587 hxge_attach_fail3:
587 588 /*
588 589 * Unmap the register setup.
589 590 */
590 591 hxge_unmap_regs(hxgep);
591 592
592 593 hxge_fm_fini(hxgep);
593 594
594 595 hxge_attach_fail2:
595 596 ddi_soft_state_free(hxge_list, hxgep->instance);
596 597
597 598 hxge_attach_fail1:
598 599 if (status != HXGE_OK)
599 600 status = (HXGE_ERROR | HXGE_DDI_FAILED);
600 601 hxgep = NULL;
601 602
602 603 hxge_attach_exit:
603 604 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_attach status = 0x%08x",
604 605 status));
605 606
606 607 return (status);
607 608 }
608 609
/*
 * hxge_detach -- DDI detach(9E) entry point.
 *
 * DDI_DETACH unregisters from the GLDv3 mac layer and tears the
 * instance down via hxge_unattach().  DDI_SUSPEND and DDI_PM_SUSPEND
 * quiesce the device with hxge_suspend() but keep the soft state so
 * the instance can be resumed later.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
609 610 static int
610 611 hxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
611 612 {
612 613 int status = DDI_SUCCESS;
613 614 int instance;
614 615 p_hxge_t hxgep = NULL;
615 616
616 617 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_detach"));
617 618 instance = ddi_get_instance(dip);
618 619 hxgep = ddi_get_soft_state(hxge_list, instance);
619 620 if (hxgep == NULL) {
620 621 status = DDI_FAILURE;
621 622 goto hxge_detach_exit;
622 623 }
623 624
624 625 switch (cmd) {
625 626 case DDI_DETACH:
626 627 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_DETACH"));
627 628 break;
628 629
629 630 case DDI_PM_SUSPEND:
630 631 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
631 632 hxgep->suspended = DDI_PM_SUSPEND;
632 633 hxge_suspend(hxgep);
633 634 break;
634 635
635 636 case DDI_SUSPEND:
/* A PM suspend already quiesced the device; don't suspend twice. */
636 637 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_SUSPEND"));
637 638 if (hxgep->suspended != DDI_PM_SUSPEND) {
638 639 hxgep->suspended = DDI_SUSPEND;
639 640 hxge_suspend(hxgep);
640 641 }
641 642 break;
642 643
643 644 default:
644 645 status = DDI_FAILURE;
645 646 break;
646 647 }
647 648
/* Suspend paths are done; only DDI_DETACH continues to teardown. */
648 649 if (cmd != DDI_DETACH)
649 650 goto hxge_detach_exit;
650 651
651 652 /*
652 653 * Stop the xcvr polling.
653 654 */
654 655 hxgep->suspended = cmd;
655 656
/* If mac_unregister() fails (e.g. the device is still in use), abort. */
656 657 if (hxgep->mach && (status = mac_unregister(hxgep->mach)) != 0) {
657 658 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
658 659 "<== hxge_detach status = 0x%08X", status));
659 660 return (DDI_FAILURE);
660 661 }
661 662 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
662 663 "<== hxge_detach (mac_unregister) status = 0x%08X", status));
663 664
664 665 hxge_unattach(hxgep);
665 666 hxgep = NULL;
666 667
667 668 hxge_detach_exit:
668 669 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_detach status = 0x%08X",
669 670 status));
670 671
671 672 return (status);
672 673 }
673 674
/*
 * hxge_unattach -- Tear down one instance, undoing hxge_attach().
 *
 * Safe to call with a partially attached instance: it returns
 * immediately if hxgep is NULL or the registers were never mapped,
 * and each subsequent step is guarded by its own state check.  The
 * teardown order (interrupts before device, device before registers,
 * mutexes last) must be preserved.
 */
674 675 static void
675 676 hxge_unattach(p_hxge_t hxgep)
676 677 {
677 678 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unattach"));
678 679
679 680 if (hxgep == NULL || hxgep->dev_regs == NULL) {
680 681 return;
681 682 }
682 683
683 684 if (hxgep->hxge_hw_p) {
684 685 hxge_uninit_common_dev(hxgep);
685 686 hxgep->hxge_hw_p = NULL;
686 687 }
687 688
688 689 if (hxgep->hxge_timerid) {
689 690 hxge_stop_timer(hxgep, hxgep->hxge_timerid);
690 691 hxgep->hxge_timerid = 0;
691 692 }
692 693
693 694 /* Stop interrupts. */
694 695 hxge_intrs_disable(hxgep);
695 696
696 697 /* Stop any further interrupts. */
697 698 hxge_remove_intrs(hxgep);
698 699
699 700 /* Stop the device and free resources. */
700 701 hxge_destroy_dev(hxgep);
701 702
702 703 /* Tear down the ndd parameters setup. */
703 704 hxge_destroy_param(hxgep);
704 705
705 706 /* Tear down the kstat setup. */
706 707 hxge_destroy_kstats(hxgep);
707 708
708 709 /*
709 710 * Remove the list of ndd parameters which were setup during attach.
710 711 */
711 712 if (hxgep->dip) {
712 713 HXGE_DEBUG_MSG((hxgep, OBP_CTL,
713 714 " hxge_unattach: remove all properties"));
714 715 (void) ddi_prop_remove_all(hxgep->dip);
715 716 }
716 717
717 718 /*
718 719 * Reset RDC, TDC, PFC, and VMAC blocks from PEU to clear any
719 720 * previous state before unmapping the registers.
720 721 */
721 722 HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, 0x0000001E);
722 723 HXGE_DELAY(1000);
723 724
724 725 /*
725 726 * Unmap the register setup.
726 727 */
727 728 hxge_unmap_regs(hxgep);
728 729
729 730 hxge_fm_fini(hxgep);
730 731
731 732 /* Destroy all mutexes. */
732 733 hxge_destroy_mutexes(hxgep);
733 734
734 735 /*
735 736 * Free the soft state data structures allocated with this instance.
736 737 */
737 738 ddi_soft_state_free(hxge_list, hxgep->instance);
738 739
/* hxgep is freed above; the final trace message must not use it. */
739 740 HXGE_DEBUG_MSG((NULL, DDI_CTL, "<== hxge_unattach"));
740 741 }
741 742
742 743 static hxge_status_t
743 744 hxge_map_regs(p_hxge_t hxgep)
744 745 {
745 746 int ddi_status = DDI_SUCCESS;
746 747 p_dev_regs_t dev_regs;
747 748
748 749 #ifdef HXGE_DEBUG
749 750 char *sysname;
750 751 #endif
751 752
752 753 off_t regsize;
753 754 hxge_status_t status = HXGE_OK;
754 755 int nregs;
755 756
756 757 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_map_regs"));
757 758
758 759 if (ddi_dev_nregs(hxgep->dip, &nregs) != DDI_SUCCESS)
759 760 return (HXGE_ERROR);
760 761
761 762 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_regs: nregs: %d", nregs));
762 763
763 764 hxgep->dev_regs = NULL;
764 765 dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
765 766 dev_regs->hxge_regh = NULL;
766 767 dev_regs->hxge_pciregh = NULL;
767 768 dev_regs->hxge_msix_regh = NULL;
768 769
769 770 (void) ddi_dev_regsize(hxgep->dip, 0, ®size);
770 771 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
771 772 "hxge_map_regs: pci config size 0x%x", regsize));
772 773
773 774 ddi_status = ddi_regs_map_setup(hxgep->dip, 0,
774 775 (caddr_t *)&(dev_regs->hxge_pciregp), 0, 0,
775 776 &hxge_dev_reg_acc_attr, &dev_regs->hxge_pciregh);
776 777 if (ddi_status != DDI_SUCCESS) {
777 778 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
778 779 "ddi_map_regs, hxge bus config regs failed"));
779 780 goto hxge_map_regs_fail0;
780 781 }
781 782
782 783 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
783 784 "hxge_map_reg: PCI config addr 0x%0llx handle 0x%0llx",
784 785 dev_regs->hxge_pciregp,
785 786 dev_regs->hxge_pciregh));
786 787
787 788 (void) ddi_dev_regsize(hxgep->dip, 1, ®size);
788 789 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
789 790 "hxge_map_regs: pio size 0x%x", regsize));
790 791
791 792 /* set up the device mapped register */
792 793 ddi_status = ddi_regs_map_setup(hxgep->dip, 1,
793 794 (caddr_t *)&(dev_regs->hxge_regp), 0, 0,
794 795 &hxge_dev_reg_acc_attr, &dev_regs->hxge_regh);
795 796
796 797 if (ddi_status != DDI_SUCCESS) {
797 798 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
798 799 "ddi_map_regs for Hydra global reg failed"));
799 800 goto hxge_map_regs_fail1;
800 801 }
801 802
802 803 /* set up the msi/msi-x mapped register */
803 804 (void) ddi_dev_regsize(hxgep->dip, 2, ®size);
804 805 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
805 806 "hxge_map_regs: msix size 0x%x", regsize));
806 807
807 808 ddi_status = ddi_regs_map_setup(hxgep->dip, 2,
808 809 (caddr_t *)&(dev_regs->hxge_msix_regp), 0, 0,
809 810 &hxge_dev_reg_acc_attr, &dev_regs->hxge_msix_regh);
810 811
811 812 if (ddi_status != DDI_SUCCESS) {
812 813 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
813 814 "ddi_map_regs for msi reg failed"));
814 815 goto hxge_map_regs_fail2;
815 816 }
816 817
817 818 hxgep->dev_regs = dev_regs;
818 819
819 820 HPI_PCI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_pciregh);
820 821 HPI_PCI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_pciregp);
821 822 HPI_MSI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_msix_regh);
822 823 HPI_MSI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_msix_regp);
823 824
824 825 HPI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
825 826 HPI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
826 827
827 828 HPI_REG_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
828 829 HPI_REG_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
829 830
830 831 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_reg: hardware addr 0x%0llx "
831 832 " handle 0x%0llx", dev_regs->hxge_regp, dev_regs->hxge_regh));
832 833
833 834 goto hxge_map_regs_exit;
834 835
835 836 hxge_map_regs_fail3:
836 837 if (dev_regs->hxge_msix_regh) {
837 838 ddi_regs_map_free(&dev_regs->hxge_msix_regh);
838 839 }
839 840
840 841 hxge_map_regs_fail2:
841 842 if (dev_regs->hxge_regh) {
842 843 ddi_regs_map_free(&dev_regs->hxge_regh);
843 844 }
844 845
845 846 hxge_map_regs_fail1:
846 847 if (dev_regs->hxge_pciregh) {
847 848 ddi_regs_map_free(&dev_regs->hxge_pciregh);
848 849 }
849 850
850 851 hxge_map_regs_fail0:
851 852 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "Freeing register set memory"));
852 853 kmem_free(dev_regs, sizeof (dev_regs_t));
853 854
854 855 hxge_map_regs_exit:
855 856 if (ddi_status != DDI_SUCCESS)
856 857 status |= (HXGE_ERROR | HXGE_DDI_FAILED);
857 858 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_map_regs"));
858 859 return (status);
859 860 }
860 861
861 862 static void
862 863 hxge_unmap_regs(p_hxge_t hxgep)
863 864 {
864 865 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unmap_regs"));
865 866 if (hxgep->dev_regs) {
866 867 if (hxgep->dev_regs->hxge_pciregh) {
867 868 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
868 869 "==> hxge_unmap_regs: bus"));
869 870 ddi_regs_map_free(&hxgep->dev_regs->hxge_pciregh);
870 871 hxgep->dev_regs->hxge_pciregh = NULL;
871 872 }
872 873
873 874 if (hxgep->dev_regs->hxge_regh) {
874 875 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
875 876 "==> hxge_unmap_regs: device registers"));
876 877 ddi_regs_map_free(&hxgep->dev_regs->hxge_regh);
877 878 hxgep->dev_regs->hxge_regh = NULL;
878 879 }
879 880
880 881 if (hxgep->dev_regs->hxge_msix_regh) {
881 882 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
882 883 "==> hxge_unmap_regs: device interrupts"));
883 884 ddi_regs_map_free(&hxgep->dev_regs->hxge_msix_regh);
884 885 hxgep->dev_regs->hxge_msix_regh = NULL;
885 886 }
886 887 kmem_free(hxgep->dev_regs, sizeof (dev_regs_t));
887 888 hxgep->dev_regs = NULL;
888 889 }
889 890 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_unmap_regs"));
890 891 }
891 892
/*
 * hxge_setup_mutexes -- Create the per-instance locks.
 *
 * The interrupt block cookie must be fetched first so the mutexes can
 * be initialized at the proper interrupt priority.  If the cookie
 * cannot be obtained, no locks are created and an error status is
 * returned; otherwise returns HXGE_OK.
 */
892 893 static hxge_status_t
893 894 hxge_setup_mutexes(p_hxge_t hxgep)
894 895 {
895 896 int ddi_status = DDI_SUCCESS;
896 897 hxge_status_t status = HXGE_OK;
897 898
898 899 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_mutexes"));
899 900
900 901 /*
901 902 * Get the interrupt cookie so the mutexes can be initialized.
902 903 */
903 904 ddi_status = ddi_get_iblock_cookie(hxgep->dip, 0,
904 905 &hxgep->interrupt_cookie);
905 906
906 907 if (ddi_status != DDI_SUCCESS) {
907 908 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
908 909 "<== hxge_setup_mutexes: failed 0x%x", ddi_status));
909 910 goto hxge_setup_mutexes_exit;
910 911 }
911 912
912 913 /*
913 914 * Initialize mutex's for this device.
914 915 */
915 916 MUTEX_INIT(hxgep->genlock, NULL,
916 917 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
917 918 MUTEX_INIT(&hxgep->vmac_lock, NULL,
918 919 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
919 920 MUTEX_INIT(&hxgep->ouraddr_lock, NULL,
920 921 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
921 922 RW_INIT(&hxgep->filter_lock, NULL,
922 923 RW_DRIVER, (void *) hxgep->interrupt_cookie);
923 924 MUTEX_INIT(&hxgep->pio_lock, NULL,
924 925 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
925 926 MUTEX_INIT(&hxgep->timeout.lock, NULL,
926 927 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
927 928
928 929 hxge_setup_mutexes_exit:
929 930 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
930 931 "<== hxge_setup_mutexes status = %x", status));
931 932
/* Fold any DDI failure into the returned hxge status. */
932 933 if (ddi_status != DDI_SUCCESS)
933 934 status |= (HXGE_ERROR | HXGE_DDI_FAILED);
934 935
935 936 return (status);
936 937 }
937 938
/*
 * hxge_destroy_mutexes() -- tear down the locks created by
 * hxge_setup_mutexes(), plus the global debug-message lock if it was
 * lazily created (hxge_debug_init is set by hxge_debug_msg()).
 */
static void
hxge_destroy_mutexes(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_mutexes"));
	RW_DESTROY(&hxgep->filter_lock);
	MUTEX_DESTROY(&hxgep->vmac_lock);
	MUTEX_DESTROY(&hxgep->ouraddr_lock);
	MUTEX_DESTROY(hxgep->genlock);
	MUTEX_DESTROY(&hxgep->pio_lock);
	MUTEX_DESTROY(&hxgep->timeout.lock);

	/* Retire the lazily-created global debug lock exactly once. */
	if (hxge_debug_init == 1) {
		MUTEX_DESTROY(&hxgedebuglock);
		hxge_debug_init = 0;
	}

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_mutexes"));
}
956 957
/*
 * hxge_init() -- one-time hardware bring-up for this instance.
 *
 * Allocates the RX/TX buffer and descriptor memory pools, initializes
 * and enables the TX DMA, RX DMA, TCAM classification and VMAC blocks,
 * programs the default MAC address, and finally enables hardware
 * interrupts.  On failure, steps already completed are unwound through
 * the cascading fail labels (each label undoes one earlier step, then
 * falls into the next).  Idempotent: returns HXGE_OK immediately when
 * STATE_HW_INITIALIZED is already set.
 */
hxge_status_t
hxge_init(p_hxge_t hxgep)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_init"));

	if (hxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks and
	 * receive/transmit descriptor rings.
	 */
	status = hxge_alloc_mem_pool(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "alloc mem failed\n"));
		goto hxge_init_fail1;
	}

	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = hxge_init_txdma_channels(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init txdma failed\n"));
		goto hxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = hxge_init_rxdma_channels(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init rxdma failed\n"));
		goto hxge_init_fail4;
	}

	/*
	 * Initialize TCAM
	 */
	status = hxge_classify_init(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init classify failed\n"));
		goto hxge_init_fail5;
	}

	/*
	 * Initialize the VMAC block.
	 *
	 * NOTE(review): failures from here on jump to hxge_init_fail5,
	 * which does not call hxge_classify_uninit() -- verify whether
	 * TCAM/classification state is leaked on these paths.
	 */
	status = hxge_vmac_init(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init MAC failed\n"));
		goto hxge_init_fail5;
	}

	/* Bringup - this may be unnecessary when PXE and FCODE available */
	status = hxge_pfc_set_default_mac_addr(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "Default Address Failure\n"));
		goto hxge_init_fail5;
	}

	/*
	 * Enable hardware interrupts.
	 */
	hxge_intr_hw_enable(hxgep);
	hxgep->drv_state |= STATE_HW_INITIALIZED;

	goto hxge_init_exit;

	/* Unwind in reverse order of initialization; labels fall through. */
hxge_init_fail5:
	hxge_uninit_rxdma_channels(hxgep);
hxge_init_fail4:
	hxge_uninit_txdma_channels(hxgep);
hxge_init_fail3:
	hxge_free_mem_pool(hxgep);
hxge_init_fail1:
	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "<== hxge_init status (failed) = 0x%08x", status));
	return (status);

hxge_init_exit:

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_init status = 0x%08x",
	    status));

	return (status);
}
1048 1049
1049 1050 timeout_id_t
1050 1051 hxge_start_timer(p_hxge_t hxgep, fptrv_t func, int msec)
1051 1052 {
1052 1053 if ((hxgep->suspended == 0) || (hxgep->suspended == DDI_RESUME)) {
1053 1054 return (timeout(func, (caddr_t)hxgep,
1054 1055 drv_usectohz(1000 * msec)));
1055 1056 }
1056 1057 return (NULL);
1057 1058 }
1058 1059
1059 1060 /*ARGSUSED*/
1060 1061 void
1061 1062 hxge_stop_timer(p_hxge_t hxgep, timeout_id_t timerid)
1062 1063 {
1063 1064 if (timerid) {
1064 1065 (void) untimeout(timerid);
1065 1066 }
1066 1067 }
1067 1068
/*
 * hxge_uninit() -- quiesce the hardware and release the resources set
 * up by hxge_init(), in roughly reverse order: stop the driver timer,
 * mask hardware interrupts, disable the RX VMAC, free classification
 * (TCAM) state, stop and tear down the TX/RX DMA channels, disable the
 * TX VMAC, and finally free the buffer/descriptor memory pools.
 * A no-op when hxge_init() never completed (STATE_HW_INITIALIZED clear).
 */
void
hxge_uninit(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_uninit"));

	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
		    "==> hxge_uninit: not initialized"));
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
		return;
	}

	/* Stop timer */
	if (hxgep->hxge_timerid) {
		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
		hxgep->hxge_timerid = 0;
	}

	(void) hxge_intr_hw_disable(hxgep);

	/* Reset the receive VMAC side. */
	(void) hxge_rx_vmac_disable(hxgep);

	/* Free classification resources */
	(void) hxge_classify_uninit(hxgep);

	/* Reset the transmit/receive DMA side. */
	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);

	hxge_uninit_txdma_channels(hxgep);
	hxge_uninit_rxdma_channels(hxgep);

	/* Reset the transmit VMAC side. */
	(void) hxge_tx_vmac_disable(hxgep);

	hxge_free_mem_pool(hxgep);

	/* Allow a later hxge_init() to bring the device up from scratch. */
	hxgep->drv_state &= ~STATE_HW_INITIALIZED;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
}
1110 1111
1111 1112 /*ARGSUSED*/
1112 1113 /*VARARGS*/
1113 1114 void
1114 1115 hxge_debug_msg(p_hxge_t hxgep, uint64_t level, char *fmt, ...)
1115 1116 {
1116 1117 char msg_buffer[1048];
1117 1118 char prefix_buffer[32];
1118 1119 int instance;
1119 1120 uint64_t debug_level;
1120 1121 int cmn_level = CE_CONT;
1121 1122 va_list ap;
1122 1123
1123 1124 debug_level = (hxgep == NULL) ? hxge_debug_level :
1124 1125 hxgep->hxge_debug_level;
1125 1126
1126 1127 if ((level & debug_level) || (level == HXGE_NOTE) ||
1127 1128 (level == HXGE_ERR_CTL)) {
1128 1129 /* do the msg processing */
1129 1130 if (hxge_debug_init == 0) {
1130 1131 MUTEX_INIT(&hxgedebuglock, NULL, MUTEX_DRIVER, NULL);
1131 1132 hxge_debug_init = 1;
1132 1133 }
1133 1134
1134 1135 MUTEX_ENTER(&hxgedebuglock);
1135 1136
1136 1137 if ((level & HXGE_NOTE)) {
1137 1138 cmn_level = CE_NOTE;
1138 1139 }
1139 1140
1140 1141 if (level & HXGE_ERR_CTL) {
1141 1142 cmn_level = CE_WARN;
1142 1143 }
1143 1144
1144 1145 va_start(ap, fmt);
1145 1146 (void) vsprintf(msg_buffer, fmt, ap);
1146 1147 va_end(ap);
1147 1148
1148 1149 if (hxgep == NULL) {
1149 1150 instance = -1;
1150 1151 (void) sprintf(prefix_buffer, "%s :", "hxge");
1151 1152 } else {
1152 1153 instance = hxgep->instance;
1153 1154 (void) sprintf(prefix_buffer,
1154 1155 "%s%d :", "hxge", instance);
1155 1156 }
1156 1157
1157 1158 MUTEX_EXIT(&hxgedebuglock);
1158 1159 cmn_err(cmn_level, "%s %s\n", prefix_buffer, msg_buffer);
1159 1160 }
1160 1161 }
1161 1162
1162 1163 char *
1163 1164 hxge_dump_packet(char *addr, int size)
1164 1165 {
1165 1166 uchar_t *ap = (uchar_t *)addr;
1166 1167 int i;
1167 1168 static char etherbuf[1024];
1168 1169 char *cp = etherbuf;
1169 1170 char digits[] = "0123456789abcdef";
1170 1171
1171 1172 if (!size)
1172 1173 size = 60;
1173 1174
1174 1175 if (size > MAX_DUMP_SZ) {
1175 1176 /* Dump the leading bytes */
1176 1177 for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1177 1178 if (*ap > 0x0f)
1178 1179 *cp++ = digits[*ap >> 4];
1179 1180 *cp++ = digits[*ap++ & 0xf];
1180 1181 *cp++ = ':';
1181 1182 }
1182 1183 for (i = 0; i < 20; i++)
1183 1184 *cp++ = '.';
1184 1185 /* Dump the last MAX_DUMP_SZ/2 bytes */
1185 1186 ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
1186 1187 for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1187 1188 if (*ap > 0x0f)
1188 1189 *cp++ = digits[*ap >> 4];
1189 1190 *cp++ = digits[*ap++ & 0xf];
1190 1191 *cp++ = ':';
1191 1192 }
1192 1193 } else {
1193 1194 for (i = 0; i < size; i++) {
1194 1195 if (*ap > 0x0f)
1195 1196 *cp++ = digits[*ap >> 4];
1196 1197 *cp++ = digits[*ap++ & 0xf];
1197 1198 *cp++ = ':';
1198 1199 }
1199 1200 }
1200 1201 *--cp = 0;
1201 1202 return (etherbuf);
1202 1203 }
1203 1204
/*
 * hxge_suspend() -- quiesce the device for DDI_SUSPEND: cancel the
 * link-status timer, disable interrupts, then stop the hardware via
 * hxge_destroy_dev().
 */
static void
hxge_suspend(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_suspend"));

	/*
	 * Stop the link status timer before hxge_intrs_disable() to avoid
	 * accessing the MSIX table simultaneously. Note that the timer
	 * routine polls for MSIX parity errors.
	 */
	MUTEX_ENTER(&hxgep->timeout.lock);
	if (hxgep->timeout.id)
		(void) untimeout(hxgep->timeout.id);
	MUTEX_EXIT(&hxgep->timeout.lock);

	hxge_intrs_disable(hxgep);
	hxge_destroy_dev(hxgep);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_suspend"));
}
1224 1225
/*
 * hxge_resume() -- restart the device after DDI_RESUME: re-enable the
 * RX/TX DMA engines and VMACs, unmask interrupts, then re-arm the
 * link-status timer.  hxgep->suspended is set to DDI_RESUME for the
 * duration so other paths (e.g. hxge_start_timer()) can tell a resume
 * is in progress.  Always returns HXGE_OK (status is never modified).
 */
static hxge_status_t
hxge_resume(p_hxge_t hxgep)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_resume"));
	hxgep->suspended = DDI_RESUME;

	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_START);
	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_START);

	(void) hxge_rx_vmac_enable(hxgep);
	(void) hxge_tx_vmac_enable(hxgep);

	hxge_intrs_enable(hxgep);

	hxgep->suspended = 0;

	/*
	 * Resume the link status timer after hxge_intrs_enable to avoid
	 * accessing MSIX table simultaneously.
	 */
	MUTEX_ENTER(&hxgep->timeout.lock);
	hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
	    hxgep->timeout.ticks);
	MUTEX_EXIT(&hxgep->timeout.lock);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "<== hxge_resume status = 0x%x", status));

	return (status);
}
1257 1258
1258 1259 static hxge_status_t
1259 1260 hxge_setup_dev(p_hxge_t hxgep)
1260 1261 {
1261 1262 hxge_status_t status = HXGE_OK;
1262 1263
1263 1264 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_dev"));
1264 1265
1265 1266 status = hxge_link_init(hxgep);
1266 1267 if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) {
1267 1268 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1268 1269 "Bad register acc handle"));
1269 1270 status = HXGE_ERROR;
1270 1271 }
1271 1272
1272 1273 if (status != HXGE_OK) {
1273 1274 HXGE_DEBUG_MSG((hxgep, MAC_CTL,
1274 1275 " hxge_setup_dev status (link init 0x%08x)", status));
1275 1276 goto hxge_setup_dev_exit;
1276 1277 }
1277 1278
1278 1279 hxge_setup_dev_exit:
1279 1280 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1280 1281 "<== hxge_setup_dev status = 0x%08x", status));
1281 1282
1282 1283 return (status);
1283 1284 }
1284 1285
1285 1286 static void
1286 1287 hxge_destroy_dev(p_hxge_t hxgep)
1287 1288 {
1288 1289 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_dev"));
1289 1290
1290 1291 (void) hxge_hw_stop(hxgep);
1291 1292
1292 1293 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_dev"));
1293 1294 }
1294 1295
1295 1296 static hxge_status_t
1296 1297 hxge_setup_system_dma_pages(p_hxge_t hxgep)
1297 1298 {
1298 1299 int ddi_status = DDI_SUCCESS;
1299 1300 uint_t count;
1300 1301 ddi_dma_cookie_t cookie;
1301 1302 uint_t iommu_pagesize;
1302 1303 hxge_status_t status = HXGE_OK;
1303 1304
1304 1305 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_system_dma_pages"));
1305 1306
1306 1307 hxgep->sys_page_sz = ddi_ptob(hxgep->dip, (ulong_t)1);
1307 1308 iommu_pagesize = dvma_pagesize(hxgep->dip);
1308 1309
1309 1310 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1310 1311 " hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1311 1312 " default_block_size %d iommu_pagesize %d",
1312 1313 hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1313 1314 hxgep->rx_default_block_size, iommu_pagesize));
1314 1315
1315 1316 if (iommu_pagesize != 0) {
1316 1317 if (hxgep->sys_page_sz == iommu_pagesize) {
1317 1318 /* Hydra support up to 8K pages */
1318 1319 if (iommu_pagesize > 0x2000)
1319 1320 hxgep->sys_page_sz = 0x2000;
1320 1321 } else {
1321 1322 if (hxgep->sys_page_sz > iommu_pagesize)
1322 1323 hxgep->sys_page_sz = iommu_pagesize;
1323 1324 }
1324 1325 }
1325 1326
1326 1327 hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1327 1328
1328 1329 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1329 1330 "==> hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1330 1331 "default_block_size %d page mask %d",
1331 1332 hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1332 1333 hxgep->rx_default_block_size, hxgep->sys_page_mask));
1333 1334
1334 1335 switch (hxgep->sys_page_sz) {
1335 1336 default:
1336 1337 hxgep->sys_page_sz = 0x1000;
1337 1338 hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1338 1339 hxgep->rx_default_block_size = 0x1000;
1339 1340 hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1340 1341 break;
1341 1342 case 0x1000:
1342 1343 hxgep->rx_default_block_size = 0x1000;
1343 1344 hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1344 1345 break;
1345 1346 case 0x2000:
1346 1347 hxgep->rx_default_block_size = 0x2000;
1347 1348 hxgep->rx_bksize_code = RBR_BKSIZE_8K;
1348 1349 break;
1349 1350 }
1350 1351
1351 1352 hxge_rx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1352 1353 hxge_tx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1353 1354
1354 1355 /*
1355 1356 * Get the system DMA burst size.
1356 1357 */
1357 1358 ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
1358 1359 DDI_DMA_DONTWAIT, 0, &hxgep->dmasparehandle);
1359 1360 if (ddi_status != DDI_SUCCESS) {
1360 1361 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1361 1362 "ddi_dma_alloc_handle: failed status 0x%x", ddi_status));
1362 1363 goto hxge_get_soft_properties_exit;
1363 1364 }
1364 1365
1365 1366 ddi_status = ddi_dma_addr_bind_handle(hxgep->dmasparehandle, NULL,
1366 1367 (caddr_t)hxgep->dmasparehandle, sizeof (hxgep->dmasparehandle),
1367 1368 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
1368 1369 &cookie, &count);
1369 1370 if (ddi_status != DDI_DMA_MAPPED) {
1370 1371 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1371 1372 "Binding spare handle to find system burstsize failed."));
1372 1373 ddi_status = DDI_FAILURE;
1373 1374 goto hxge_get_soft_properties_fail1;
1374 1375 }
1375 1376
1376 1377 hxgep->sys_burst_sz = ddi_dma_burstsizes(hxgep->dmasparehandle);
1377 1378 (void) ddi_dma_unbind_handle(hxgep->dmasparehandle);
1378 1379
1379 1380 hxge_get_soft_properties_fail1:
1380 1381 ddi_dma_free_handle(&hxgep->dmasparehandle);
1381 1382
1382 1383 hxge_get_soft_properties_exit:
1383 1384
1384 1385 if (ddi_status != DDI_SUCCESS)
1385 1386 status |= (HXGE_ERROR | HXGE_DDI_FAILED);
1386 1387
1387 1388 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1388 1389 "<== hxge_setup_system_dma_pages status = 0x%08x", status));
1389 1390
1390 1391 return (status);
1391 1392 }
1392 1393
1393 1394 static hxge_status_t
1394 1395 hxge_alloc_mem_pool(p_hxge_t hxgep)
1395 1396 {
1396 1397 hxge_status_t status = HXGE_OK;
1397 1398
1398 1399 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_alloc_mem_pool"));
1399 1400
1400 1401 status = hxge_alloc_rx_mem_pool(hxgep);
1401 1402 if (status != HXGE_OK) {
1402 1403 return (HXGE_ERROR);
1403 1404 }
1404 1405
1405 1406 status = hxge_alloc_tx_mem_pool(hxgep);
1406 1407 if (status != HXGE_OK) {
1407 1408 hxge_free_rx_mem_pool(hxgep);
1408 1409 return (HXGE_ERROR);
1409 1410 }
1410 1411
1411 1412 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_alloc_mem_pool"));
1412 1413 return (HXGE_OK);
1413 1414 }
1414 1415
1415 1416 static void
1416 1417 hxge_free_mem_pool(p_hxge_t hxgep)
1417 1418 {
1418 1419 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_mem_pool"));
1419 1420
1420 1421 hxge_free_rx_mem_pool(hxgep);
1421 1422 hxge_free_tx_mem_pool(hxgep);
1422 1423
1423 1424 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_mem_pool"));
1424 1425 }
1425 1426
1426 1427 static hxge_status_t
1427 1428 hxge_alloc_rx_mem_pool(p_hxge_t hxgep)
1428 1429 {
1429 1430 int i, j;
1430 1431 uint32_t ndmas, st_rdc;
1431 1432 p_hxge_dma_pt_cfg_t p_all_cfgp;
1432 1433 p_hxge_hw_pt_cfg_t p_cfgp;
1433 1434 p_hxge_dma_pool_t dma_poolp;
1434 1435 p_hxge_dma_common_t *dma_buf_p;
1435 1436 p_hxge_dma_pool_t dma_rbr_cntl_poolp;
1436 1437 p_hxge_dma_common_t *dma_rbr_cntl_p;
1437 1438 p_hxge_dma_pool_t dma_rcr_cntl_poolp;
1438 1439 p_hxge_dma_common_t *dma_rcr_cntl_p;
1439 1440 p_hxge_dma_pool_t dma_mbox_cntl_poolp;
1440 1441 p_hxge_dma_common_t *dma_mbox_cntl_p;
1441 1442 size_t rx_buf_alloc_size;
1442 1443 size_t rx_rbr_cntl_alloc_size;
1443 1444 size_t rx_rcr_cntl_alloc_size;
1444 1445 size_t rx_mbox_cntl_alloc_size;
1445 1446 uint32_t *num_chunks; /* per dma */
1446 1447 hxge_status_t status = HXGE_OK;
1447 1448
1448 1449 uint32_t hxge_port_rbr_size;
1449 1450 uint32_t hxge_port_rbr_spare_size;
1450 1451 uint32_t hxge_port_rcr_size;
1451 1452
1452 1453 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_mem_pool"));
1453 1454
1454 1455 p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1455 1456 p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1456 1457 st_rdc = p_cfgp->start_rdc;
1457 1458 ndmas = p_cfgp->max_rdcs;
1458 1459
1459 1460 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1460 1461 " hxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));
1461 1462
1462 1463 /*
1463 1464 * Allocate memory for each receive DMA channel.
1464 1465 */
1465 1466 dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1466 1467 KM_SLEEP);
1467 1468 dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1468 1469 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1469 1470
1470 1471 dma_rbr_cntl_poolp = (p_hxge_dma_pool_t)
1471 1472 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1472 1473 dma_rbr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1473 1474 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1474 1475 dma_rcr_cntl_poolp = (p_hxge_dma_pool_t)
1475 1476 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1476 1477 dma_rcr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1477 1478 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1478 1479 dma_mbox_cntl_poolp = (p_hxge_dma_pool_t)
1479 1480 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1480 1481 dma_mbox_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1481 1482 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1482 1483
1483 1484 num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1484 1485 KM_SLEEP);
1485 1486
1486 1487 /*
1487 1488 * Assume that each DMA channel will be configured with default block
1488 1489 * size. rbr block counts are mod of batch count (16).
1489 1490 */
1490 1491 hxge_port_rbr_size = p_all_cfgp->rbr_size;
1491 1492 hxge_port_rcr_size = p_all_cfgp->rcr_size;
1492 1493
1493 1494 if (!hxge_port_rbr_size) {
1494 1495 hxge_port_rbr_size = HXGE_RBR_RBB_DEFAULT;
1495 1496 }
1496 1497
1497 1498 if (hxge_port_rbr_size % HXGE_RXDMA_POST_BATCH) {
1498 1499 hxge_port_rbr_size = (HXGE_RXDMA_POST_BATCH *
1499 1500 (hxge_port_rbr_size / HXGE_RXDMA_POST_BATCH + 1));
1500 1501 }
1501 1502
1502 1503 p_all_cfgp->rbr_size = hxge_port_rbr_size;
1503 1504 hxge_port_rbr_spare_size = hxge_rbr_spare_size;
1504 1505
1505 1506 if (hxge_port_rbr_spare_size % HXGE_RXDMA_POST_BATCH) {
1506 1507 hxge_port_rbr_spare_size = (HXGE_RXDMA_POST_BATCH *
1507 1508 (hxge_port_rbr_spare_size / HXGE_RXDMA_POST_BATCH + 1));
1508 1509 }
1509 1510
1510 1511 rx_buf_alloc_size = (hxgep->rx_default_block_size *
1511 1512 (hxge_port_rbr_size + hxge_port_rbr_spare_size));
1512 1513
1513 1514 /*
1514 1515 * Addresses of receive block ring, receive completion ring and the
1515 1516 * mailbox must be all cache-aligned (64 bytes).
1516 1517 */
1517 1518 rx_rbr_cntl_alloc_size = hxge_port_rbr_size + hxge_port_rbr_spare_size;
1518 1519 rx_rbr_cntl_alloc_size *= sizeof (rx_desc_t);
1519 1520 rx_rcr_cntl_alloc_size = sizeof (rcr_entry_t) * hxge_port_rcr_size;
1520 1521 rx_mbox_cntl_alloc_size = sizeof (rxdma_mailbox_t);
1521 1522
1522 1523 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_alloc_rx_mem_pool: "
1523 1524 "hxge_port_rbr_size = %d hxge_port_rbr_spare_size = %d "
1524 1525 "hxge_port_rcr_size = %d rx_cntl_alloc_size = %d",
1525 1526 hxge_port_rbr_size, hxge_port_rbr_spare_size,
1526 1527 hxge_port_rcr_size, rx_cntl_alloc_size));
1527 1528
1528 1529 hxgep->hxge_port_rbr_size = hxge_port_rbr_size;
1529 1530 hxgep->hxge_port_rcr_size = hxge_port_rcr_size;
1530 1531
1531 1532 /*
1532 1533 * Allocate memory for receive buffers and descriptor rings. Replace
1533 1534 * allocation functions with interface functions provided by the
1534 1535 * partition manager when it is available.
1535 1536 */
1536 1537 /*
1537 1538 * Allocate memory for the receive buffer blocks.
1538 1539 */
1539 1540 for (i = 0; i < ndmas; i++) {
1540 1541 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1541 1542 " hxge_alloc_rx_mem_pool to alloc mem: "
1542 1543 " dma %d dma_buf_p %llx &dma_buf_p %llx",
1543 1544 i, dma_buf_p[i], &dma_buf_p[i]));
1544 1545
1545 1546 num_chunks[i] = 0;
1546 1547
1547 1548 status = hxge_alloc_rx_buf_dma(hxgep, st_rdc, &dma_buf_p[i],
1548 1549 rx_buf_alloc_size, hxgep->rx_default_block_size,
1549 1550 &num_chunks[i]);
1550 1551 if (status != HXGE_OK) {
1551 1552 break;
1552 1553 }
1553 1554
1554 1555 st_rdc++;
1555 1556 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1556 1557 " hxge_alloc_rx_mem_pool DONE alloc mem: "
1557 1558 "dma %d dma_buf_p %llx &dma_buf_p %llx", i,
1558 1559 dma_buf_p[i], &dma_buf_p[i]));
1559 1560 }
1560 1561
1561 1562 if (i < ndmas) {
1562 1563 goto hxge_alloc_rx_mem_fail1;
1563 1564 }
1564 1565
1565 1566 /*
1566 1567 * Allocate memory for descriptor rings and mailbox.
1567 1568 */
1568 1569 st_rdc = p_cfgp->start_rdc;
1569 1570 for (j = 0; j < ndmas; j++) {
1570 1571 if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1571 1572 &dma_rbr_cntl_p[j], &hxge_rx_rbr_desc_dma_attr,
1572 1573 rx_rbr_cntl_alloc_size)) != HXGE_OK) {
1573 1574 break;
1574 1575 }
1575 1576
1576 1577 if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1577 1578 &dma_rcr_cntl_p[j], &hxge_rx_rcr_desc_dma_attr,
1578 1579 rx_rcr_cntl_alloc_size)) != HXGE_OK) {
1579 1580 break;
1580 1581 }
1581 1582
1582 1583 if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1583 1584 &dma_mbox_cntl_p[j], &hxge_rx_mbox_dma_attr,
1584 1585 rx_mbox_cntl_alloc_size)) != HXGE_OK) {
1585 1586 break;
1586 1587 }
1587 1588 st_rdc++;
1588 1589 }
1589 1590
1590 1591 if (j < ndmas) {
1591 1592 goto hxge_alloc_rx_mem_fail2;
1592 1593 }
1593 1594
1594 1595 dma_poolp->ndmas = ndmas;
1595 1596 dma_poolp->num_chunks = num_chunks;
1596 1597 dma_poolp->buf_allocated = B_TRUE;
1597 1598 hxgep->rx_buf_pool_p = dma_poolp;
1598 1599 dma_poolp->dma_buf_pool_p = dma_buf_p;
1599 1600
1600 1601 dma_rbr_cntl_poolp->ndmas = ndmas;
1601 1602 dma_rbr_cntl_poolp->buf_allocated = B_TRUE;
1602 1603 hxgep->rx_rbr_cntl_pool_p = dma_rbr_cntl_poolp;
1603 1604 dma_rbr_cntl_poolp->dma_buf_pool_p = dma_rbr_cntl_p;
1604 1605
1605 1606 dma_rcr_cntl_poolp->ndmas = ndmas;
1606 1607 dma_rcr_cntl_poolp->buf_allocated = B_TRUE;
1607 1608 hxgep->rx_rcr_cntl_pool_p = dma_rcr_cntl_poolp;
1608 1609 dma_rcr_cntl_poolp->dma_buf_pool_p = dma_rcr_cntl_p;
1609 1610
1610 1611 dma_mbox_cntl_poolp->ndmas = ndmas;
1611 1612 dma_mbox_cntl_poolp->buf_allocated = B_TRUE;
1612 1613 hxgep->rx_mbox_cntl_pool_p = dma_mbox_cntl_poolp;
1613 1614 dma_mbox_cntl_poolp->dma_buf_pool_p = dma_mbox_cntl_p;
1614 1615
1615 1616 goto hxge_alloc_rx_mem_pool_exit;
1616 1617
1617 1618 hxge_alloc_rx_mem_fail2:
1618 1619 /* Free control buffers */
1619 1620 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1620 1621 "==> hxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
1621 1622 for (; j >= 0; j--) {
1622 1623 hxge_free_rx_cntl_dma(hxgep,
1623 1624 (p_hxge_dma_common_t)dma_rbr_cntl_p[j]);
1624 1625 hxge_free_rx_cntl_dma(hxgep,
1625 1626 (p_hxge_dma_common_t)dma_rcr_cntl_p[j]);
1626 1627 hxge_free_rx_cntl_dma(hxgep,
1627 1628 (p_hxge_dma_common_t)dma_mbox_cntl_p[j]);
1628 1629 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1629 1630 "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1630 1631 }
1631 1632 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1632 1633 "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1633 1634
1634 1635 hxge_alloc_rx_mem_fail1:
1635 1636 /* Free data buffers */
1636 1637 i--;
1637 1638 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1638 1639 "==> hxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
1639 1640 for (; i >= 0; i--) {
1640 1641 hxge_free_rx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
1641 1642 num_chunks[i]);
1642 1643 }
1643 1644 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1644 1645 "==> hxge_alloc_rx_mem_pool: data bufs freed (%d)", i));
1645 1646
1646 1647 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1647 1648 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1648 1649 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1649 1650 KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1650 1651 KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1651 1652 KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1652 1653 KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1653 1654 KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1654 1655 KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1655 1656
1656 1657 hxge_alloc_rx_mem_pool_exit:
1657 1658 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1658 1659 "<== hxge_alloc_rx_mem_pool:status 0x%08x", status));
1659 1660
1660 1661 return (status);
1661 1662 }
1662 1663
/*
 * hxge_free_rx_mem_pool() -- release everything allocated by
 * hxge_alloc_rx_mem_pool(): per-channel data buffers, the
 * RBR/RCR/mailbox control memory, the per-channel descriptor arrays,
 * and the four pool wrappers themselves.  Returns early (freeing
 * nothing) if any of the four pools is missing or never completed
 * allocation (buf_allocated clear).
 */
static void
hxge_free_rx_mem_pool(p_hxge_t hxgep)
{
	uint32_t i, ndmas;
	p_hxge_dma_pool_t dma_poolp;
	p_hxge_dma_common_t *dma_buf_p;
	p_hxge_dma_pool_t dma_rbr_cntl_poolp;
	p_hxge_dma_common_t *dma_rbr_cntl_p;
	p_hxge_dma_pool_t dma_rcr_cntl_poolp;
	p_hxge_dma_common_t *dma_rcr_cntl_p;
	p_hxge_dma_pool_t dma_mbox_cntl_poolp;
	p_hxge_dma_common_t *dma_mbox_cntl_p;
	uint32_t *num_chunks;

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_free_rx_mem_pool"));

	dma_poolp = hxgep->rx_buf_pool_p;
	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool "
		    "(null rx buf pool or buf not allocated"));
		return;
	}

	dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p;
	if (dma_rbr_cntl_poolp == NULL ||
	    (!dma_rbr_cntl_poolp->buf_allocated)) {
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    "<== hxge_free_rx_mem_pool "
		    "(null rbr cntl buf pool or rbr cntl buf not allocated"));
		return;
	}

	dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p;
	if (dma_rcr_cntl_poolp == NULL ||
	    (!dma_rcr_cntl_poolp->buf_allocated)) {
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    "<== hxge_free_rx_mem_pool "
		    "(null rcr cntl buf pool or rcr cntl buf not allocated"));
		return;
	}

	dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p;
	if (dma_mbox_cntl_poolp == NULL ||
	    (!dma_mbox_cntl_poolp->buf_allocated)) {
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    "<== hxge_free_rx_mem_pool "
		    "(null mbox cntl buf pool or mbox cntl buf not allocated"));
		return;
	}

	dma_buf_p = dma_poolp->dma_buf_pool_p;
	num_chunks = dma_poolp->num_chunks;

	dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p;
	dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p;
	dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p;
	ndmas = dma_rbr_cntl_poolp->ndmas;

	/* Free the DMA memory: data buffers first, then control memory. */
	for (i = 0; i < ndmas; i++) {
		hxge_free_rx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
	}

	for (i = 0; i < ndmas; i++) {
		hxge_free_rx_cntl_dma(hxgep, dma_rbr_cntl_p[i]);
		hxge_free_rx_cntl_dma(hxgep, dma_rcr_cntl_p[i]);
		hxge_free_rx_cntl_dma(hxgep, dma_mbox_cntl_p[i]);
	}

	/* Free the per-channel descriptor/chunk arrays. */
	for (i = 0; i < ndmas; i++) {
		KMEM_FREE(dma_buf_p[i],
		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
		KMEM_FREE(dma_rbr_cntl_p[i], sizeof (hxge_dma_common_t));
		KMEM_FREE(dma_rcr_cntl_p[i], sizeof (hxge_dma_common_t));
		KMEM_FREE(dma_mbox_cntl_p[i], sizeof (hxge_dma_common_t));
	}

	/* Finally, free the pointer arrays and pool wrappers. */
	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
	KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));

	hxgep->rx_buf_pool_p = NULL;
	hxgep->rx_rbr_cntl_pool_p = NULL;
	hxgep->rx_rcr_cntl_pool_p = NULL;
	hxgep->rx_mbox_cntl_pool_p = NULL;

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool"));
}
1756 1757
/*
 * hxge_alloc_rx_buf_dma() -- allocate the receive data buffers for one
 * RX DMA channel as up to HXGE_DMA_BLOCK chunks.
 *
 * Picks the smallest entry in alloc_sizes[] that covers alloc_size
 * (or the largest entry if none does), then keeps allocating chunks --
 * dropping to the next smaller size whenever an allocation fails --
 * until the total requirement is met, chunks run out, or no size works.
 * On success *dmap receives the chunk array and *num_chunks the number
 * of chunks actually used.
 */
static hxge_status_t
hxge_alloc_rx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
    p_hxge_dma_common_t *dmap,
    size_t alloc_size, size_t block_size, uint32_t *num_chunks)
{
	p_hxge_dma_common_t rx_dmap;
	hxge_status_t status = HXGE_OK;
	size_t total_alloc_size;
	size_t allocated = 0;
	int i, size_index, array_size;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_buf_dma"));

	rx_dmap = (p_hxge_dma_common_t)
	    KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
	    " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
	    dma_channel, alloc_size, block_size, dmap));

	total_alloc_size = alloc_size;

	/* Find the starting chunk size: smallest entry >= alloc_size. */
	i = 0;
	size_index = 0;
	array_size = sizeof (alloc_sizes) / sizeof (size_t);
	while ((size_index < array_size) &&
	    (alloc_sizes[size_index] < alloc_size))
		size_index++;
	if (size_index >= array_size) {
		size_index = array_size - 1;
	}

	while ((allocated < total_alloc_size) &&
	    (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
		rx_dmap[i].dma_chunk_index = i;
		rx_dmap[i].block_size = block_size;
		rx_dmap[i].alength = alloc_sizes[size_index];
		rx_dmap[i].orig_alength = rx_dmap[i].alength;
		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
		rx_dmap[i].dma_channel = dma_channel;
		rx_dmap[i].contig_alloc_type = B_FALSE;

		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
		    "i %d nblocks %d alength %d",
		    dma_channel, i, &rx_dmap[i], block_size,
		    i, rx_dmap[i].nblocks, rx_dmap[i].alength));
		status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
		    &hxge_rx_dma_attr, rx_dmap[i].alength,
		    &hxge_dev_buf_dma_acc_attr,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    (p_hxge_dma_common_t)(&rx_dmap[i]));
		if (status != HXGE_OK) {
			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
			    " hxge_alloc_rx_buf_dma: Alloc Failed: "
			    " for size: %d", alloc_sizes[size_index]));
			/* Retry this chunk at the next smaller size. */
			size_index--;
		} else {
			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
			    " alloc_rx_buf_dma allocated rdc %d "
			    "chunk %d size %x dvma %x bufp %llx ",
			    dma_channel, i, rx_dmap[i].alength,
			    rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
			i++;
			allocated += alloc_sizes[size_index];
		}
	}

	if (allocated < total_alloc_size) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " hxge_alloc_rx_buf_dma failed due to"
		    " allocated(%d) < required(%d)",
		    allocated, total_alloc_size));
		goto hxge_alloc_rx_mem_fail1;
	}

	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    " alloc_rx_buf_dma rdc %d allocated %d chunks", dma_channel, i));

	*num_chunks = i;
	*dmap = rx_dmap;

	goto hxge_alloc_rx_mem_exit;

hxge_alloc_rx_mem_fail1:
	/*
	 * NOTE(review): this frees only the chunk array itself; DMA memory
	 * already obtained via hxge_dma_mem_alloc() for chunks 0..i-1 is
	 * not released here -- verify whether this error path leaks it.
	 */
	KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);

hxge_alloc_rx_mem_exit:
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "<== hxge_alloc_rx_buf_dma status 0x%08x", status));

	return (status);
}
1850 1851
1851 1852 /*ARGSUSED*/
1852 1853 static void
1853 1854 hxge_free_rx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
1854 1855 uint32_t num_chunks)
1855 1856 {
1856 1857 int i;
1857 1858
1858 1859 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1859 1860 "==> hxge_free_rx_buf_dma: # of chunks %d", num_chunks));
1860 1861
1861 1862 for (i = 0; i < num_chunks; i++) {
1862 1863 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1863 1864 "==> hxge_free_rx_buf_dma: chunk %d dmap 0x%llx", i, dmap));
1864 1865 hxge_dma_mem_free(dmap++);
1865 1866 }
1866 1867
1867 1868 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_buf_dma"));
1868 1869 }
1869 1870
1870 1871 /*ARGSUSED*/
1871 1872 static hxge_status_t
1872 1873 hxge_alloc_rx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
1873 1874 p_hxge_dma_common_t *dmap, struct ddi_dma_attr *attr, size_t size)
1874 1875 {
1875 1876 p_hxge_dma_common_t rx_dmap;
1876 1877 hxge_status_t status = HXGE_OK;
1877 1878
1878 1879 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_cntl_dma"));
1879 1880
1880 1881 rx_dmap = (p_hxge_dma_common_t)
1881 1882 KMEM_ZALLOC(sizeof (hxge_dma_common_t), KM_SLEEP);
1882 1883
1883 1884 rx_dmap->contig_alloc_type = B_FALSE;
1884 1885
1885 1886 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1886 1887 attr, size, &hxge_dev_desc_dma_acc_attr,
1887 1888 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, rx_dmap);
1888 1889 if (status != HXGE_OK) {
1889 1890 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1890 1891 " hxge_alloc_rx_cntl_dma: Alloc Failed: "
1891 1892 " for size: %d", size));
1892 1893 goto hxge_alloc_rx_cntl_dma_fail1;
1893 1894 }
1894 1895
1895 1896 *dmap = rx_dmap;
1896 1897
1897 1898 goto hxge_alloc_rx_cntl_dma_exit;
1898 1899
1899 1900 hxge_alloc_rx_cntl_dma_fail1:
1900 1901 KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t));
1901 1902
1902 1903 hxge_alloc_rx_cntl_dma_exit:
1903 1904 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1904 1905 "<== hxge_alloc_rx_cntl_dma status 0x%08x", status));
1905 1906
1906 1907 return (status);
1907 1908 }
1908 1909
/*
 * hxge_free_rx_cntl_dma() -- release the DMA memory backing one receive
 * control (descriptor/mailbox) area set up by hxge_alloc_rx_cntl_dma().
 * Only the DMA resources are freed here; the hxge_dma_common_t struct
 * itself remains for the caller to dispose of.
 */
/*ARGSUSED*/
static void
hxge_free_rx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
{
	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_rx_cntl_dma"));

	hxge_dma_mem_free(dmap);

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_rx_cntl_dma"));
}
1919 1920
1920 1921 static hxge_status_t
1921 1922 hxge_alloc_tx_mem_pool(p_hxge_t hxgep)
1922 1923 {
1923 1924 hxge_status_t status = HXGE_OK;
1924 1925 int i, j;
1925 1926 uint32_t ndmas, st_tdc;
1926 1927 p_hxge_dma_pt_cfg_t p_all_cfgp;
1927 1928 p_hxge_hw_pt_cfg_t p_cfgp;
1928 1929 p_hxge_dma_pool_t dma_poolp;
1929 1930 p_hxge_dma_common_t *dma_buf_p;
1930 1931 p_hxge_dma_pool_t dma_cntl_poolp;
1931 1932 p_hxge_dma_common_t *dma_cntl_p;
1932 1933 size_t tx_buf_alloc_size;
1933 1934 size_t tx_cntl_alloc_size;
1934 1935 uint32_t *num_chunks; /* per dma */
1935 1936
1936 1937 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool"));
1937 1938
1938 1939 p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1939 1940 p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1940 1941 st_tdc = p_cfgp->start_tdc;
1941 1942 ndmas = p_cfgp->max_tdcs;
1942 1943
1943 1944 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool: "
1944 1945 "p_cfgp 0x%016llx start_tdc %d ndmas %d hxgep->max_tdcs %d",
1945 1946 p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, hxgep->max_tdcs));
1946 1947 /*
1947 1948 * Allocate memory for each transmit DMA channel.
1948 1949 */
1949 1950 dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1950 1951 KM_SLEEP);
1951 1952 dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1952 1953 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1953 1954
1954 1955 dma_cntl_poolp = (p_hxge_dma_pool_t)
1955 1956 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1956 1957 dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1957 1958 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1958 1959
1959 1960 hxgep->hxge_port_tx_ring_size = hxge_tx_ring_size;
1960 1961
1961 1962 /*
1962 1963 * Assume that each DMA channel will be configured with default
1963 1964 * transmit bufer size for copying transmit data. (For packet payload
1964 1965 * over this limit, packets will not be copied.)
1965 1966 */
1966 1967 tx_buf_alloc_size = (hxge_bcopy_thresh * hxge_tx_ring_size);
1967 1968
1968 1969 /*
1969 1970 * Addresses of transmit descriptor ring and the mailbox must be all
1970 1971 * cache-aligned (64 bytes).
1971 1972 */
1972 1973 tx_cntl_alloc_size = hxge_tx_ring_size;
1973 1974 tx_cntl_alloc_size *= (sizeof (tx_desc_t));
1974 1975 tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
1975 1976
1976 1977 num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1977 1978 KM_SLEEP);
1978 1979
1979 1980 /*
1980 1981 * Allocate memory for transmit buffers and descriptor rings. Replace
1981 1982 * allocation functions with interface functions provided by the
1982 1983 * partition manager when it is available.
1983 1984 *
1984 1985 * Allocate memory for the transmit buffer pool.
1985 1986 */
1986 1987 for (i = 0; i < ndmas; i++) {
1987 1988 num_chunks[i] = 0;
1988 1989 status = hxge_alloc_tx_buf_dma(hxgep, st_tdc, &dma_buf_p[i],
1989 1990 tx_buf_alloc_size, hxge_bcopy_thresh, &num_chunks[i]);
1990 1991 if (status != HXGE_OK) {
1991 1992 break;
1992 1993 }
1993 1994 st_tdc++;
1994 1995 }
1995 1996
1996 1997 if (i < ndmas) {
1997 1998 goto hxge_alloc_tx_mem_pool_fail1;
1998 1999 }
1999 2000
2000 2001 st_tdc = p_cfgp->start_tdc;
2001 2002
2002 2003 /*
2003 2004 * Allocate memory for descriptor rings and mailbox.
2004 2005 */
2005 2006 for (j = 0; j < ndmas; j++) {
2006 2007 status = hxge_alloc_tx_cntl_dma(hxgep, st_tdc, &dma_cntl_p[j],
2007 2008 tx_cntl_alloc_size);
2008 2009 if (status != HXGE_OK) {
2009 2010 break;
2010 2011 }
2011 2012 st_tdc++;
2012 2013 }
2013 2014
2014 2015 if (j < ndmas) {
2015 2016 goto hxge_alloc_tx_mem_pool_fail2;
2016 2017 }
2017 2018
2018 2019 dma_poolp->ndmas = ndmas;
2019 2020 dma_poolp->num_chunks = num_chunks;
2020 2021 dma_poolp->buf_allocated = B_TRUE;
2021 2022 dma_poolp->dma_buf_pool_p = dma_buf_p;
2022 2023 hxgep->tx_buf_pool_p = dma_poolp;
2023 2024
2024 2025 dma_cntl_poolp->ndmas = ndmas;
2025 2026 dma_cntl_poolp->buf_allocated = B_TRUE;
2026 2027 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2027 2028 hxgep->tx_cntl_pool_p = dma_cntl_poolp;
2028 2029
2029 2030 HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2030 2031 "==> hxge_alloc_tx_mem_pool: start_tdc %d "
2031 2032 "ndmas %d poolp->ndmas %d", st_tdc, ndmas, dma_poolp->ndmas));
2032 2033
2033 2034 goto hxge_alloc_tx_mem_pool_exit;
2034 2035
2035 2036 hxge_alloc_tx_mem_pool_fail2:
2036 2037 /* Free control buffers */
2037 2038 j--;
2038 2039 for (; j >= 0; j--) {
2039 2040 hxge_free_tx_cntl_dma(hxgep,
2040 2041 (p_hxge_dma_common_t)dma_cntl_p[j]);
2041 2042 }
2042 2043
2043 2044 hxge_alloc_tx_mem_pool_fail1:
2044 2045 /* Free data buffers */
2045 2046 i--;
2046 2047 for (; i >= 0; i--) {
2047 2048 hxge_free_tx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
2048 2049 num_chunks[i]);
2049 2050 }
2050 2051
2051 2052 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2052 2053 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2053 2054 KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2054 2055 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2055 2056 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2056 2057
2057 2058 hxge_alloc_tx_mem_pool_exit:
2058 2059 HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2059 2060 "<== hxge_alloc_tx_mem_pool:status 0x%08x", status));
2060 2061
2061 2062 return (status);
2062 2063 }
2063 2064
2064 2065 static hxge_status_t
2065 2066 hxge_alloc_tx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
2066 2067 p_hxge_dma_common_t *dmap, size_t alloc_size,
2067 2068 size_t block_size, uint32_t *num_chunks)
2068 2069 {
2069 2070 p_hxge_dma_common_t tx_dmap;
2070 2071 hxge_status_t status = HXGE_OK;
2071 2072 size_t total_alloc_size;
2072 2073 size_t allocated = 0;
2073 2074 int i, size_index, array_size;
2074 2075
2075 2076 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_buf_dma"));
2076 2077
2077 2078 tx_dmap = (p_hxge_dma_common_t)
2078 2079 KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
2079 2080
2080 2081 total_alloc_size = alloc_size;
2081 2082 i = 0;
2082 2083 size_index = 0;
2083 2084 array_size = sizeof (alloc_sizes) / sizeof (size_t);
2084 2085 while ((size_index < array_size) &&
2085 2086 (alloc_sizes[size_index] < alloc_size))
2086 2087 size_index++;
2087 2088 if (size_index >= array_size) {
2088 2089 size_index = array_size - 1;
2089 2090 }
2090 2091
2091 2092 while ((allocated < total_alloc_size) &&
2092 2093 (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
2093 2094 tx_dmap[i].dma_chunk_index = i;
2094 2095 tx_dmap[i].block_size = block_size;
2095 2096 tx_dmap[i].alength = alloc_sizes[size_index];
2096 2097 tx_dmap[i].orig_alength = tx_dmap[i].alength;
2097 2098 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2098 2099 tx_dmap[i].dma_channel = dma_channel;
2099 2100 tx_dmap[i].contig_alloc_type = B_FALSE;
2100 2101
2101 2102 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2102 2103 &hxge_tx_dma_attr, tx_dmap[i].alength,
2103 2104 &hxge_dev_buf_dma_acc_attr,
2104 2105 DDI_DMA_WRITE | DDI_DMA_STREAMING,
2105 2106 (p_hxge_dma_common_t)(&tx_dmap[i]));
2106 2107 if (status != HXGE_OK) {
2107 2108 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2108 2109 " hxge_alloc_tx_buf_dma: Alloc Failed: "
2109 2110 " for size: %d", alloc_sizes[size_index]));
2110 2111 size_index--;
2111 2112 } else {
2112 2113 i++;
2113 2114 allocated += alloc_sizes[size_index];
2114 2115 }
2115 2116 }
2116 2117
2117 2118 if (allocated < total_alloc_size) {
2118 2119 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2119 2120 " hxge_alloc_tx_buf_dma: failed due to"
2120 2121 " allocated(%d) < required(%d)",
2121 2122 allocated, total_alloc_size));
2122 2123 goto hxge_alloc_tx_mem_fail1;
2123 2124 }
2124 2125
2125 2126 *num_chunks = i;
2126 2127 *dmap = tx_dmap;
2127 2128 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2128 2129 "==> hxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
2129 2130 *dmap, i));
2130 2131 goto hxge_alloc_tx_mem_exit;
2131 2132
2132 2133 hxge_alloc_tx_mem_fail1:
2133 2134 KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2134 2135
2135 2136 hxge_alloc_tx_mem_exit:
2136 2137 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2137 2138 "<== hxge_alloc_tx_buf_dma status 0x%08x", status));
2138 2139
2139 2140 return (status);
2140 2141 }
2141 2142
2142 2143 /*ARGSUSED*/
2143 2144 static void
2144 2145 hxge_free_tx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
2145 2146 uint32_t num_chunks)
2146 2147 {
2147 2148 int i;
2148 2149
2149 2150 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_tx_buf_dma"));
2150 2151
2151 2152 for (i = 0; i < num_chunks; i++) {
2152 2153 hxge_dma_mem_free(dmap++);
2153 2154 }
2154 2155
2155 2156 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_tx_buf_dma"));
2156 2157 }
2157 2158
2158 2159 /*ARGSUSED*/
2159 2160 static hxge_status_t
2160 2161 hxge_alloc_tx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
2161 2162 p_hxge_dma_common_t *dmap, size_t size)
2162 2163 {
2163 2164 p_hxge_dma_common_t tx_dmap;
2164 2165 hxge_status_t status = HXGE_OK;
2165 2166
2166 2167 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_cntl_dma"));
2167 2168
2168 2169 tx_dmap = (p_hxge_dma_common_t)KMEM_ZALLOC(sizeof (hxge_dma_common_t),
2169 2170 KM_SLEEP);
2170 2171
2171 2172 tx_dmap->contig_alloc_type = B_FALSE;
2172 2173
2173 2174 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2174 2175 &hxge_tx_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr,
2175 2176 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, tx_dmap);
2176 2177 if (status != HXGE_OK) {
2177 2178 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2178 2179 " hxge_alloc_tx_cntl_dma: Alloc Failed: "
2179 2180 " for size: %d", size));
2180 2181 goto hxge_alloc_tx_cntl_dma_fail1;
2181 2182 }
2182 2183
2183 2184 *dmap = tx_dmap;
2184 2185
2185 2186 goto hxge_alloc_tx_cntl_dma_exit;
2186 2187
2187 2188 hxge_alloc_tx_cntl_dma_fail1:
2188 2189 KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t));
2189 2190
2190 2191 hxge_alloc_tx_cntl_dma_exit:
2191 2192 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2192 2193 "<== hxge_alloc_tx_cntl_dma status 0x%08x", status));
2193 2194
2194 2195 return (status);
2195 2196 }
2196 2197
/*
 * hxge_free_tx_cntl_dma() -- release the DMA memory backing one transmit
 * control area (descriptor ring + mailbox).  The hxge_dma_common_t
 * descriptor itself is not freed here; callers such as
 * hxge_free_tx_mem_pool() KMEM_FREE it separately.
 */
/*ARGSUSED*/
static void
hxge_free_tx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
{
	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_tx_cntl_dma"));

	hxge_dma_mem_free(dmap);

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_tx_cntl_dma"));
}
2207 2208
2208 2209 static void
2209 2210 hxge_free_tx_mem_pool(p_hxge_t hxgep)
2210 2211 {
2211 2212 uint32_t i, ndmas;
2212 2213 p_hxge_dma_pool_t dma_poolp;
2213 2214 p_hxge_dma_common_t *dma_buf_p;
2214 2215 p_hxge_dma_pool_t dma_cntl_poolp;
2215 2216 p_hxge_dma_common_t *dma_cntl_p;
2216 2217 uint32_t *num_chunks;
2217 2218
2218 2219 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_free_tx_mem_pool"));
2219 2220
2220 2221 dma_poolp = hxgep->tx_buf_pool_p;
2221 2222 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
2222 2223 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2223 2224 "<== hxge_free_tx_mem_pool "
2224 2225 "(null rx buf pool or buf not allocated"));
2225 2226 return;
2226 2227 }
2227 2228
2228 2229 dma_cntl_poolp = hxgep->tx_cntl_pool_p;
2229 2230 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
2230 2231 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2231 2232 "<== hxge_free_tx_mem_pool "
2232 2233 "(null tx cntl buf pool or cntl buf not allocated"));
2233 2234 return;
2234 2235 }
2235 2236
2236 2237 dma_buf_p = dma_poolp->dma_buf_pool_p;
2237 2238 num_chunks = dma_poolp->num_chunks;
2238 2239
2239 2240 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2240 2241 ndmas = dma_cntl_poolp->ndmas;
2241 2242
2242 2243 for (i = 0; i < ndmas; i++) {
2243 2244 hxge_free_tx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
2244 2245 }
2245 2246
2246 2247 for (i = 0; i < ndmas; i++) {
2247 2248 hxge_free_tx_cntl_dma(hxgep, dma_cntl_p[i]);
2248 2249 }
2249 2250
2250 2251 for (i = 0; i < ndmas; i++) {
2251 2252 KMEM_FREE(dma_buf_p[i],
2252 2253 sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2253 2254 KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t));
2254 2255 }
2255 2256
2256 2257 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2257 2258 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2258 2259 KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2259 2260 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2260 2261 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2261 2262
2262 2263 hxgep->tx_buf_pool_p = NULL;
2263 2264 hxgep->tx_cntl_pool_p = NULL;
2264 2265
2265 2266 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_free_tx_mem_pool"));
2266 2267 }
2267 2268
/*
 * hxge_dma_mem_alloc() -- allocate, map, and DMA-bind one memory region.
 *
 * Fills in dma_p with the DMA handle, access handle, kernel virtual
 * address (kaddrp), device-visible address (ioaddr_pp) and the actual
 * length obtained.  The binding must resolve to exactly one DMA cookie;
 * any other outcome is fully unwound.  Returns HXGE_OK, or HXGE_ERROR
 * (possibly OR-ed with HXGE_DDI_FAILED) with no resources left held.
 *
 * NOTE(review): the `method' parameter is accepted but never used here
 * (hence ARGSUSED).
 */
/*ARGSUSED*/
static hxge_status_t
hxge_dma_mem_alloc(p_hxge_t hxgep, dma_method_t method,
    struct ddi_dma_attr *dma_attrp,
    size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
    p_hxge_dma_common_t dma_p)
{
	caddr_t kaddrp;
	int ddi_status = DDI_SUCCESS;

	/* Start from a clean slate so the error paths can test handles. */
	dma_p->dma_handle = NULL;
	dma_p->acc_handle = NULL;
	dma_p->kaddrp = NULL;

	/* Step 1: DMA handle. */
	ddi_status = ddi_dma_alloc_handle(hxgep->dip, dma_attrp,
	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	/* Step 2: backing memory; alength receives the real size. */
	ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, acc_attr_p,
	    xfer_flags, DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
	    &dma_p->acc_handle);
	if (ddi_status != DDI_SUCCESS) {
		/* The caller will decide whether it is fatal */
		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
		    "hxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
		ddi_dma_free_handle(&dma_p->dma_handle);
		dma_p->dma_handle = NULL;
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	/* Reject a grant smaller than requested; unwind steps 1-2. */
	if (dma_p->alength < length) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_dma_mem_alloc:ddi_dma_mem_alloc < length."));
		ddi_dma_mem_free(&dma_p->acc_handle);
		ddi_dma_free_handle(&dma_p->dma_handle);
		dma_p->acc_handle = NULL;
		dma_p->dma_handle = NULL;
		return (HXGE_ERROR);
	}

	/* Step 3: bind the memory to the handle for DMA. */
	ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
	    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
	    &dma_p->dma_cookie, &dma_p->ncookies);
	if (ddi_status != DDI_MAPPED) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_dma_mem_alloc:di_dma_addr_bind failed "
		    "(staus 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
		if (dma_p->acc_handle) {
			ddi_dma_mem_free(&dma_p->acc_handle);
			dma_p->acc_handle = NULL;
		}
		ddi_dma_free_handle(&dma_p->dma_handle);
		dma_p->dma_handle = NULL;
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	/* Exactly one cookie required; otherwise unbind and unwind all. */
	if (dma_p->ncookies != 1) {
		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
		    "hxge_dma_mem_alloc:ddi_dma_addr_bind > 1 cookie"
		    "(staus 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
		if (dma_p->acc_handle) {
			ddi_dma_mem_free(&dma_p->acc_handle);
			dma_p->acc_handle = NULL;
		}
		(void) ddi_dma_unbind_handle(dma_p->dma_handle);
		ddi_dma_free_handle(&dma_p->dma_handle);
		dma_p->dma_handle = NULL;
		return (HXGE_ERROR);
	}

	dma_p->kaddrp = kaddrp;
	/* On 32-bit x86 the 64-bit cookie address is truncated to 32 bits. */
#if defined(__i386)
	dma_p->ioaddr_pp =
	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
#else
	dma_p->ioaddr_pp = (unsigned char *) dma_p->dma_cookie.dmac_laddress;
#endif

	HPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_dma_mem_alloc: "
	    "dma buffer allocated: dma_p $%p "
	    "return dmac_ladress from cookie $%p dmac_size %d "
	    "dma_p->ioaddr_p $%p "
	    "dma_p->orig_ioaddr_p $%p "
	    "orig_vatopa $%p "
	    "alength %d (0x%x) "
	    "kaddrp $%p "
	    "length %d (0x%x)",
	    dma_p,
	    dma_p->dma_cookie.dmac_laddress,
	    dma_p->dma_cookie.dmac_size,
	    dma_p->ioaddr_pp,
	    dma_p->orig_ioaddr_pp,
	    dma_p->orig_vatopa,
	    dma_p->alength, dma_p->alength,
	    kaddrp,
	    length, length));

	return (HXGE_OK);
}
2373 2374
2374 2375 static void
2375 2376 hxge_dma_mem_free(p_hxge_dma_common_t dma_p)
2376 2377 {
2377 2378 if (dma_p == NULL)
2378 2379 return;
2379 2380
2380 2381 if (dma_p->dma_handle != NULL) {
2381 2382 if (dma_p->ncookies) {
2382 2383 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
2383 2384 dma_p->ncookies = 0;
2384 2385 }
2385 2386 ddi_dma_free_handle(&dma_p->dma_handle);
2386 2387 dma_p->dma_handle = NULL;
2387 2388 }
2388 2389
2389 2390 if (dma_p->acc_handle != NULL) {
2390 2391 ddi_dma_mem_free(&dma_p->acc_handle);
2391 2392 dma_p->acc_handle = NULL;
2392 2393 HPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
2393 2394 }
2394 2395
2395 2396 dma_p->kaddrp = NULL;
2396 2397 dma_p->alength = NULL;
2397 2398 }
2398 2399
2399 2400 /*
2400 2401 * hxge_m_start() -- start transmitting and receiving.
2401 2402 *
2402 2403 * This function is called by the MAC layer when the first
2403 2404 * stream is open to prepare the hardware ready for sending
2404 2405 * and transmitting packets.
2405 2406 */
2406 2407 static int
2407 2408 hxge_m_start(void *arg)
2408 2409 {
2409 2410 p_hxge_t hxgep = (p_hxge_t)arg;
2410 2411
2411 2412 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_start"));
2412 2413
2413 2414 MUTEX_ENTER(hxgep->genlock);
2414 2415
2415 2416 if (hxge_init(hxgep) != DDI_SUCCESS) {
2416 2417 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2417 2418 "<== hxge_m_start: initialization failed"));
2418 2419 MUTEX_EXIT(hxgep->genlock);
2419 2420 return (EIO);
2420 2421 }
2421 2422
2422 2423 if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) {
2423 2424 /*
2424 2425 * Start timer to check the system error and tx hangs
2425 2426 */
2426 2427 hxgep->hxge_timerid = hxge_start_timer(hxgep,
2427 2428 hxge_check_hw_state, HXGE_CHECK_TIMER);
2428 2429
2429 2430 hxgep->hxge_mac_state = HXGE_MAC_STARTED;
2430 2431
2431 2432 hxgep->timeout.link_status = 0;
2432 2433 hxgep->timeout.report_link_status = B_TRUE;
2433 2434 hxgep->timeout.ticks = drv_usectohz(2 * 1000000);
2434 2435
2435 2436 /* Start the link status timer to check the link status */
2436 2437 MUTEX_ENTER(&hxgep->timeout.lock);
2437 2438 hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
2438 2439 hxgep->timeout.ticks);
2439 2440 MUTEX_EXIT(&hxgep->timeout.lock);
2440 2441 }
2441 2442
2442 2443 MUTEX_EXIT(hxgep->genlock);
2443 2444
2444 2445 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_start"));
2445 2446
2446 2447 return (0);
2447 2448 }
2448 2449
2449 2450 /*
2450 2451 * hxge_m_stop(): stop transmitting and receiving.
2451 2452 */
2452 2453 static void
2453 2454 hxge_m_stop(void *arg)
2454 2455 {
2455 2456 p_hxge_t hxgep = (p_hxge_t)arg;
2456 2457
2457 2458 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_stop"));
2458 2459
2459 2460 if (hxgep->hxge_timerid) {
2460 2461 hxge_stop_timer(hxgep, hxgep->hxge_timerid);
2461 2462 hxgep->hxge_timerid = 0;
2462 2463 }
2463 2464
2464 2465 /* Stop the link status timer before unregistering */
2465 2466 MUTEX_ENTER(&hxgep->timeout.lock);
2466 2467 if (hxgep->timeout.id) {
2467 2468 (void) untimeout(hxgep->timeout.id);
2468 2469 hxgep->timeout.id = 0;
2469 2470 }
2470 2471 hxge_link_update(hxgep, LINK_STATE_DOWN);
2471 2472 MUTEX_EXIT(&hxgep->timeout.lock);
2472 2473
2473 2474 MUTEX_ENTER(hxgep->genlock);
2474 2475
2475 2476 hxge_uninit(hxgep);
2476 2477
2477 2478 hxgep->hxge_mac_state = HXGE_MAC_STOPPED;
2478 2479
2479 2480 MUTEX_EXIT(hxgep->genlock);
2480 2481
2481 2482 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_stop"));
2482 2483 }
2483 2484
2484 2485 static int
2485 2486 hxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
2486 2487 {
2487 2488 p_hxge_t hxgep = (p_hxge_t)arg;
2488 2489 struct ether_addr addrp;
2489 2490
2490 2491 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_multicst: add %d", add));
2491 2492
2492 2493 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
2493 2494
2494 2495 if (add) {
2495 2496 if (hxge_add_mcast_addr(hxgep, &addrp)) {
2496 2497 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2497 2498 "<== hxge_m_multicst: add multicast failed"));
2498 2499 return (EINVAL);
2499 2500 }
2500 2501 } else {
2501 2502 if (hxge_del_mcast_addr(hxgep, &addrp)) {
2502 2503 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2503 2504 "<== hxge_m_multicst: del multicast failed"));
2504 2505 return (EINVAL);
2505 2506 }
2506 2507 }
2507 2508
2508 2509 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_multicst"));
2509 2510
2510 2511 return (0);
2511 2512 }
2512 2513
2513 2514 static int
2514 2515 hxge_m_promisc(void *arg, boolean_t on)
2515 2516 {
2516 2517 p_hxge_t hxgep = (p_hxge_t)arg;
2517 2518
2518 2519 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_promisc: on %d", on));
2519 2520
2520 2521 if (hxge_set_promisc(hxgep, on)) {
2521 2522 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2522 2523 "<== hxge_m_promisc: set promisc failed"));
2523 2524 return (EINVAL);
2524 2525 }
2525 2526
2526 2527 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_promisc: on %d", on));
2527 2528
2528 2529 return (0);
2529 2530 }
2530 2531
/*
 * hxge_m_ioctl() -- GLD ioctl entry point.
 *
 * The first switch decides whether the command requires network
 * configuration privilege (checked via secpolicy_net_config); the
 * second switch dispatches to the ND-parameter, loopback, or
 * hardware-specific ioctl handlers.  Unknown commands are NAKed
 * with EINVAL.
 */
static void
hxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	p_hxge_t hxgep = (p_hxge_t)arg;
	struct iocblk *iocp = (struct iocblk *)mp->b_rptr;
	boolean_t need_privilege;
	int err;
	int cmd;

	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl"));

	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;	/* default: privileged */
	cmd = iocp->ioc_cmd;

	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl: cmd 0x%08x", cmd));
	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl: invalid"));
		return;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		break;

	case LB_SET_MODE:
		/* setting loopback mode stays privileged */
		break;

	case ND_GET:
		need_privilege = B_FALSE;
		break;
	case ND_SET:
		/* setting ND parameters stays privileged */
		break;

	case HXGE_GET_TX_RING_SZ:
	case HXGE_GET_TX_DESC:
	case HXGE_TX_SIDE_RESET:
	case HXGE_RX_SIDE_RESET:
	case HXGE_GLOBAL_RESET:
	case HXGE_RESET_MAC:
	case HXGE_PUT_TCAM:
	case HXGE_GET_TCAM:
	case HXGE_RTRACE:

		need_privilege = B_FALSE;
		break;
	}

	if (need_privilege) {
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			miocnak(wq, mp, 0, err);
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "<== hxge_m_ioctl: no priv"));
			return;
		}
	}

	switch (cmd) {
	case ND_GET:
		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_GET command"));
		/* FALLTHROUGH -- ND_GET and ND_SET share one handler */
	case ND_SET:
		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_SET command"));
		hxge_param_ioctl(hxgep, wq, mp, iocp);
		break;

	case LB_GET_MODE:
	case LB_SET_MODE:
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
		hxge_loopback_ioctl(hxgep, wq, mp, iocp);
		break;

	case HXGE_PUT_TCAM:
	case HXGE_GET_TCAM:
	case HXGE_GET_TX_RING_SZ:
	case HXGE_GET_TX_DESC:
	case HXGE_TX_SIDE_RESET:
	case HXGE_RX_SIDE_RESET:
	case HXGE_GLOBAL_RESET:
	case HXGE_RESET_MAC:
		HXGE_DEBUG_MSG((hxgep, NEMO_CTL,
		    "==> hxge_m_ioctl: cmd 0x%x", cmd));
		hxge_hw_ioctl(hxgep, wq, mp, iocp);
		break;
	}

	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl"));
}
2624 2625
2625 2626 /*ARGSUSED*/
2626 2627 static int
2627 2628 hxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
2628 2629 {
2629 2630 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2630 2631 p_hxge_t hxgep;
2631 2632 p_tx_ring_t ring;
2632 2633
2633 2634 ASSERT(rhp != NULL);
2634 2635 ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_TDCS));
2635 2636
2636 2637 hxgep = rhp->hxgep;
2637 2638
2638 2639 /*
2639 2640 * Get the ring pointer.
2640 2641 */
2641 2642 ring = hxgep->tx_rings->rings[rhp->index];
2642 2643
2643 2644 /*
2644 2645 * Fill in the handle for the transmit.
2645 2646 */
2646 2647 MUTEX_ENTER(&ring->lock);
2647 2648 rhp->started = B_TRUE;
2648 2649 ring->ring_handle = rhp->ring_handle;
2649 2650 MUTEX_EXIT(&ring->lock);
2650 2651
2651 2652 return (0);
2652 2653 }
2653 2654
2654 2655 static void
2655 2656 hxge_tx_ring_stop(mac_ring_driver_t rdriver)
2656 2657 {
2657 2658 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2658 2659 p_hxge_t hxgep;
2659 2660 p_tx_ring_t ring;
2660 2661
2661 2662 ASSERT(rhp != NULL);
2662 2663 ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_TDCS));
2663 2664
2664 2665 hxgep = rhp->hxgep;
2665 2666 ring = hxgep->tx_rings->rings[rhp->index];
2666 2667
2667 2668 MUTEX_ENTER(&ring->lock);
2668 2669 ring->ring_handle = (mac_ring_handle_t)NULL;
2669 2670 rhp->started = B_FALSE;
2670 2671 MUTEX_EXIT(&ring->lock);
2671 2672 }
2672 2673
2673 2674 static int
2674 2675 hxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
2675 2676 {
2676 2677 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2677 2678 p_hxge_t hxgep;
2678 2679 p_rx_rcr_ring_t ring;
2679 2680 int i;
2680 2681
2681 2682 ASSERT(rhp != NULL);
2682 2683 ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_TDCS));
2683 2684
2684 2685 hxgep = rhp->hxgep;
2685 2686
2686 2687 /*
2687 2688 * Get pointer to ring.
2688 2689 */
2689 2690 ring = hxgep->rx_rcr_rings->rcr_rings[rhp->index];
2690 2691
2691 2692 MUTEX_ENTER(&ring->lock);
2692 2693
2693 2694 if (rhp->started) {
2694 2695 MUTEX_EXIT(&ring->lock);
2695 2696 return (0);
2696 2697 }
2697 2698
2698 2699 /*
2699 2700 * Set the ldvp and ldgp pointers to enable/disable
2700 2701 * polling.
2701 2702 */
2702 2703 for (i = 0; i < hxgep->ldgvp->maxldvs; i++) {
2703 2704 if ((hxgep->ldgvp->ldvp[i].is_rxdma == 1) &&
2704 2705 (hxgep->ldgvp->ldvp[i].channel == rhp->index)) {
2705 2706 ring->ldvp = &hxgep->ldgvp->ldvp[i];
2706 2707 ring->ldgp = hxgep->ldgvp->ldvp[i].ldgp;
2707 2708 break;
2708 2709 }
2709 2710 }
2710 2711
2711 2712 rhp->started = B_TRUE;
2712 2713 ring->rcr_mac_handle = rhp->ring_handle;
2713 2714 ring->rcr_gen_num = mr_gen_num;
2714 2715 MUTEX_EXIT(&ring->lock);
2715 2716
2716 2717 return (0);
2717 2718 }
2718 2719
2719 2720 static void
2720 2721 hxge_rx_ring_stop(mac_ring_driver_t rdriver)
2721 2722 {
2722 2723 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2723 2724 p_hxge_t hxgep;
2724 2725 p_rx_rcr_ring_t ring;
2725 2726
2726 2727 ASSERT(rhp != NULL);
2727 2728 ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_TDCS));
2728 2729
2729 2730 hxgep = rhp->hxgep;
2730 2731 ring = hxgep->rx_rcr_rings->rcr_rings[rhp->index];
2731 2732
2732 2733 MUTEX_ENTER(&ring->lock);
2733 2734 rhp->started = B_TRUE;
2734 2735 ring->rcr_mac_handle = NULL;
2735 2736 ring->ldvp = NULL;
2736 2737 ring->ldgp = NULL;
2737 2738 MUTEX_EXIT(&ring->lock);
2738 2739 }
2739 2740
2740 2741 static int
2741 2742 hxge_rx_group_start(mac_group_driver_t gdriver)
2742 2743 {
2743 2744 hxge_ring_group_t *group = (hxge_ring_group_t *)gdriver;
2744 2745
2745 2746 ASSERT(group->hxgep != NULL);
2746 2747 ASSERT(group->hxgep->hxge_mac_state == HXGE_MAC_STARTED);
2747 2748
2748 2749 MUTEX_ENTER(group->hxgep->genlock);
2749 2750 group->started = B_TRUE;
2750 2751 MUTEX_EXIT(group->hxgep->genlock);
2751 2752
2752 2753 return (0);
2753 2754 }
2754 2755
2755 2756 static void
2756 2757 hxge_rx_group_stop(mac_group_driver_t gdriver)
2757 2758 {
2758 2759 hxge_ring_group_t *group = (hxge_ring_group_t *)gdriver;
2759 2760
2760 2761 ASSERT(group->hxgep != NULL);
2761 2762 ASSERT(group->hxgep->hxge_mac_state == HXGE_MAC_STARTED);
2762 2763 ASSERT(group->started == B_TRUE);
2763 2764
2764 2765 MUTEX_ENTER(group->hxgep->genlock);
2765 2766 group->started = B_FALSE;
2766 2767 MUTEX_EXIT(group->hxgep->genlock);
2767 2768 }
2768 2769
2769 2770 static int
2770 2771 hxge_mmac_get_slot(p_hxge_t hxgep, int *slot)
2771 2772 {
2772 2773 int i;
2773 2774
2774 2775 /*
2775 2776 * Find an open slot.
2776 2777 */
2777 2778 for (i = 0; i < hxgep->mmac.total; i++) {
2778 2779 if (!hxgep->mmac.addrs[i].set) {
2779 2780 *slot = i;
2780 2781 return (0);
2781 2782 }
2782 2783 }
2783 2784
2784 2785 return (ENXIO);
2785 2786 }
2786 2787
/*
 * Program a unicast MAC address into the given PFC slot and update
 * the software shadow state to match.
 *
 * Returns 0 on success, or the hxge_status_t from
 * hxge_pfc_set_mac_address() on failure (in which case the shadow
 * state is left untouched).  Caller holds genlock.
 */
static int
hxge_mmac_set_addr(p_hxge_t hxgep, int slot, const uint8_t *addr)
{
	struct ether_addr eaddr;
	hxge_status_t status = HXGE_OK;

	bcopy(addr, (uint8_t *)&eaddr, ETHERADDRL);

	/*
	 * Set new interface local address and re-init device.
	 * This is destructive to any other streams attached
	 * to this device.
	 */
	RW_ENTER_WRITER(&hxgep->filter_lock);
	status = hxge_pfc_set_mac_address(hxgep, slot, &eaddr);
	RW_EXIT(&hxgep->filter_lock);
	if (status != HXGE_OK)
		return (status);

	/* Hardware accepted the address; record it in the shadow. */
	hxgep->mmac.addrs[slot].set = B_TRUE;
	bcopy(addr, hxgep->mmac.addrs[slot].addr, ETHERADDRL);
	hxgep->mmac.available--;
	/* The default slot doubles as the primary (factory) address. */
	if (slot == HXGE_MAC_DEFAULT_ADDR_SLOT)
		hxgep->mmac.addrs[slot].primary = B_TRUE;

	return (0);
}
2814 2815
2815 2816 static int
2816 2817 hxge_mmac_find_addr(p_hxge_t hxgep, const uint8_t *addr, int *slot)
2817 2818 {
2818 2819 int i, result;
2819 2820
2820 2821 for (i = 0; i < hxgep->mmac.total; i++) {
2821 2822 if (hxgep->mmac.addrs[i].set) {
2822 2823 result = memcmp(hxgep->mmac.addrs[i].addr,
2823 2824 addr, ETHERADDRL);
2824 2825 if (result == 0) {
2825 2826 *slot = i;
2826 2827 return (0);
2827 2828 }
2828 2829 }
2829 2830 }
2830 2831
2831 2832 return (EINVAL);
2832 2833 }
2833 2834
2834 2835 static int
2835 2836 hxge_mmac_unset_addr(p_hxge_t hxgep, int slot)
2836 2837 {
2837 2838 hxge_status_t status;
2838 2839 int i;
2839 2840
2840 2841 status = hxge_pfc_clear_mac_address(hxgep, slot);
2841 2842 if (status != HXGE_OK)
2842 2843 return (status);
2843 2844
2844 2845 for (i = 0; i < ETHERADDRL; i++)
2845 2846 hxgep->mmac.addrs[slot].addr[i] = 0;
2846 2847
2847 2848 hxgep->mmac.addrs[slot].set = B_FALSE;
2848 2849 if (slot == HXGE_MAC_DEFAULT_ADDR_SLOT)
2849 2850 hxgep->mmac.addrs[slot].primary = B_FALSE;
2850 2851 hxgep->mmac.available++;
2851 2852
2852 2853 return (0);
2853 2854 }
2854 2855
2855 2856 static int
2856 2857 hxge_rx_group_add_mac(void *arg, const uint8_t *mac_addr)
2857 2858 {
2858 2859 hxge_ring_group_t *group = arg;
2859 2860 p_hxge_t hxgep = group->hxgep;
2860 2861 int slot = 0;
2861 2862
2862 2863 ASSERT(group->type == MAC_RING_TYPE_RX);
2863 2864
2864 2865 MUTEX_ENTER(hxgep->genlock);
2865 2866
2866 2867 /*
2867 2868 * Find a slot for the address.
2868 2869 */
2869 2870 if (hxge_mmac_get_slot(hxgep, &slot) != 0) {
2870 2871 MUTEX_EXIT(hxgep->genlock);
2871 2872 return (ENOSPC);
2872 2873 }
2873 2874
2874 2875 /*
2875 2876 * Program the MAC address.
2876 2877 */
2877 2878 if (hxge_mmac_set_addr(hxgep, slot, mac_addr) != 0) {
2878 2879 MUTEX_EXIT(hxgep->genlock);
2879 2880 return (ENOSPC);
2880 2881 }
2881 2882
2882 2883 MUTEX_EXIT(hxgep->genlock);
2883 2884 return (0);
2884 2885 }
2885 2886
2886 2887 static int
2887 2888 hxge_rx_group_rem_mac(void *arg, const uint8_t *mac_addr)
2888 2889 {
2889 2890 hxge_ring_group_t *group = arg;
2890 2891 p_hxge_t hxgep = group->hxgep;
2891 2892 int rv, slot;
2892 2893
2893 2894 ASSERT(group->type == MAC_RING_TYPE_RX);
2894 2895
2895 2896 MUTEX_ENTER(hxgep->genlock);
2896 2897
2897 2898 if ((rv = hxge_mmac_find_addr(hxgep, mac_addr, &slot)) != 0) {
2898 2899 MUTEX_EXIT(hxgep->genlock);
2899 2900 return (rv);
2900 2901 }
2901 2902
2902 2903 if ((rv = hxge_mmac_unset_addr(hxgep, slot)) != 0) {
2903 2904 MUTEX_EXIT(hxgep->genlock);
2904 2905 return (rv);
2905 2906 }
2906 2907
2907 2908 MUTEX_EXIT(hxgep->genlock);
2908 2909 return (0);
2909 2910 }
2910 2911
2911 2912 static void
2912 2913 hxge_group_get(void *arg, mac_ring_type_t type, int groupid,
2913 2914 mac_group_info_t *infop, mac_group_handle_t gh)
2914 2915 {
2915 2916 p_hxge_t hxgep = arg;
2916 2917 hxge_ring_group_t *group;
2917 2918
2918 2919 ASSERT(type == MAC_RING_TYPE_RX);
2919 2920
2920 2921 switch (type) {
2921 2922 case MAC_RING_TYPE_RX:
2922 2923 group = &hxgep->rx_groups[groupid];
2923 2924 group->hxgep = hxgep;
2924 2925 group->ghandle = gh;
2925 2926 group->index = groupid;
2926 2927 group->type = type;
2927 2928
2928 2929 infop->mgi_driver = (mac_group_driver_t)group;
2929 2930 infop->mgi_start = hxge_rx_group_start;
2930 2931 infop->mgi_stop = hxge_rx_group_stop;
2931 2932 infop->mgi_addmac = hxge_rx_group_add_mac;
2932 2933 infop->mgi_remmac = hxge_rx_group_rem_mac;
2933 2934 infop->mgi_count = HXGE_MAX_RDCS;
2934 2935 break;
2935 2936
2936 2937 case MAC_RING_TYPE_TX:
2937 2938 default:
2938 2939 break;
2939 2940 }
2940 2941 }
2941 2942
2942 2943 static int
2943 2944 hxge_ring_get_htable_idx(p_hxge_t hxgep, mac_ring_type_t type, uint32_t channel)
2944 2945 {
2945 2946 int i;
2946 2947
2947 2948 ASSERT(hxgep->ldgvp != NULL);
2948 2949
2949 2950 switch (type) {
2950 2951 case MAC_RING_TYPE_RX:
2951 2952 for (i = 0; i < hxgep->ldgvp->maxldvs; i++) {
2952 2953 if ((hxgep->ldgvp->ldvp[i].is_rxdma) &&
2953 2954 (hxgep->ldgvp->ldvp[i].channel == channel)) {
2954 2955 return ((int)
2955 2956 hxgep->ldgvp->ldvp[i].ldgp->htable_idx);
2956 2957 }
2957 2958 }
2958 2959 break;
2959 2960
2960 2961 case MAC_RING_TYPE_TX:
2961 2962 for (i = 0; i < hxgep->ldgvp->maxldvs; i++) {
2962 2963 if ((hxgep->ldgvp->ldvp[i].is_txdma) &&
2963 2964 (hxgep->ldgvp->ldvp[i].channel == channel)) {
2964 2965 return ((int)
2965 2966 hxgep->ldgvp->ldvp[i].ldgp->htable_idx);
2966 2967 }
2967 2968 }
2968 2969 break;
2969 2970
2970 2971 default:
2971 2972 break;
2972 2973 }
2973 2974
2974 2975 return (-1);
2975 2976 }
2976 2977
/*
 * Callback function for the GLDv3 layer to register all rings.
 *
 * Fills in one mac_ring_info_t: the driver-side ring handle, the
 * start/stop/send/poll/stat entry points, and (when one can be
 * found) the DDI interrupt handle associated with the ring's DMA
 * channel so the framework can retarget the interrupt.
 */
/*ARGSUSED*/
static void
hxge_fill_ring(void *arg, mac_ring_type_t type, const int rg_index,
    const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	p_hxge_t hxgep = arg;

	ASSERT(hxgep != NULL);
	ASSERT(infop != NULL);

	switch (type) {
	case MAC_RING_TYPE_TX: {
		p_hxge_ring_handle_t rhp;
		mac_intr_t *mintr = &infop->mri_intr;
		p_hxge_intr_t intrp;
		int htable_idx;

		ASSERT((index >= 0) && (index < HXGE_MAX_TDCS));
		rhp = &hxgep->tx_ring_handles[index];
		rhp->hxgep = hxgep;
		rhp->index = index;
		rhp->ring_handle = rh;
		infop->mri_driver = (mac_ring_driver_t)rhp;
		infop->mri_start = hxge_tx_ring_start;
		infop->mri_stop = hxge_tx_ring_stop;
		infop->mri_tx = hxge_tx_ring_send;
		infop->mri_stat = hxge_tx_ring_stat;

		/* Hand the framework this channel's interrupt, if any. */
		intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
		htable_idx = hxge_ring_get_htable_idx(hxgep, type, index);
		if (htable_idx >= 0)
			mintr->mi_ddi_handle = intrp->htable[htable_idx];
		else
			mintr->mi_ddi_handle = NULL;
		break;
	}

	case MAC_RING_TYPE_RX: {
		p_hxge_ring_handle_t rhp;
		mac_intr_t hxge_mac_intr;
		p_hxge_intr_t intrp;
		int htable_idx;

		ASSERT((index >= 0) && (index < HXGE_MAX_RDCS));
		rhp = &hxgep->rx_ring_handles[index];
		rhp->hxgep = hxgep;
		rhp->index = index;
		rhp->ring_handle = rh;

		/*
		 * Entrypoint to enable interrupt (disable poll) and
		 * disable interrupt (enable poll).  The apparent
		 * inversion is deliberate: "enable interrupts" means
		 * "leave polling mode", and vice versa.
		 */
		hxge_mac_intr.mi_handle = (mac_intr_handle_t)rhp;
		hxge_mac_intr.mi_enable = (mac_intr_enable_t)hxge_disable_poll;
		hxge_mac_intr.mi_disable = (mac_intr_disable_t)hxge_enable_poll;

		/* Hand the framework this channel's interrupt, if any. */
		intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
		htable_idx = hxge_ring_get_htable_idx(hxgep, type, index);
		if (htable_idx >= 0)
			hxge_mac_intr.mi_ddi_handle = intrp->htable[htable_idx];
		else
			hxge_mac_intr.mi_ddi_handle = NULL;

		infop->mri_driver = (mac_ring_driver_t)rhp;
		infop->mri_start = hxge_rx_ring_start;
		infop->mri_stop = hxge_rx_ring_stop;
		infop->mri_intr = hxge_mac_intr;
		infop->mri_poll = hxge_rx_poll;
		infop->mri_stat = hxge_rx_ring_stat;
		break;
	}

	default:
		break;
	}
}
3057 3058
3058 3059 /*ARGSUSED*/
3059 3060 boolean_t
3060 3061 hxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3061 3062 {
3062 3063 p_hxge_t hxgep = arg;
3063 3064
3064 3065 switch (cap) {
3065 3066 case MAC_CAPAB_HCKSUM: {
3066 3067 uint32_t *txflags = cap_data;
3067 3068
3068 3069 *txflags = HCKSUM_INET_PARTIAL;
3069 3070 break;
3070 3071 }
3071 3072
3072 3073 case MAC_CAPAB_RINGS: {
3073 3074 mac_capab_rings_t *cap_rings = cap_data;
3074 3075
3075 3076 MUTEX_ENTER(hxgep->genlock);
3076 3077 if (cap_rings->mr_type == MAC_RING_TYPE_RX) {
3077 3078 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3078 3079 cap_rings->mr_rnum = HXGE_MAX_RDCS;
3079 3080 cap_rings->mr_rget = hxge_fill_ring;
3080 3081 cap_rings->mr_gnum = HXGE_MAX_RX_GROUPS;
3081 3082 cap_rings->mr_gget = hxge_group_get;
3082 3083 cap_rings->mr_gaddring = NULL;
3083 3084 cap_rings->mr_gremring = NULL;
3084 3085 } else {
3085 3086 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3086 3087 cap_rings->mr_rnum = HXGE_MAX_TDCS;
3087 3088 cap_rings->mr_rget = hxge_fill_ring;
3088 3089 cap_rings->mr_gnum = 0;
3089 3090 cap_rings->mr_gget = NULL;
3090 3091 cap_rings->mr_gaddring = NULL;
3091 3092 cap_rings->mr_gremring = NULL;
3092 3093 }
3093 3094 MUTEX_EXIT(hxgep->genlock);
3094 3095 break;
3095 3096 }
3096 3097
3097 3098 default:
3098 3099 return (B_FALSE);
3099 3100 }
3100 3101 return (B_TRUE);
3101 3102 }
3102 3103
3103 3104 static boolean_t
3104 3105 hxge_param_locked(mac_prop_id_t pr_num)
3105 3106 {
3106 3107 /*
3107 3108 * All adv_* parameters are locked (read-only) while
3108 3109 * the device is in any sort of loopback mode ...
3109 3110 */
3110 3111 switch (pr_num) {
3111 3112 case MAC_PROP_ADV_1000FDX_CAP:
3112 3113 case MAC_PROP_EN_1000FDX_CAP:
3113 3114 case MAC_PROP_ADV_1000HDX_CAP:
3114 3115 case MAC_PROP_EN_1000HDX_CAP:
3115 3116 case MAC_PROP_ADV_100FDX_CAP:
3116 3117 case MAC_PROP_EN_100FDX_CAP:
3117 3118 case MAC_PROP_ADV_100HDX_CAP:
3118 3119 case MAC_PROP_EN_100HDX_CAP:
3119 3120 case MAC_PROP_ADV_10FDX_CAP:
3120 3121 case MAC_PROP_EN_10FDX_CAP:
3121 3122 case MAC_PROP_ADV_10HDX_CAP:
3122 3123 case MAC_PROP_EN_10HDX_CAP:
3123 3124 case MAC_PROP_AUTONEG:
3124 3125 case MAC_PROP_FLOWCTRL:
3125 3126 return (B_TRUE);
3126 3127 }
3127 3128 return (B_FALSE);
3128 3129 }
3129 3130
/*
 * callback functions for set/get of properties
 */

/*
 * GLDv3 mc_setprop(9E) entry point: set a MAC property.
 *
 * Only MAC_PROP_MTU and the driver's private properties are
 * writable.  The link characteristic properties are fixed on this
 * 10Gb device and return EINVAL; unknown property ids return
 * ENOTSUP.  While the port is in a loopback mode, all of the
 * properties listed in hxge_param_locked() return EBUSY.
 * Everything runs under genlock.
 */
static int
hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	hxge_t *hxgep = barg;
	p_hxge_stats_t statsp;
	int err = 0;
	uint32_t new_mtu, old_framesize, new_framesize;

	HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "==> hxge_m_setprop"));

	statsp = hxgep->statsp;
	MUTEX_ENTER(hxgep->genlock);
	if (statsp->port_stats.lb_mode != hxge_lb_normal &&
	    hxge_param_locked(pr_num)) {
		/*
		 * All adv_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
		    "==> hxge_m_setprop: loopback mode: read only"));
		MUTEX_EXIT(hxgep->genlock);
		return (EBUSY);
	}

	switch (pr_num) {
	/*
	 * These properties either do not exist or are read-only here.
	 */
	case MAC_PROP_EN_1000FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
	case MAC_PROP_EN_10FDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_STATUS:
	case MAC_PROP_SPEED:
	case MAC_PROP_DUPLEX:
	case MAC_PROP_AUTONEG:
	/*
	 * Flow control is handled in the shared domain and
	 * it is readonly here.
	 */
	case MAC_PROP_FLOWCTRL:
		err = EINVAL;
		HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
		    "==> hxge_m_setprop: read only property %d",
		    pr_num));
		break;

	case MAC_PROP_MTU:
		bcopy(pr_val, &new_mtu, sizeof (new_mtu));
		HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
		    "==> hxge_m_setprop: set MTU: %d", new_mtu));

		new_framesize = new_mtu + MTU_TO_FRAME_SIZE;
		/* No-op if the frame size is unchanged. */
		if (new_framesize == hxgep->vmac.maxframesize) {
			err = 0;
			break;
		}

		/* The MTU may only be changed while the MAC is stopped. */
		if (hxgep->hxge_mac_state == HXGE_MAC_STARTED) {
			err = EBUSY;
			break;
		}

		if (new_framesize < MIN_FRAME_SIZE ||
		    new_framesize > MAX_FRAME_SIZE) {
			err = EINVAL;
			break;
		}

		old_framesize = hxgep->vmac.maxframesize;
		hxgep->vmac.maxframesize = (uint16_t)new_framesize;

		/* Roll back the soft state if the hardware rejects it. */
		if (hxge_vmac_set_framesize(hxgep)) {
			hxgep->vmac.maxframesize =
			    (uint16_t)old_framesize;
			err = EINVAL;
			break;
		}

		/*
		 * Tell the MAC layer about the new SDU; on failure,
		 * restore the previous frame size in hardware too.
		 */
		err = mac_maxsdu_update(hxgep->mach, new_mtu);
		if (err) {
			hxgep->vmac.maxframesize =
			    (uint16_t)old_framesize;
			(void) hxge_vmac_set_framesize(hxgep);
		}

		HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
		    "==> hxge_m_setprop: set MTU: %d maxframe %d",
		    new_mtu, hxgep->vmac.maxframesize));
		break;

	case MAC_PROP_PRIVATE:
		HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
		    "==> hxge_m_setprop: private property"));
		err = hxge_set_priv_prop(hxgep, pr_name, pr_valsize,
		    pr_val);
		break;

	default:
		err = ENOTSUP;
		break;
	}

	MUTEX_EXIT(hxgep->genlock);

	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
	    "<== hxge_m_setprop (return %d)", err));

	return (err);
}
3252 3253
3253 3254 static int
3254 3255 hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3255 3256 uint_t pr_valsize, void *pr_val)
3256 3257 {
3257 3258 hxge_t *hxgep = barg;
3258 3259 p_hxge_stats_t statsp = hxgep->statsp;
3259 3260 int err = 0;
3260 3261 link_flowctrl_t fl;
3261 3262 uint64_t tmp = 0;
3262 3263 link_state_t ls;
3263 3264
3264 3265 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3265 3266 "==> hxge_m_getprop: pr_num %d", pr_num));
3266 3267
3267 3268 switch (pr_num) {
3268 3269 case MAC_PROP_DUPLEX:
3269 3270 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
3270 3271 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3271 3272 "==> hxge_m_getprop: duplex mode %d",
3272 3273 *(uint8_t *)pr_val));
3273 3274 break;
3274 3275
3275 3276 case MAC_PROP_SPEED:
3276 3277 ASSERT(pr_valsize >= sizeof (uint64_t));
3277 3278 tmp = statsp->mac_stats.link_speed * 1000000ull;
3278 3279 bcopy(&tmp, pr_val, sizeof (tmp));
3279 3280 break;
3280 3281
3281 3282 case MAC_PROP_STATUS:
3282 3283 ASSERT(pr_valsize >= sizeof (link_state_t));
3283 3284 if (!statsp->mac_stats.link_up)
3284 3285 ls = LINK_STATE_DOWN;
3285 3286 else
3286 3287 ls = LINK_STATE_UP;
3287 3288 bcopy(&ls, pr_val, sizeof (ls));
3288 3289 break;
3289 3290
3290 3291 case MAC_PROP_FLOWCTRL:
3291 3292 /*
3292 3293 * Flow control is supported by the shared domain and
3293 3294 * it is currently transmit only
3294 3295 */
3295 3296 ASSERT(pr_valsize < sizeof (link_flowctrl_t));
3296 3297 fl = LINK_FLOWCTRL_TX;
3297 3298 bcopy(&fl, pr_val, sizeof (fl));
3298 3299 break;
3299 3300 case MAC_PROP_AUTONEG:
3300 3301 /* 10G link only and it is not negotiable */
3301 3302 *(uint8_t *)pr_val = 0;
3302 3303 break;
3303 3304 case MAC_PROP_ADV_1000FDX_CAP:
3304 3305 case MAC_PROP_ADV_100FDX_CAP:
3305 3306 case MAC_PROP_ADV_10FDX_CAP:
3306 3307 case MAC_PROP_ADV_1000HDX_CAP:
3307 3308 case MAC_PROP_ADV_100HDX_CAP:
3308 3309 case MAC_PROP_ADV_10HDX_CAP:
3309 3310 case MAC_PROP_EN_1000FDX_CAP:
3310 3311 case MAC_PROP_EN_100FDX_CAP:
3311 3312 case MAC_PROP_EN_10FDX_CAP:
3312 3313 case MAC_PROP_EN_1000HDX_CAP:
3313 3314 case MAC_PROP_EN_100HDX_CAP:
↓ open down ↓ |
3279 lines elided |
↑ open up ↑ |
3314 3315 case MAC_PROP_EN_10HDX_CAP:
3315 3316 err = ENOTSUP;
3316 3317 break;
3317 3318
3318 3319 case MAC_PROP_PRIVATE:
3319 3320 err = hxge_get_priv_prop(hxgep, pr_name, pr_valsize,
3320 3321 pr_val);
3321 3322 break;
3322 3323
3323 3324 default:
3324 - err = EINVAL;
3325 + err = ENOTSUP;
3325 3326 break;
3326 3327 }
3327 3328
3328 3329 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "<== hxge_m_getprop"));
3329 3330
3330 3331 return (err);
3331 3332 }
3332 3333
3333 3334 static void
3334 3335 hxge_m_propinfo(void *arg, const char *pr_name,
3335 3336 mac_prop_id_t pr_num, mac_prop_info_handle_t prh)
3336 3337 {
3337 3338 _NOTE(ARGUNUSED(arg));
3338 3339 switch (pr_num) {
3339 3340 case MAC_PROP_DUPLEX:
3340 3341 case MAC_PROP_SPEED:
3341 3342 case MAC_PROP_STATUS:
3342 3343 case MAC_PROP_AUTONEG:
3343 3344 case MAC_PROP_FLOWCTRL:
3344 3345 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3345 3346 break;
3346 3347
3347 3348 case MAC_PROP_MTU:
3348 3349 mac_prop_info_set_range_uint32(prh,
3349 3350 MIN_FRAME_SIZE - MTU_TO_FRAME_SIZE,
3350 3351 MAX_FRAME_SIZE - MTU_TO_FRAME_SIZE);
3351 3352 break;
3352 3353
3353 3354 case MAC_PROP_PRIVATE: {
3354 3355 char valstr[MAXNAMELEN];
3355 3356
3356 3357 bzero(valstr, sizeof (valstr));
3357 3358
3358 3359 /* Receive Interrupt Blanking Parameters */
3359 3360 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3360 3361 (void) snprintf(valstr, sizeof (valstr), "%d",
3361 3362 RXDMA_RCR_TO_DEFAULT);
3362 3363 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3363 3364 (void) snprintf(valstr, sizeof (valstr), "%d",
3364 3365 RXDMA_RCR_PTHRES_DEFAULT);
3365 3366
3366 3367 /* Classification and Load Distribution Configuration */
3367 3368 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0 ||
3368 3369 strcmp(pr_name, "_class_opt_ipv4_udp") == 0 ||
3369 3370 strcmp(pr_name, "_class_opt_ipv4_ah") == 0 ||
3370 3371 strcmp(pr_name, "_class_opt_ipv4_sctp") == 0 ||
3371 3372 strcmp(pr_name, "_class_opt_ipv6_tcp") == 0 ||
3372 3373 strcmp(pr_name, "_class_opt_ipv6_udp") == 0 ||
3373 3374 strcmp(pr_name, "_class_opt_ipv6_ah") == 0 ||
3374 3375 strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3375 3376 (void) snprintf(valstr, sizeof (valstr), "%d",
3376 3377 HXGE_CLASS_TCAM_LOOKUP);
3377 3378 }
3378 3379
3379 3380 if (strlen(valstr) > 0)
3380 3381 mac_prop_info_set_default_str(prh, valstr);
3381 3382 break;
3382 3383 }
3383 3384 }
3384 3385 }
3385 3386
3386 3387
3387 3388 /* ARGSUSED */
3388 3389 static int
3389 3390 hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_valsize,
3390 3391 const void *pr_val)
3391 3392 {
3392 3393 p_hxge_param_t param_arr = hxgep->param_arr;
3393 3394 int err = 0;
3394 3395
3395 3396 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3396 3397 "==> hxge_set_priv_prop: name %s (value %s)", pr_name, pr_val));
3397 3398
3398 3399 if (pr_val == NULL) {
3399 3400 return (EINVAL);
3400 3401 }
3401 3402
3402 3403 /* Blanking */
3403 3404 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3404 3405 err = hxge_param_rx_intr_time(hxgep, NULL, NULL,
3405 3406 (char *)pr_val, (caddr_t)¶m_arr[param_rxdma_intr_time]);
3406 3407 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3407 3408 err = hxge_param_rx_intr_pkts(hxgep, NULL, NULL,
3408 3409 (char *)pr_val, (caddr_t)¶m_arr[param_rxdma_intr_pkts]);
3409 3410
3410 3411 /* Classification */
3411 3412 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3412 3413 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3413 3414 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]);
3414 3415 } else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3415 3416 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3416 3417 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]);
3417 3418 } else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3418 3419 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3419 3420 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]);
3420 3421 } else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3421 3422 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3422 3423 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]);
3423 3424 } else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3424 3425 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3425 3426 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]);
↓ open down ↓ |
91 lines elided |
↑ open up ↑ |
3426 3427 } else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3427 3428 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3428 3429 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]);
3429 3430 } else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3430 3431 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3431 3432 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]);
3432 3433 } else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3433 3434 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3434 3435 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]);
3435 3436 } else {
3436 - err = EINVAL;
3437 + err = ENOTSUP;
3437 3438 }
3438 3439
3439 3440 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3440 3441 "<== hxge_set_priv_prop: err %d", err));
3441 3442
3442 3443 return (err);
3443 3444 }
3444 3445
3445 3446 static int
3446 3447 hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_valsize,
3447 3448 void *pr_val)
3448 3449 {
3449 3450 p_hxge_param_t param_arr = hxgep->param_arr;
3450 3451 char valstr[MAXNAMELEN];
3451 3452 int err = 0;
3452 3453 uint_t strsize;
3453 3454 int value = 0;
3454 3455
3455 3456 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3456 3457 "==> hxge_get_priv_prop: property %s", pr_name));
3457 3458
3458 3459 /* Receive Interrupt Blanking Parameters */
3459 3460 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3460 3461 value = hxgep->intr_timeout;
3461 3462 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3462 3463 value = hxgep->intr_threshold;
3463 3464
3464 3465 /* Classification and Load Distribution Configuration */
3465 3466 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3466 3467 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3467 3468 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]);
3468 3469
3469 3470 value = (int)param_arr[param_class_opt_ipv4_tcp].value;
3470 3471 } else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3471 3472 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3472 3473 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]);
3473 3474
3474 3475 value = (int)param_arr[param_class_opt_ipv4_udp].value;
3475 3476 } else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3476 3477 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3477 3478 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]);
3478 3479
3479 3480 value = (int)param_arr[param_class_opt_ipv4_ah].value;
3480 3481 } else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3481 3482 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3482 3483 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]);
3483 3484
3484 3485 value = (int)param_arr[param_class_opt_ipv4_sctp].value;
3485 3486 } else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3486 3487 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3487 3488 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]);
3488 3489
3489 3490 value = (int)param_arr[param_class_opt_ipv6_tcp].value;
3490 3491 } else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3491 3492 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3492 3493 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]);
3493 3494
3494 3495 value = (int)param_arr[param_class_opt_ipv6_udp].value;
3495 3496 } else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
↓ open down ↓ |
49 lines elided |
↑ open up ↑ |
3496 3497 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3497 3498 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]);
3498 3499
3499 3500 value = (int)param_arr[param_class_opt_ipv6_ah].value;
3500 3501 } else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3501 3502 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3502 3503 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]);
3503 3504
3504 3505 value = (int)param_arr[param_class_opt_ipv6_sctp].value;
3505 3506 } else {
3506 - err = EINVAL;
3507 + err = ENOTSUP;
3507 3508 }
3508 3509
3509 3510 if (err == 0) {
3510 3511 (void) snprintf(valstr, sizeof (valstr), "0x%x", value);
3511 3512
3512 3513 strsize = (uint_t)strlen(valstr);
3513 3514 if (pr_valsize < strsize) {
3514 3515 err = ENOBUFS;
3515 3516 } else {
3516 3517 (void) strlcpy(pr_val, valstr, pr_valsize);
3517 3518 }
3518 3519 }
3519 3520
3520 3521 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3521 3522 "<== hxge_get_priv_prop: return %d", err));
3522 3523
3523 3524 return (err);
3524 3525 }
/*
 * Module loading and removing entry points.
 */
/* dev_ops template: wires hxge_attach/hxge_detach into the DDI. */
DDI_DEFINE_STREAM_OPS(hxge_dev_ops, nulldev, nulldev, hxge_attach, hxge_detach,
    nodev, NULL, D_MP, NULL, NULL);

extern struct mod_ops mod_driverops;

/* Human-readable module description string. */
#define	HXGE_DESC_VER		"HXGE 10Gb Ethernet Driver"

/*
 * Module linkage information for the kernel.
 */
static struct modldrv hxge_modldrv = {
	&mod_driverops,		/* type of module: device driver */
	HXGE_DESC_VER,		/* description string */
	&hxge_dev_ops		/* driver dev_ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *) &hxge_modldrv, NULL
};
3547 3548
3548 3549 int
3549 3550 _init(void)
3550 3551 {
3551 3552 int status;
3552 3553
3553 3554 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
3554 3555 mac_init_ops(&hxge_dev_ops, "hxge");
3555 3556 status = ddi_soft_state_init(&hxge_list, sizeof (hxge_t), 0);
3556 3557 if (status != 0) {
3557 3558 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
3558 3559 "failed to init device soft state"));
3559 3560 mac_fini_ops(&hxge_dev_ops);
3560 3561 goto _init_exit;
3561 3562 }
3562 3563
3563 3564 status = mod_install(&modlinkage);
3564 3565 if (status != 0) {
3565 3566 ddi_soft_state_fini(&hxge_list);
3566 3567 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "Mod install failed"));
3567 3568 goto _init_exit;
3568 3569 }
3569 3570
3570 3571 MUTEX_INIT(&hxge_common_lock, NULL, MUTEX_DRIVER, NULL);
3571 3572
3572 3573 _init_exit:
3573 3574 HXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
3574 3575
3575 3576 return (status);
3576 3577 }
3577 3578
3578 3579 int
3579 3580 _fini(void)
3580 3581 {
3581 3582 int status;
3582 3583
3583 3584 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
3584 3585
3585 3586 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
3586 3587
3587 3588 if (hxge_mblks_pending)
3588 3589 return (EBUSY);
3589 3590
3590 3591 status = mod_remove(&modlinkage);
3591 3592 if (status != DDI_SUCCESS) {
3592 3593 HXGE_DEBUG_MSG((NULL, MOD_CTL,
3593 3594 "Module removal failed 0x%08x", status));
3594 3595 goto _fini_exit;
3595 3596 }
3596 3597
3597 3598 mac_fini_ops(&hxge_dev_ops);
3598 3599
3599 3600 ddi_soft_state_fini(&hxge_list);
3600 3601
3601 3602 MUTEX_DESTROY(&hxge_common_lock);
3602 3603
3603 3604 _fini_exit:
3604 3605 HXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
3605 3606
3606 3607 return (status);
3607 3608 }
3608 3609
3609 3610 int
3610 3611 _info(struct modinfo *modinfop)
3611 3612 {
3612 3613 int status;
3613 3614
3614 3615 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
3615 3616 status = mod_info(&modlinkage, modinfop);
3616 3617 HXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
3617 3618
3618 3619 return (status);
3619 3620 }
3620 3621
/*
 * Select and register the device's interrupt type.
 *
 * The hxge_msi_enable tunable chooses the preference order:
 * 1 = MSI first, 2 = MSI-X first (the default), anything else =
 * fixed (INTx) only.  The chosen type is intersected with what
 * ddi_intr_get_supported_types() reports, then the actual handler
 * registration is delegated to hxge_add_intrs_adv().
 */
/*ARGSUSED*/
static hxge_status_t
hxge_add_intrs(p_hxge_t hxgep)
{
	int intr_types;
	int type = 0;
	int ddi_status = DDI_SUCCESS;
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs"));

	/* Start from a clean interrupt bookkeeping state. */
	hxgep->hxge_intr_type.intr_registered = B_FALSE;
	hxgep->hxge_intr_type.intr_enabled = B_FALSE;
	hxgep->hxge_intr_type.msi_intx_cnt = 0;
	hxgep->hxge_intr_type.intr_added = 0;
	hxgep->hxge_intr_type.niu_msi_enable = B_FALSE;
	hxgep->hxge_intr_type.intr_type = 0;

	if (hxge_msi_enable) {
		hxgep->hxge_intr_type.niu_msi_enable = B_TRUE;
	}

	/* Get the supported interrupt types */
	if ((ddi_status = ddi_intr_get_supported_types(hxgep->dip, &intr_types))
	    != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_intrs: "
		    "ddi_intr_get_supported_types failed: status 0x%08x",
		    ddi_status));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	hxgep->hxge_intr_type.intr_types = intr_types;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
	    "ddi_intr_get_supported_types: 0x%08x", intr_types));

	/*
	 * Pick the interrupt type to use MSIX, MSI, INTX hxge_msi_enable:
	 *	(1): 1 - MSI
	 *	(2): 2 - MSI-X
	 *	others - FIXED
	 */
	switch (hxge_msi_enable) {
	default:
		type = DDI_INTR_TYPE_FIXED;
		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
		    "use fixed (intx emulation) type %08x", type));
		break;

	case 2:
		/* Prefer MSI-X, then MSI, then fixed. */
		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
		if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x", type));
		} else if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x", type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSXED0x%08x", type));
		}
		break;

	case 1:
		/* Prefer MSI, then MSI-X, then fixed. */
		if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x", type));
		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x", type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSXED0x%08x", type));
		}
	}

	hxgep->hxge_intr_type.intr_type = type;
	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
	    type == DDI_INTR_TYPE_FIXED) &&
	    hxgep->hxge_intr_type.niu_msi_enable) {
		/*
		 * NOTE(review): hxge_add_intrs_adv() returns a
		 * hxge_status_t, compared here against DDI_SUCCESS;
		 * this relies on both success codes being 0 — confirm.
		 */
		if ((status = hxge_add_intrs_adv(hxgep)) != DDI_SUCCESS) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    " hxge_add_intrs: "
			    " hxge_add_intrs_adv failed: status 0x%08x",
			    status));
			return (status);
		} else {
			HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_intrs: "
			    "interrupts registered : type %d", type));
			hxgep->hxge_intr_type.intr_registered = B_TRUE;

			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
			    "\nAdded advanced hxge add_intr_adv "
			    "intr type 0x%x\n", type));

			return (status);
		}
	}

	if (!hxgep->hxge_intr_type.intr_registered) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_add_intrs: failed to register interrupts"));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs"));

	return (status);
}
3742 3743
3743 3744 /*ARGSUSED*/
3744 3745 static hxge_status_t
3745 3746 hxge_add_intrs_adv(p_hxge_t hxgep)
3746 3747 {
3747 3748 int intr_type;
3748 3749 p_hxge_intr_t intrp;
3749 3750 hxge_status_t status;
3750 3751
3751 3752 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv"));
3752 3753
3753 3754 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3754 3755 intr_type = intrp->intr_type;
3755 3756
3756 3757 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv: type 0x%x",
3757 3758 intr_type));
3758 3759
3759 3760 switch (intr_type) {
3760 3761 case DDI_INTR_TYPE_MSI: /* 0x2 */
3761 3762 case DDI_INTR_TYPE_MSIX: /* 0x4 */
3762 3763 status = hxge_add_intrs_adv_type(hxgep, intr_type);
3763 3764 break;
3764 3765
3765 3766 case DDI_INTR_TYPE_FIXED: /* 0x1 */
3766 3767 status = hxge_add_intrs_adv_type_fix(hxgep, intr_type);
3767 3768 break;
3768 3769
3769 3770 default:
3770 3771 status = HXGE_ERROR;
3771 3772 break;
3772 3773 }
3773 3774
3774 3775 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv"));
3775 3776
3776 3777 return (status);
3777 3778 }
3778 3779
/*
 * hxge_add_intrs_adv_type() -- allocate and install MSI or MSI-X
 * interrupt vectors.
 *
 * Queries the number of supported and available vectors, clamps the
 * request (PSARC/2007/453 "#msix-request" override for MSI-X,
 * power-of-2 rounding for MSI), allocates the handles, maps them to
 * logical device groups via hxge_ldgv_init(), and installs one handler
 * per group.  On any failure, every handle allocated so far is freed
 * before an error status is returned.
 */
/*ARGSUSED*/
static hxge_status_t
hxge_add_intrs_adv_type(p_hxge_t hxgep, uint32_t int_type)
{
	dev_info_t *dip = hxgep->dip;
	p_hxge_ldg_t ldgp;
	p_hxge_intr_t intrp;
	uint_t *inthandler;
	void *arg1, *arg2;
	int behavior;
	int nintrs, navail;
	int nactual, nrequired, nrequest;
	int inum = 0;
	int loop = 0;
	int x, y;
	int ddi_status = DDI_SUCCESS;
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type"));

	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;

	/* How many vectors of this type does the device support? */
	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		/* NOTE(review): "0x%x%" has a stray '%' in the format. */
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x%, "
		    "nintrs: %d", ddi_status, nintrs));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	/* How many are actually available for allocation right now? */
	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		/* NOTE(review): same stray '%' in this format string. */
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x%, "
		    "nintrs: %d", ddi_status, navail));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: intr type %d nintrs %d, navail %d",
	    int_type, nintrs, navail));

	/* PSARC/2007/453 MSI-X interrupt limit override */
	if (int_type == DDI_INTR_TYPE_MSIX) {
		/* Clamp to the "#msix-request" property-derived limit. */
		nrequest = hxge_create_msi_property(hxgep);
		if (nrequest < navail) {
			navail = nrequest;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "hxge_add_intrs_adv_type: nintrs %d "
			    "navail %d (nrequest %d)",
			    nintrs, navail, nrequest));
		}
	}

	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
		/* MSI must be power of 2 */
		/*
		 * Rounds down to the highest set power-of-2 bit among
		 * {16, 8, 4, 2}, else 1.  NOTE(review): assumes
		 * navail < 32 (HXGE_MSIX_ENTRIES is 32); a larger navail
		 * with low bits clear would fall through to 1 — confirm
		 * against hardware limits.
		 */
		if ((navail & 16) == 16) {
			navail = 16;
		} else if ((navail & 8) == 8) {
			navail = 8;
		} else if ((navail & 4) == 4) {
			navail = 4;
		} else if ((navail & 2) == 2) {
			navail = 2;
		} else {
			navail = 1;
		}
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
		    "navail %d", nintrs, navail));
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "requesting: intr type %d nintrs %d, navail %d",
	    int_type, nintrs, navail));

	/* Fixed interrupts demand an exact allocation; MSI/MSI-X do not. */
	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP);

	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d", ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "ddi_intr_alloc() returned: navail %d nactual %d",
	    navail, nactual));

	/* All vectors share one priority; query it from the first handle. */
	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d", ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	/* Map allocated vectors onto logical device groups. */
	nrequired = 0;
	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_add_intrs_adv_typ:hxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = hxgep->ldgvp->ldgp;
	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "After hxge_ldgv_init(): nreq %d nactual %d", nrequired, nactual));

	/* Install handlers for min(nactual, nrequired) groups. */
	if (nactual < nrequired)
		loop = nactual;
	else
		loop = nrequired;

	for (x = 0; x < loop; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		arg1 = ldgp->ldvp;
		arg2 = hxgep;
		/*
		 * One logical device in the group: use its dedicated
		 * handler; more than one: use the group's shared handler.
		 * NOTE(review): inthandler is left uninitialized when
		 * ldgp->nldvs == 0 — presumably hxge_ldgv_init() never
		 * produces an empty group; confirm.
		 */
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
			    "1-1 int handler (entry %d)\n",
			    arg1, arg2, x));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
			    "nldevs %d int handler (entry %d)\n",
			    arg1, arg2, ldgp->nldvs, x));
		}
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "==> hxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
		    "htable 0x%llx", x, intrp->htable[x]));

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
		    DDI_SUCCESS) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "==> hxge_add_intrs_adv_type: failed #%d "
			    "status 0x%x", x, ddi_status));
			/* Unwind: remove installed handlers first ... */
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}

			/* Free already allocated intr */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) hxge_ldgv_uninit(hxgep);

			return (HXGE_ERROR | HXGE_DDI_FAILED);
		}

		ldgp->htable_idx = x;
		intrp->intr_added++;
	}
	intrp->msi_intx_cnt = nactual;

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
	    navail, nactual, intrp->msi_intx_cnt, intrp->intr_added));

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
	(void) hxge_intr_ldgv_init(hxgep);

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type"));

	return (status);
}
3969 3970
3970 3971 /*ARGSUSED*/
3971 3972 static hxge_status_t
3972 3973 hxge_add_intrs_adv_type_fix(p_hxge_t hxgep, uint32_t int_type)
3973 3974 {
3974 3975 dev_info_t *dip = hxgep->dip;
3975 3976 p_hxge_ldg_t ldgp;
3976 3977 p_hxge_intr_t intrp;
3977 3978 uint_t *inthandler;
3978 3979 void *arg1, *arg2;
3979 3980 int behavior;
3980 3981 int nintrs, navail;
3981 3982 int nactual, nrequired;
3982 3983 int inum = 0;
3983 3984 int x, y;
3984 3985 int ddi_status = DDI_SUCCESS;
3985 3986 hxge_status_t status = HXGE_OK;
3986 3987
3987 3988 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type_fix"));
3988 3989 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3989 3990
3990 3991 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3991 3992 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3992 3993 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3993 3994 "ddi_intr_get_nintrs() failed, status: 0x%x%, "
3994 3995 "nintrs: %d", status, nintrs));
3995 3996 return (HXGE_ERROR | HXGE_DDI_FAILED);
3996 3997 }
3997 3998
3998 3999 ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3999 4000 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
4000 4001 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4001 4002 "ddi_intr_get_navail() failed, status: 0x%x%, "
4002 4003 "nintrs: %d", ddi_status, navail));
4003 4004 return (HXGE_ERROR | HXGE_DDI_FAILED);
4004 4005 }
4005 4006
4006 4007 HXGE_DEBUG_MSG((hxgep, INT_CTL,
4007 4008 "ddi_intr_get_navail() returned: nintrs %d, naavail %d",
4008 4009 nintrs, navail));
4009 4010
4010 4011 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
4011 4012 DDI_INTR_ALLOC_NORMAL);
4012 4013 intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
4013 4014 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
4014 4015 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
4015 4016 navail, &nactual, behavior);
4016 4017 if (ddi_status != DDI_SUCCESS || nactual == 0) {
4017 4018 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4018 4019 " ddi_intr_alloc() failed: %d", ddi_status));
4019 4020 kmem_free(intrp->htable, intrp->intr_size);
4020 4021 return (HXGE_ERROR | HXGE_DDI_FAILED);
4021 4022 }
4022 4023
4023 4024 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
4024 4025 (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
4025 4026 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4026 4027 " ddi_intr_get_pri() failed: %d", ddi_status));
4027 4028 /* Free already allocated interrupts */
4028 4029 for (y = 0; y < nactual; y++) {
4029 4030 (void) ddi_intr_free(intrp->htable[y]);
4030 4031 }
4031 4032
4032 4033 kmem_free(intrp->htable, intrp->intr_size);
4033 4034 return (HXGE_ERROR | HXGE_DDI_FAILED);
4034 4035 }
4035 4036
4036 4037 nrequired = 0;
4037 4038 status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
4038 4039 if (status != HXGE_OK) {
4039 4040 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4040 4041 "hxge_add_intrs_adv_type_fix:hxge_ldgv_init "
4041 4042 "failed: 0x%x", status));
4042 4043 /* Free already allocated interrupts */
4043 4044 for (y = 0; y < nactual; y++) {
4044 4045 (void) ddi_intr_free(intrp->htable[y]);
4045 4046 }
4046 4047
4047 4048 kmem_free(intrp->htable, intrp->intr_size);
4048 4049 return (status);
4049 4050 }
4050 4051
4051 4052 ldgp = hxgep->ldgvp->ldgp;
4052 4053 for (x = 0; x < nrequired; x++, ldgp++) {
4053 4054 ldgp->vector = (uint8_t)x;
4054 4055 arg1 = ldgp->ldvp;
4055 4056 arg2 = hxgep;
4056 4057 if (ldgp->nldvs == 1) {
4057 4058 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
4058 4059 HXGE_DEBUG_MSG((hxgep, INT_CTL,
4059 4060 "hxge_add_intrs_adv_type_fix: "
4060 4061 "1-1 int handler(%d) ldg %d ldv %d "
4061 4062 "arg1 $%p arg2 $%p\n",
4062 4063 x, ldgp->ldg, ldgp->ldvp->ldv, arg1, arg2));
4063 4064 } else if (ldgp->nldvs > 1) {
4064 4065 inthandler = (uint_t *)ldgp->sys_intr_handler;
4065 4066 HXGE_DEBUG_MSG((hxgep, INT_CTL,
4066 4067 "hxge_add_intrs_adv_type_fix: "
4067 4068 "shared ldv %d int handler(%d) ldv %d ldg %d"
4068 4069 "arg1 0x%016llx arg2 0x%016llx\n",
4069 4070 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
4070 4071 arg1, arg2));
4071 4072 }
4072 4073
4073 4074 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
4074 4075 (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
4075 4076 DDI_SUCCESS) {
4076 4077 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4077 4078 "==> hxge_add_intrs_adv_type_fix: failed #%d "
4078 4079 "status 0x%x", x, ddi_status));
4079 4080 for (y = 0; y < intrp->intr_added; y++) {
4080 4081 (void) ddi_intr_remove_handler(
4081 4082 intrp->htable[y]);
4082 4083 }
4083 4084 for (y = 0; y < nactual; y++) {
4084 4085 (void) ddi_intr_free(intrp->htable[y]);
4085 4086 }
4086 4087 /* Free already allocated intr */
4087 4088 kmem_free(intrp->htable, intrp->intr_size);
4088 4089
4089 4090 (void) hxge_ldgv_uninit(hxgep);
4090 4091
4091 4092 return (HXGE_ERROR | HXGE_DDI_FAILED);
4092 4093 }
4093 4094 intrp->intr_added++;
4094 4095 }
4095 4096
4096 4097 intrp->msi_intx_cnt = nactual;
4097 4098
4098 4099 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
4099 4100
4100 4101 status = hxge_intr_ldgv_init(hxgep);
4101 4102
4102 4103 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type_fix"));
4103 4104
4104 4105 return (status);
4105 4106 }
4106 4107
/*
 * hxge_remove_intrs() -- tear down all registered interrupts.
 *
 * Disables the vectors (block-disable when the capability allows),
 * removes every installed handler, frees every allocated handle, frees
 * the handle table, clears the bookkeeping in hxge_intr_type, and
 * uninitializes the logical device groups.  No-op if interrupts were
 * never registered.
 */
/*ARGSUSED*/
static void
hxge_remove_intrs(p_hxge_t hxgep)
{
	int i, inum;
	p_hxge_intr_t intrp;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs"));
	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
	if (!intrp->intr_registered) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "<== hxge_remove_intrs: interrupts not registered"));
		return;
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs:advanced"));

	/* Disable first, so no handler runs while we dismantle. */
	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	/* Remove handlers for the intr_added vectors that got one. */
	for (inum = 0; inum < intrp->intr_added; inum++) {
		if (intrp->htable[inum]) {
			(void) ddi_intr_remove_handler(intrp->htable[inum]);
		}
	}

	/*
	 * Free all allocated handles; msi_intx_cnt may exceed intr_added
	 * when fewer handlers were installed than vectors allocated.
	 */
	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
		if (intrp->htable[inum]) {
			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
			    "hxge_remove_intrs: ddi_intr_free inum %d "
			    "msi_intx_cnt %d intr_added %d",
			    inum, intrp->msi_intx_cnt, intrp->intr_added));

			(void) ddi_intr_free(intrp->htable[inum]);
		}
	}

	/* Release the handle table and reset interrupt state. */
	kmem_free(intrp->htable, intrp->intr_size);
	intrp->intr_registered = B_FALSE;
	intrp->intr_enabled = B_FALSE;
	intrp->msi_intx_cnt = 0;
	intrp->intr_added = 0;

	(void) hxge_ldgv_uninit(hxgep);

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_intrs"));
}
4160 4161
4161 4162 /*ARGSUSED*/
4162 4163 static void
4163 4164 hxge_intrs_enable(p_hxge_t hxgep)
4164 4165 {
4165 4166 p_hxge_intr_t intrp;
4166 4167 int i;
4167 4168 int status;
4168 4169
4169 4170 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable"));
4170 4171
4171 4172 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4172 4173
4173 4174 if (!intrp->intr_registered) {
4174 4175 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_intrs_enable: "
4175 4176 "interrupts are not registered"));
4176 4177 return;
4177 4178 }
4178 4179
4179 4180 if (intrp->intr_enabled) {
4180 4181 HXGE_DEBUG_MSG((hxgep, INT_CTL,
4181 4182 "<== hxge_intrs_enable: already enabled"));
4182 4183 return;
4183 4184 }
4184 4185
4185 4186 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4186 4187 status = ddi_intr_block_enable(intrp->htable,
4187 4188 intrp->intr_added);
4188 4189 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
4189 4190 "block enable - status 0x%x total inums #%d\n",
4190 4191 status, intrp->intr_added));
4191 4192 } else {
4192 4193 for (i = 0; i < intrp->intr_added; i++) {
4193 4194 status = ddi_intr_enable(intrp->htable[i]);
4194 4195 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
4195 4196 "ddi_intr_enable:enable - status 0x%x "
4196 4197 "total inums %d enable inum #%d\n",
4197 4198 status, intrp->intr_added, i));
4198 4199 if (status == DDI_SUCCESS) {
4199 4200 intrp->intr_enabled = B_TRUE;
4200 4201 }
4201 4202 }
4202 4203 }
4203 4204
4204 4205 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_enable"));
4205 4206 }
4206 4207
4207 4208 /*ARGSUSED*/
4208 4209 static void
4209 4210 hxge_intrs_disable(p_hxge_t hxgep)
4210 4211 {
4211 4212 p_hxge_intr_t intrp;
4212 4213 int i;
4213 4214
4214 4215 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_disable"));
4215 4216
4216 4217 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4217 4218
4218 4219 if (!intrp->intr_registered) {
4219 4220 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable: "
4220 4221 "interrupts are not registered"));
4221 4222 return;
4222 4223 }
4223 4224
4224 4225 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4225 4226 (void) ddi_intr_block_disable(intrp->htable,
4226 4227 intrp->intr_added);
4227 4228 } else {
4228 4229 for (i = 0; i < intrp->intr_added; i++) {
4229 4230 (void) ddi_intr_disable(intrp->htable[i]);
4230 4231 }
4231 4232 }
4232 4233
4233 4234 intrp->intr_enabled = B_FALSE;
4234 4235 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable"));
4235 4236 }
4236 4237
4237 4238 static hxge_status_t
4238 4239 hxge_mac_register(p_hxge_t hxgep)
4239 4240 {
4240 4241 mac_register_t *macp;
4241 4242 int status;
4242 4243
4243 4244 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_mac_register"));
4244 4245
4245 4246 if ((macp = mac_alloc(MAC_VERSION)) == NULL)
4246 4247 return (HXGE_ERROR);
4247 4248
4248 4249 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4249 4250 macp->m_driver = hxgep;
4250 4251 macp->m_dip = hxgep->dip;
4251 4252 macp->m_src_addr = hxgep->ouraddr.ether_addr_octet;
4252 4253 macp->m_callbacks = &hxge_m_callbacks;
4253 4254 macp->m_min_sdu = 0;
4254 4255 macp->m_max_sdu = hxgep->vmac.maxframesize - MTU_TO_FRAME_SIZE;
4255 4256 macp->m_margin = VLAN_TAGSZ;
4256 4257 macp->m_priv_props = hxge_priv_props;
4257 4258 macp->m_v12n = MAC_VIRT_LEVEL1;
4258 4259
4259 4260 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
4260 4261 "hxge_mac_register: ether addr is %x:%x:%x:%x:%x:%x",
4261 4262 macp->m_src_addr[0],
4262 4263 macp->m_src_addr[1],
4263 4264 macp->m_src_addr[2],
4264 4265 macp->m_src_addr[3],
4265 4266 macp->m_src_addr[4],
4266 4267 macp->m_src_addr[5]));
4267 4268
4268 4269 status = mac_register(macp, &hxgep->mach);
4269 4270 mac_free(macp);
4270 4271
4271 4272 if (status != 0) {
4272 4273 cmn_err(CE_WARN,
4273 4274 "hxge_mac_register failed (status %d instance %d)",
4274 4275 status, hxgep->instance);
4275 4276 return (HXGE_ERROR);
4276 4277 }
4277 4278
4278 4279 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_mac_register success "
4279 4280 "(instance %d)", hxgep->instance));
4280 4281
4281 4282 return (HXGE_OK);
4282 4283 }
4283 4284
4284 4285 static int
4285 4286 hxge_init_common_dev(p_hxge_t hxgep)
4286 4287 {
4287 4288 p_hxge_hw_list_t hw_p;
4288 4289 dev_info_t *p_dip;
4289 4290
4290 4291 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_init_common_dev"));
4291 4292
4292 4293 p_dip = hxgep->p_dip;
4293 4294 MUTEX_ENTER(&hxge_common_lock);
4294 4295
4295 4296 /*
4296 4297 * Loop through existing per Hydra hardware list.
4297 4298 */
4298 4299 for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
4299 4300 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4300 4301 "==> hxge_init_common_dev: hw_p $%p parent dip $%p",
4301 4302 hw_p, p_dip));
4302 4303 if (hw_p->parent_devp == p_dip) {
4303 4304 hxgep->hxge_hw_p = hw_p;
4304 4305 hw_p->ndevs++;
4305 4306 hw_p->hxge_p = hxgep;
4306 4307 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4307 4308 "==> hxge_init_common_device: "
4308 4309 "hw_p $%p parent dip $%p ndevs %d (found)",
4309 4310 hw_p, p_dip, hw_p->ndevs));
4310 4311 break;
4311 4312 }
4312 4313 }
4313 4314
4314 4315 if (hw_p == NULL) {
4315 4316 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4316 4317 "==> hxge_init_common_dev: parent dip $%p (new)", p_dip));
4317 4318 hw_p = kmem_zalloc(sizeof (hxge_hw_list_t), KM_SLEEP);
4318 4319 hw_p->parent_devp = p_dip;
4319 4320 hw_p->magic = HXGE_MAGIC;
4320 4321 hxgep->hxge_hw_p = hw_p;
4321 4322 hw_p->ndevs++;
4322 4323 hw_p->hxge_p = hxgep;
4323 4324 hw_p->next = hxge_hw_list;
4324 4325
4325 4326 MUTEX_INIT(&hw_p->hxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
4326 4327 MUTEX_INIT(&hw_p->hxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
4327 4328 MUTEX_INIT(&hw_p->hxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
4328 4329
4329 4330 hxge_hw_list = hw_p;
4330 4331 }
4331 4332 MUTEX_EXIT(&hxge_common_lock);
4332 4333 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4333 4334 "==> hxge_init_common_dev (hxge_hw_list) $%p", hxge_hw_list));
4334 4335 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_init_common_dev"));
4335 4336
4336 4337 return (HXGE_OK);
4337 4338 }
4338 4339
/*
 * hxge_uninit_common_dev() -- detach this instance from the per-Hydra
 * shared-hardware list.
 *
 * Under hxge_common_lock, finds the hxge_hw_list entry this instance
 * joined in hxge_init_common_dev(), drops its device count, and when
 * the count reaches zero destroys the entry's locks, unlinks it from
 * the list and frees it.  No-op if the instance never joined an entry.
 */
static void
hxge_uninit_common_dev(p_hxge_t hxgep)
{
	p_hxge_hw_list_t hw_p, h_hw_p;
	dev_info_t *p_dip;

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_uninit_common_dev"));
	if (hxgep->hxge_hw_p == NULL) {
		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
		    "<== hxge_uninit_common_dev (no common)"));
		return;
	}

	MUTEX_ENTER(&hxge_common_lock);
	/*
	 * h_hw_p trails one node behind hw_p so a middle node can be
	 * unlinked; when hw_p is still the head, the head pointer is
	 * updated directly instead.
	 */
	h_hw_p = hxge_hw_list;
	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
		p_dip = hw_p->parent_devp;
		/* Match on entry identity, parent dip and magic numbers. */
		if (hxgep->hxge_hw_p == hw_p && p_dip == hxgep->p_dip &&
		    hxgep->hxge_hw_p->magic == HXGE_MAGIC &&
		    hw_p->magic == HXGE_MAGIC) {
			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
			    "==> hxge_uninit_common_dev: "
			    "hw_p $%p parent dip $%p ndevs %d (found)",
			    hw_p, p_dip, hw_p->ndevs));

			hxgep->hxge_hw_p = NULL;
			if (hw_p->ndevs) {
				hw_p->ndevs--;
			}
			hw_p->hxge_p = NULL;
			if (!hw_p->ndevs) {
				/* Last user: destroy locks and unlink. */
				MUTEX_DESTROY(&hw_p->hxge_vlan_lock);
				MUTEX_DESTROY(&hw_p->hxge_tcam_lock);
				MUTEX_DESTROY(&hw_p->hxge_cfg_lock);
				HXGE_DEBUG_MSG((hxgep, MOD_CTL,
				    "==> hxge_uninit_common_dev: "
				    "hw_p $%p parent dip $%p ndevs %d (last)",
				    hw_p, p_dip, hw_p->ndevs));

				if (hw_p == hxge_hw_list) {
					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
					    "==> hxge_uninit_common_dev:"
					    "remove head "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (head)",
					    hw_p, p_dip, hw_p->ndevs));
					hxge_hw_list = hw_p->next;
				} else {
					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
					    "==> hxge_uninit_common_dev:"
					    "remove middle "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (middle)",
					    hw_p, p_dip, hw_p->ndevs));
					h_hw_p->next = hw_p->next;
				}

				KMEM_FREE(hw_p, sizeof (hxge_hw_list_t));
			}
			break;
		} else {
			/* Not a match: remember this node as predecessor. */
			h_hw_p = hw_p;
		}
	}

	MUTEX_EXIT(&hxge_common_lock);
	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
	    "==> hxge_uninit_common_dev (hxge_hw_list) $%p", hxge_hw_list));

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<= hxge_uninit_common_dev"));
}
4410 4411
4411 4412 #define HXGE_MSIX_ENTRIES 32
4412 4413 #define HXGE_MSIX_WAIT_COUNT 10
4413 4414 #define HXGE_MSIX_PARITY_CHECK_COUNT 30
4414 4415
4415 4416 static void
4416 4417 hxge_link_poll(void *arg)
4417 4418 {
4418 4419 p_hxge_t hxgep = (p_hxge_t)arg;
4419 4420 hpi_handle_t handle;
4420 4421 cip_link_stat_t link_stat;
4421 4422 hxge_timeout *to = &hxgep->timeout;
4422 4423
4423 4424 handle = HXGE_DEV_HPI_HANDLE(hxgep);
4424 4425 HXGE_REG_RD32(handle, CIP_LINK_STAT, &link_stat.value);
4425 4426
4426 4427 if (to->report_link_status ||
4427 4428 (to->link_status != link_stat.bits.xpcs0_link_up)) {
4428 4429 to->link_status = link_stat.bits.xpcs0_link_up;
4429 4430 to->report_link_status = B_FALSE;
4430 4431
4431 4432 if (link_stat.bits.xpcs0_link_up) {
4432 4433 hxge_link_update(hxgep, LINK_STATE_UP);
4433 4434 } else {
4434 4435 hxge_link_update(hxgep, LINK_STATE_DOWN);
4435 4436 }
4436 4437 }
4437 4438
4438 4439 /* Restart the link status timer to check the link status */
4439 4440 MUTEX_ENTER(&to->lock);
4440 4441 to->id = timeout(hxge_link_poll, arg, to->ticks);
4441 4442 MUTEX_EXIT(&to->lock);
4442 4443 }
4443 4444
4444 4445 static void
4445 4446 hxge_link_update(p_hxge_t hxgep, link_state_t state)
4446 4447 {
4447 4448 p_hxge_stats_t statsp = (p_hxge_stats_t)hxgep->statsp;
4448 4449
4449 4450 mac_link_update(hxgep->mach, state);
4450 4451 if (state == LINK_STATE_UP) {
4451 4452 statsp->mac_stats.link_speed = 10000;
4452 4453 statsp->mac_stats.link_duplex = 2;
4453 4454 statsp->mac_stats.link_up = 1;
4454 4455 } else {
4455 4456 statsp->mac_stats.link_speed = 0;
4456 4457 statsp->mac_stats.link_duplex = 0;
4457 4458 statsp->mac_stats.link_up = 0;
4458 4459 }
4459 4460 }
4460 4461
4461 4462 static void
4462 4463 hxge_msix_init(p_hxge_t hxgep)
4463 4464 {
4464 4465 uint32_t data0;
4465 4466 uint32_t data1;
4466 4467 uint32_t data2;
4467 4468 int i;
4468 4469 uint32_t msix_entry0;
4469 4470 uint32_t msix_entry1;
4470 4471 uint32_t msix_entry2;
4471 4472 uint32_t msix_entry3;
4472 4473
4473 4474 /* Change to use MSIx bar instead of indirect access */
4474 4475 for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
4475 4476 data0 = 0xffffffff - i;
4476 4477 data1 = 0xffffffff - i - 1;
4477 4478 data2 = 0xffffffff - i - 2;
4478 4479
4479 4480 HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16, data0);
4480 4481 HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 4, data1);
4481 4482 HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 8, data2);
4482 4483 HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 12, 0);
4483 4484 }
4484 4485
4485 4486 /* Initialize ram data out buffer. */
4486 4487 for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
4487 4488 HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16, &msix_entry0);
4488 4489 HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 4, &msix_entry1);
4489 4490 HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 8, &msix_entry2);
4490 4491 HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 12, &msix_entry3);
4491 4492 }
4492 4493 }
4493 4494
4494 4495 /*
4495 4496 * The following function is to support
4496 4497 * PSARC/2007/453 MSI-X interrupt limit override.
4497 4498 */
4498 4499 static int
4499 4500 hxge_create_msi_property(p_hxge_t hxgep)
4500 4501 {
4501 4502 int nmsi;
4502 4503 extern int ncpus;
4503 4504
4504 4505 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==>hxge_create_msi_property"));
4505 4506
4506 4507 (void) ddi_prop_create(DDI_DEV_T_NONE, hxgep->dip,
4507 4508 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
4508 4509 /*
4509 4510 * The maximum MSI-X requested will be 8.
4510 4511 * If the # of CPUs is less than 8, we will reqeust
4511 4512 * # MSI-X based on the # of CPUs.
4512 4513 */
4513 4514 if (ncpus >= HXGE_MSIX_REQUEST_10G) {
4514 4515 nmsi = HXGE_MSIX_REQUEST_10G;
4515 4516 } else {
4516 4517 nmsi = ncpus;
4517 4518 }
4518 4519
4519 4520 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4520 4521 "==>hxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
4521 4522 ddi_prop_exists(DDI_DEV_T_NONE, hxgep->dip,
4522 4523 DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
4523 4524
4524 4525 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<==hxge_create_msi_property"));
4525 4526 return (nmsi);
4526 4527 }
↓ open down ↓ |
1010 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX