6064 ixgbe needs X550 support
--- old/usr/src/uts/common/io/ixgbe/core/ixgbe_82598.c
+++ new/usr/src/uts/common/io/ixgbe/core/ixgbe_82598.c
1 1 /******************************************************************************
2 2
3 - Copyright (c) 2001-2012, Intel Corporation
3 + Copyright (c) 2001-2015, Intel Corporation
4 4 All rights reserved.
5 5
6 6 Redistribution and use in source and binary forms, with or without
7 7 modification, are permitted provided that the following conditions are met:
8 8
9 9 1. Redistributions of source code must retain the above copyright notice,
10 10 this list of conditions and the following disclaimer.
11 11
12 12 2. Redistributions in binary form must reproduce the above copyright
13 13 notice, this list of conditions and the following disclaimer in the
14 14 documentation and/or other materials provided with the distribution.
15 15
16 16 3. Neither the name of the Intel Corporation nor the names of its
17 17 contributors may be used to endorse or promote products derived from
18 18 this software without specific prior written permission.
19 19
20 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 30 POSSIBILITY OF SUCH DAMAGE.
31 31
32 32 ******************************************************************************/
33 -/*$FreeBSD: src/sys/dev/ixgbe/ixgbe_82598.c,v 1.13 2012/07/05 20:51:44 jfv Exp $*/
33 +/*$FreeBSD$*/
34 34
35 35 #include "ixgbe_type.h"
36 36 #include "ixgbe_82598.h"
37 37 #include "ixgbe_api.h"
38 38 #include "ixgbe_common.h"
39 39 #include "ixgbe_phy.h"
40 40
41 +#define IXGBE_82598_MAX_TX_QUEUES 32
42 +#define IXGBE_82598_MAX_RX_QUEUES 64
43 +#define IXGBE_82598_RAR_ENTRIES 16
44 +#define IXGBE_82598_MC_TBL_SIZE 128
45 +#define IXGBE_82598_VFT_TBL_SIZE 128
46 +#define IXGBE_82598_RX_PB_SIZE 512
47 +
41 48 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
42 49 ixgbe_link_speed *speed,
43 50 bool *autoneg);
44 51 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
45 52 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
46 53 bool autoneg_wait_to_complete);
47 54 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
48 55 ixgbe_link_speed *speed, bool *link_up,
49 56 bool link_up_wait_to_complete);
50 57 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
51 58 ixgbe_link_speed speed,
52 - bool autoneg,
53 59 bool autoneg_wait_to_complete);
54 60 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
55 61 ixgbe_link_speed speed,
56 - bool autoneg,
57 62 bool autoneg_wait_to_complete);
58 63 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
59 64 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
60 65 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
61 66 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
62 67 u32 headroom, int strategy);
63 -
68 +static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
69 + u8 *sff8472_data);
64 70 /**
65 71 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
66 72 * @hw: pointer to the HW structure
67 73 *
68 74 * The defaults for 82598 should be in the range of 50us to 50ms,
69 75 * however the hardware default for these parts is 500us to 1ms which is less
70 76 * than the 10ms recommended by the pci-e spec. To address this we need to
71 77 * increase the value to either 10ms to 250ms for capability version 1 config,
72 78 * or 16ms to 55ms for version 2.
73 79 **/
74 80 void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
75 81 {
76 82 u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
77 83 u16 pcie_devctl2;
78 84
79 85 /* only take action if timeout value is defaulted to 0 */
80 86 if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
81 87 goto out;
82 88
83 89 /*
84 90 * if capabilities version is type 1 we can write the
85 91 * timeout of 10ms to 250ms through the GCR register
86 92 */
87 93 if (!(gcr & IXGBE_GCR_CAP_VER2)) {
88 94 gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
89 95 goto out;
90 96 }
91 97
92 98 /*
93 99 * for version 2 capabilities we need to write the config space
94 100 * directly in order to set the completion timeout value for
95 101 * 16ms to 55ms
96 102 */
97 103 pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
98 104 pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
99 105 IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
100 106 out:
101 107 /* disable completion timeout resend */
102 108 gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
103 109 IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
104 110 }
105 111
106 112 /**
107 113 * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
108 114 * @hw: pointer to hardware structure
109 115 *
110 116 * Initialize the function pointers and assign the MAC type for 82598.
111 117 * Does not touch the hardware.
112 118 **/
113 119 s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
114 120 {
115 121 struct ixgbe_mac_info *mac = &hw->mac;
116 122 struct ixgbe_phy_info *phy = &hw->phy;
117 123 s32 ret_val;
118 124
119 125 DEBUGFUNC("ixgbe_init_ops_82598");
120 126
121 127 ret_val = ixgbe_init_phy_ops_generic(hw);
122 128 ret_val = ixgbe_init_ops_generic(hw);
123 129
124 130 /* PHY */
125 - phy->ops.init = &ixgbe_init_phy_ops_82598;
131 + phy->ops.init = ixgbe_init_phy_ops_82598;
126 132
127 133 /* MAC */
128 - mac->ops.start_hw = &ixgbe_start_hw_82598;
129 - mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
130 - mac->ops.reset_hw = &ixgbe_reset_hw_82598;
131 - mac->ops.get_media_type = &ixgbe_get_media_type_82598;
134 + mac->ops.start_hw = ixgbe_start_hw_82598;
135 + mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_82598;
136 + mac->ops.reset_hw = ixgbe_reset_hw_82598;
137 + mac->ops.get_media_type = ixgbe_get_media_type_82598;
132 138 mac->ops.get_supported_physical_layer =
133 - &ixgbe_get_supported_physical_layer_82598;
134 - mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
135 - mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
136 - mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
139 + ixgbe_get_supported_physical_layer_82598;
140 + mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82598;
141 + mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82598;
142 + mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie_82598;
143 + mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82598;
137 144
138 145 /* RAR, Multicast, VLAN */
139 - mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
140 - mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
141 - mac->ops.set_vfta = &ixgbe_set_vfta_82598;
146 + mac->ops.set_vmdq = ixgbe_set_vmdq_82598;
147 + mac->ops.clear_vmdq = ixgbe_clear_vmdq_82598;
148 + mac->ops.set_vfta = ixgbe_set_vfta_82598;
142 149 mac->ops.set_vlvf = NULL;
143 - mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
150 + mac->ops.clear_vfta = ixgbe_clear_vfta_82598;
144 151
145 152 /* Flow Control */
146 - mac->ops.fc_enable = &ixgbe_fc_enable_82598;
153 + mac->ops.fc_enable = ixgbe_fc_enable_82598;
147 154
148 - mac->mcft_size = 128;
149 - mac->vft_size = 128;
150 - mac->num_rar_entries = 16;
151 - mac->rx_pb_size = 512;
152 - mac->max_tx_queues = 32;
153 - mac->max_rx_queues = 64;
155 + mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
156 + mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
157 + mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
158 + mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE;
159 + mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
160 + mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
154 161 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
155 162
156 163 /* SFP+ Module */
157 - phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
164 + phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_82598;
165 + phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_82598;
158 166
159 167 /* Link */
160 - mac->ops.check_link = &ixgbe_check_mac_link_82598;
161 - mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
168 + mac->ops.check_link = ixgbe_check_mac_link_82598;
169 + mac->ops.setup_link = ixgbe_setup_mac_link_82598;
162 170 mac->ops.flap_tx_laser = NULL;
163 - mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598;
164 - mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;
171 + mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82598;
172 + mac->ops.setup_rxpba = ixgbe_set_rxpba_82598;
165 173
166 174 /* Manageability interface */
167 175 mac->ops.set_fw_drv_ver = NULL;
168 176
177 + mac->ops.get_rtrup2tc = NULL;
178 +
169 179 return ret_val;
170 180 }
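
The hunk above also drops the leading '&' from every function-pointer assignment. Either spelling is valid C: a function name used as a value decays to a pointer to that function, so ixgbe_reset_hw_82598 and &ixgbe_reset_hw_82598 yield the same pointer, and the change is purely stylistic. A minimal standalone sketch of that equivalence, using a hypothetical function name rather than anything from the driver:

    #include <stdio.h>

    static int demo_op(int x)            /* hypothetical stand-in for a mac/phy op */
    {
        return x + 1;
    }

    int main(void)
    {
        int (*with_decay)(int) = demo_op;     /* function designator decays to a pointer */
        int (*with_addrof)(int) = &demo_op;   /* explicit address-of; same pointer value */

        printf("%d %d %d\n", with_decay(1), with_addrof(1), with_decay == with_addrof);
        return 0;   /* prints: 2 2 1 */
    }
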
171 181
172 182 /**
173 183 * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
174 184 * @hw: pointer to hardware structure
175 185 *
176 186 * Initialize any function pointers that were not able to be
177 187 * set during init_shared_code because the PHY/SFP type was
178 188 * not known. Perform the SFP init if necessary.
179 189 *
180 190 **/
181 191 s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
182 192 {
183 193 struct ixgbe_mac_info *mac = &hw->mac;
184 194 struct ixgbe_phy_info *phy = &hw->phy;
185 195 s32 ret_val = IXGBE_SUCCESS;
186 196 u16 list_offset, data_offset;
187 197
188 198 DEBUGFUNC("ixgbe_init_phy_ops_82598");
189 199
190 200 /* Identify the PHY */
191 201 phy->ops.identify(hw);
192 202
193 203 /* Overwrite the link function pointers if copper PHY */
194 204 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
195 - mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
205 + mac->ops.setup_link = ixgbe_setup_copper_link_82598;
196 206 mac->ops.get_link_capabilities =
197 - &ixgbe_get_copper_link_capabilities_generic;
207 + ixgbe_get_copper_link_capabilities_generic;
198 208 }
199 209
200 210 switch (hw->phy.type) {
201 211 case ixgbe_phy_tn:
202 - phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
203 - phy->ops.check_link = &ixgbe_check_phy_link_tnx;
212 + phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
213 + phy->ops.check_link = ixgbe_check_phy_link_tnx;
204 214 phy->ops.get_firmware_version =
205 - &ixgbe_get_phy_firmware_version_tnx;
215 + ixgbe_get_phy_firmware_version_tnx;
206 216 break;
207 217 case ixgbe_phy_nl:
208 - phy->ops.reset = &ixgbe_reset_phy_nl;
218 + phy->ops.reset = ixgbe_reset_phy_nl;
209 219
210 220 /* Call SFP+ identify routine to get the SFP+ module type */
211 221 ret_val = phy->ops.identify_sfp(hw);
212 222 if (ret_val != IXGBE_SUCCESS)
213 223 goto out;
214 224 else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
215 225 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
216 226 goto out;
217 227 }
218 228
219 229 /* Check to see if SFP+ module is supported */
220 230 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
221 231 &list_offset,
222 232 &data_offset);
223 233 if (ret_val != IXGBE_SUCCESS) {
224 234 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
225 235 goto out;
226 236 }
227 237 break;
228 238 default:
229 239 break;
230 240 }
231 241
232 242 out:
233 243 return ret_val;
234 244 }
235 245
236 246 /**
237 247 * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
238 248 * @hw: pointer to hardware structure
239 249 *
240 250 * Starts the hardware using the generic start_hw function.
241 251 * Disables relaxed ordering, then sets the PCIe completion timeout
242 252 *
243 253 **/
244 254 s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
245 255 {
246 256 u32 regval;
247 257 u32 i;
248 258 s32 ret_val = IXGBE_SUCCESS;
249 259
250 260 DEBUGFUNC("ixgbe_start_hw_82598");
251 261
252 262 ret_val = ixgbe_start_hw_generic(hw);
263 + if (ret_val)
264 + return ret_val;
253 265
254 266 /* Disable relaxed ordering */
255 267 for (i = 0; ((i < hw->mac.max_tx_queues) &&
256 268 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
257 269 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
258 270 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
259 271 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
260 272 }
261 273
262 274 for (i = 0; ((i < hw->mac.max_rx_queues) &&
263 275 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
264 276 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
265 277 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
266 278 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
267 279 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
268 280 }
269 281
270 282 /* set the completion timeout for interface */
271 - if (ret_val == IXGBE_SUCCESS)
272 - ixgbe_set_pcie_completion_timeout(hw);
283 + ixgbe_set_pcie_completion_timeout(hw);
273 284
274 285 return ret_val;
275 286 }
276 287
277 288 /**
278 289 * ixgbe_get_link_capabilities_82598 - Determines link capabilities
279 290 * @hw: pointer to hardware structure
280 291 * @speed: pointer to link speed
281 292 * @autoneg: boolean auto-negotiation value
282 293 *
283 294 * Determines the link capabilities by reading the AUTOC register.
284 295 **/
285 296 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
286 297 ixgbe_link_speed *speed,
287 298 bool *autoneg)
288 299 {
289 300 s32 status = IXGBE_SUCCESS;
290 301 u32 autoc = 0;
291 302
292 303 DEBUGFUNC("ixgbe_get_link_capabilities_82598");
293 304
294 305 /*
295 306 * Determine link capabilities based on the stored value of AUTOC,
296 307 * which represents EEPROM defaults. If AUTOC value has not been
297 308 * stored, use the current register value.
298 309 */
299 310 if (hw->mac.orig_link_settings_stored)
300 311 autoc = hw->mac.orig_autoc;
301 312 else
302 313 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
303 314
304 315 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
305 316 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
306 317 *speed = IXGBE_LINK_SPEED_1GB_FULL;
307 318 *autoneg = FALSE;
308 319 break;
309 320
310 321 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
311 322 *speed = IXGBE_LINK_SPEED_10GB_FULL;
312 323 *autoneg = FALSE;
313 324 break;
314 325
315 326 case IXGBE_AUTOC_LMS_1G_AN:
316 327 *speed = IXGBE_LINK_SPEED_1GB_FULL;
317 328 *autoneg = TRUE;
318 329 break;
319 330
320 331 case IXGBE_AUTOC_LMS_KX4_AN:
321 332 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
322 333 *speed = IXGBE_LINK_SPEED_UNKNOWN;
323 334 if (autoc & IXGBE_AUTOC_KX4_SUPP)
324 335 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
325 336 if (autoc & IXGBE_AUTOC_KX_SUPP)
326 337 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
327 338 *autoneg = TRUE;
328 339 break;
329 340
330 341 default:
331 342 status = IXGBE_ERR_LINK_SETUP;
332 343 break;
333 344 }
334 345
335 346 return status;
336 347 }
337 348
338 349 /**
339 350 * ixgbe_get_media_type_82598 - Determines media type
340 351 * @hw: pointer to hardware structure
341 352 *
342 353 * Returns the media type (fiber, copper, backplane)
343 354 **/
344 355 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
345 356 {
346 357 enum ixgbe_media_type media_type;
347 358
348 359 DEBUGFUNC("ixgbe_get_media_type_82598");
349 360
350 361 /* Detect if there is a copper PHY attached. */
351 362 switch (hw->phy.type) {
352 363 case ixgbe_phy_cu_unknown:
353 364 case ixgbe_phy_tn:
354 365 media_type = ixgbe_media_type_copper;
355 366 goto out;
356 367 default:
357 368 break;
358 369 }
359 370
360 371 /* Media type for I82598 is based on device ID */
361 372 switch (hw->device_id) {
362 373 case IXGBE_DEV_ID_82598:
363 374 case IXGBE_DEV_ID_82598_BX:
364 375 /* Default device ID is mezzanine card KX/KX4 */
365 376 media_type = ixgbe_media_type_backplane;
366 377 break;
367 378 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
368 379 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
369 380 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
370 381 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
371 382 case IXGBE_DEV_ID_82598EB_XF_LR:
372 383 case IXGBE_DEV_ID_82598EB_SFP_LOM:
373 384 media_type = ixgbe_media_type_fiber;
374 385 break;
375 386 case IXGBE_DEV_ID_82598EB_CX4:
376 387 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
377 388 media_type = ixgbe_media_type_cx4;
378 389 break;
379 390 case IXGBE_DEV_ID_82598AT:
380 391 case IXGBE_DEV_ID_82598AT2:
381 392 media_type = ixgbe_media_type_copper;
382 393 break;
383 394 default:
384 395 media_type = ixgbe_media_type_unknown;
385 396 break;
386 397 }
387 398 out:
388 399 return media_type;
389 400 }
390 401
391 402 /**
392 403 * ixgbe_fc_enable_82598 - Enable flow control
393 404 * @hw: pointer to hardware structure
394 405 *
395 406 * Enable flow control according to the current settings.
396 407 **/
397 408 s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
398 409 {
399 410 s32 ret_val = IXGBE_SUCCESS;
400 411 u32 fctrl_reg;
401 412 u32 rmcs_reg;
402 413 u32 reg;
403 414 u32 fcrtl, fcrth;
404 415 u32 link_speed = 0;
405 416 int i;
406 417 bool link_up;
407 418
408 419 DEBUGFUNC("ixgbe_fc_enable_82598");
409 420
410 421 /* Validate the water mark configuration */
411 422 if (!hw->fc.pause_time) {
412 423 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
413 424 goto out;
414 425 }
415 426
416 427 /* Low water mark of zero causes XOFF floods */
417 428 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
418 429 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
419 430 hw->fc.high_water[i]) {
420 431 if (!hw->fc.low_water[i] ||
421 432 hw->fc.low_water[i] >= hw->fc.high_water[i]) {
422 433 DEBUGOUT("Invalid water mark configuration\n");
423 434 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
424 435 goto out;
425 436 }
426 437 }
427 438 }
428 439
429 440 /*
430 441 * On 82598 having Rx FC on causes resets while doing 1G
431 442 * so if it's on turn it off once we know link_speed. For
432 443 * more details see 82598 Specification update.
433 444 */
434 445 hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
435 446 if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
436 447 switch (hw->fc.requested_mode) {
437 448 case ixgbe_fc_full:
438 449 hw->fc.requested_mode = ixgbe_fc_tx_pause;
439 450 break;
440 451 case ixgbe_fc_rx_pause:
441 452 hw->fc.requested_mode = ixgbe_fc_none;
442 453 break;
443 454 default:
444 455 /* no change */
445 456 break;
446 457 }
447 458 }
448 459
449 460 /* Negotiate the fc mode to use */
450 461 ixgbe_fc_autoneg(hw);
451 462
452 463 /* Disable any previous flow control settings */
453 464 fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
454 465 fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
455 466
456 467 rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
457 468 rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
458 469
459 470 /*
460 471 * The possible values of fc.current_mode are:
461 472 * 0: Flow control is completely disabled
462 473 * 1: Rx flow control is enabled (we can receive pause frames,
463 474 * but not send pause frames).
464 475 * 2: Tx flow control is enabled (we can send pause frames but
465 476 * we do not support receiving pause frames).
466 477 * 3: Both Rx and Tx flow control (symmetric) are enabled.
467 478 * other: Invalid.
468 479 */
469 480 switch (hw->fc.current_mode) {
470 481 case ixgbe_fc_none:
471 482 /*
472 483 * Flow control is disabled by software override or autoneg.
473 484 * The code below will actually disable it in the HW.
474 485 */
475 486 break;
476 487 case ixgbe_fc_rx_pause:
477 488 /*
478 489 * Rx Flow control is enabled and Tx Flow control is
479 490 * disabled by software override. Since there really
480 491 * isn't a way to advertise that we are capable of RX
481 492 * Pause ONLY, we will advertise that we support both
482 493 * symmetric and asymmetric Rx PAUSE. Later, we will
483 494 * disable the adapter's ability to send PAUSE frames.
484 495 */
485 496 fctrl_reg |= IXGBE_FCTRL_RFCE;
486 497 break;
487 498 case ixgbe_fc_tx_pause:
488 499 /*
489 500 * Tx Flow control is enabled, and Rx Flow control is
490 501 * disabled by software override.
491 502 */
492 503 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
493 504 break;
494 505 case ixgbe_fc_full:
495 506 /* Flow control (both Rx and Tx) is enabled by SW override. */
496 507 fctrl_reg |= IXGBE_FCTRL_RFCE;
497 508 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
498 509 break;
499 510 default:
500 511 DEBUGOUT("Flow control param set incorrectly\n");
501 512 ret_val = IXGBE_ERR_CONFIG;
502 513 goto out;
514 + break;
503 515 }
504 516
505 517 /* Set 802.3x based flow control settings. */
506 518 fctrl_reg |= IXGBE_FCTRL_DPF;
507 519 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
508 520 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
509 521
510 522 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
511 523 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
512 524 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
513 525 hw->fc.high_water[i]) {
514 526 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
515 527 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
516 528 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
517 529 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
518 530 } else {
519 531 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
520 532 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
521 533 }
522 534
523 535 }
524 536
525 537 /* Configure pause time (2 TCs per register) */
526 538 reg = hw->fc.pause_time * 0x00010001;
527 539 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
528 540 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
529 541
530 542 /* Configure flow control refresh threshold value */
531 543 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
532 544
533 545 out:
534 546 return ret_val;
535 547 }
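
A note on the pause-time programming near the end of the function: each FCTTV register holds the pause time for two traffic classes, so the code multiplies the 16-bit pause value by 0x00010001 to replicate it into both halves of the 32-bit register. A small sketch of that arithmetic, with an assumed example pause time (not a value taken from the driver):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint16_t pause_time = 0x1234;                       /* assumed example value */
        uint32_t reg = (uint32_t)pause_time * 0x00010001u;  /* duplicate into both halves */

        assert(reg == 0x12341234u);
        assert((reg & 0xFFFFu) == pause_time);   /* low half  */
        assert((reg >> 16) == pause_time);       /* high half */
        return 0;
    }
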
536 548
537 549 /**
538 550 * ixgbe_start_mac_link_82598 - Configures MAC link settings
539 551 * @hw: pointer to hardware structure
540 552 *
541 553 * Configures link settings based on values in the ixgbe_hw struct.
542 554 * Restarts the link. Performs autonegotiation if needed.
543 555 **/
544 556 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
545 557 bool autoneg_wait_to_complete)
546 558 {
547 559 u32 autoc_reg;
548 560 u32 links_reg;
549 561 u32 i;
550 562 s32 status = IXGBE_SUCCESS;
551 563
552 564 DEBUGFUNC("ixgbe_start_mac_link_82598");
553 565
554 566 /* Restart link */
555 567 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
556 568 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
557 569 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
558 570
559 571 /* Only poll for autoneg to complete if specified to do so */
560 572 if (autoneg_wait_to_complete) {
561 573 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
562 574 IXGBE_AUTOC_LMS_KX4_AN ||
563 575 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
564 576 IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
565 577 links_reg = 0; /* Just in case Autoneg time = 0 */
566 578 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
567 579 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
568 580 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
569 581 break;
570 582 msec_delay(100);
571 583 }
572 584 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
573 585 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
574 586 DEBUGOUT("Autonegotiation did not complete.\n");
575 587 }
576 588 }
577 589 }
578 590
579 591 /* Add delay to filter out noises during initial link setup */
580 592 msec_delay(50);
581 593
582 594 return status;
583 595 }
584 596
585 597 /**
586 598 * ixgbe_validate_link_ready - Function looks for phy link
587 599 * @hw: pointer to hardware structure
588 600 *
589 601 * Function indicates success when phy link is available. If phy is not ready
590 602 * within 5 seconds of MAC indicating link, the function returns error.
591 603 **/
592 604 static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
593 605 {
594 606 u32 timeout;
595 607 u16 an_reg;
596 608
597 609 if (hw->device_id != IXGBE_DEV_ID_82598AT2)
598 610 return IXGBE_SUCCESS;
599 611
600 612 for (timeout = 0;
601 613 timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
602 614 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
603 615 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
604 616
605 617 if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
606 618 (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
607 619 break;
608 620
609 621 msec_delay(100);
610 622 }
611 623
612 624 if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
613 625 DEBUGOUT("Link was indicated but link is down\n");
614 626 return IXGBE_ERR_LINK_SETUP;
615 627 }
616 628
617 629 return IXGBE_SUCCESS;
618 630 }
619 631
620 632 /**
621 633 * ixgbe_check_mac_link_82598 - Get link/speed status
622 634 * @hw: pointer to hardware structure
623 635 * @speed: pointer to link speed
624 636 * @link_up: TRUE if link is up, FALSE otherwise
625 637 * @link_up_wait_to_complete: bool used to wait for link up or not
626 638 *
627 639 * Reads the links register to determine if link is up and the current speed
628 640 **/
629 641 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
630 642 ixgbe_link_speed *speed, bool *link_up,
631 643 bool link_up_wait_to_complete)
632 644 {
633 645 u32 links_reg;
634 646 u32 i;
635 647 u16 link_reg, adapt_comp_reg;
636 648
637 649 DEBUGFUNC("ixgbe_check_mac_link_82598");
638 650
639 651 /*
640 652 * SERDES PHY requires us to read link status from undocumented
641 653 * register 0xC79F. Bit 0 set indicates link is up/ready; clear
642 654 * indicates link down. 0xC00C is read to check that the XAUI lanes
643 655 * are active. Bit 0 clear indicates active; set indicates inactive.
644 656 */
645 657 if (hw->phy.type == ixgbe_phy_nl) {
646 658 hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
647 659 hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
648 660 hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
649 661 &adapt_comp_reg);
650 662 if (link_up_wait_to_complete) {
651 - for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
663 + for (i = 0; i < hw->mac.max_link_up_time; i++) {
652 664 if ((link_reg & 1) &&
653 665 ((adapt_comp_reg & 1) == 0)) {
654 666 *link_up = TRUE;
655 667 break;
656 668 } else {
657 669 *link_up = FALSE;
658 670 }
659 671 msec_delay(100);
660 672 hw->phy.ops.read_reg(hw, 0xC79F,
661 673 IXGBE_TWINAX_DEV,
662 674 &link_reg);
663 675 hw->phy.ops.read_reg(hw, 0xC00C,
664 676 IXGBE_TWINAX_DEV,
665 677 &adapt_comp_reg);
666 678 }
667 679 } else {
668 680 if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
669 681 *link_up = TRUE;
670 682 else
671 683 *link_up = FALSE;
672 684 }
673 685
674 686 if (*link_up == FALSE)
675 687 goto out;
676 688 }
677 689
678 690 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
679 691 if (link_up_wait_to_complete) {
680 - for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
692 + for (i = 0; i < hw->mac.max_link_up_time; i++) {
681 693 if (links_reg & IXGBE_LINKS_UP) {
682 694 *link_up = TRUE;
683 695 break;
684 696 } else {
685 697 *link_up = FALSE;
686 698 }
687 699 msec_delay(100);
688 700 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
689 701 }
690 702 } else {
691 703 if (links_reg & IXGBE_LINKS_UP)
692 704 *link_up = TRUE;
693 705 else
694 706 *link_up = FALSE;
695 707 }
696 708
697 709 if (links_reg & IXGBE_LINKS_SPEED)
698 710 *speed = IXGBE_LINK_SPEED_10GB_FULL;
699 711 else
700 712 *speed = IXGBE_LINK_SPEED_1GB_FULL;
701 713
702 714 if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
703 715 (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
704 716 *link_up = FALSE;
705 717
706 718 out:
707 719 return IXGBE_SUCCESS;
708 720 }
709 721
710 722 /**
711 723 * ixgbe_setup_mac_link_82598 - Set MAC link speed
712 724 * @hw: pointer to hardware structure
713 725 * @speed: new link speed
714 - * @autoneg: TRUE if autonegotiation enabled
715 726 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
716 727 *
717 728 * Set the link speed in the AUTOC register and restarts link.
718 729 **/
719 730 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
720 - ixgbe_link_speed speed, bool autoneg,
731 + ixgbe_link_speed speed,
721 732 bool autoneg_wait_to_complete)
722 733 {
723 - s32 status;
734 + bool autoneg = FALSE;
735 + s32 status = IXGBE_SUCCESS;
724 736 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
725 737 u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
726 738 u32 autoc = curr_autoc;
727 739 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
728 740
729 741 DEBUGFUNC("ixgbe_setup_mac_link_82598");
730 742
731 743 /* Check to see if speed passed in is supported. */
732 - status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
733 - if (status != IXGBE_SUCCESS)
734 - return (status);
744 + ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
735 745 speed &= link_capabilities;
736 746
737 747 if (speed == IXGBE_LINK_SPEED_UNKNOWN)
738 748 status = IXGBE_ERR_LINK_SETUP;
739 749
740 750 /* Set KX4/KX support according to speed requested */
741 751 else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
742 752 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
743 753 autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
744 754 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
745 755 autoc |= IXGBE_AUTOC_KX4_SUPP;
746 756 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
747 757 autoc |= IXGBE_AUTOC_KX_SUPP;
748 758 if (autoc != curr_autoc)
749 759 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
750 760 }
751 761
752 762 if (status == IXGBE_SUCCESS) {
753 763 /*
754 764 * Setup and restart the link based on the new values in
755 765 * ixgbe_hw This will write the AUTOC register based on the new
756 766 * stored values
757 767 */
758 768 status = ixgbe_start_mac_link_82598(hw,
759 769 autoneg_wait_to_complete);
760 770 }
761 771
762 772 return status;
763 773 }
764 774
765 775
766 776 /**
767 777 * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
768 778 * @hw: pointer to hardware structure
769 779 * @speed: new link speed
770 - * @autoneg: TRUE if autonegotiation enabled
771 780 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
772 781 *
773 782 * Sets the link speed in the AUTOC register in the MAC and restarts link.
774 783 **/
775 784 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
776 785 ixgbe_link_speed speed,
777 - bool autoneg,
778 786 bool autoneg_wait_to_complete)
779 787 {
780 788 s32 status;
781 789
782 790 DEBUGFUNC("ixgbe_setup_copper_link_82598");
783 791
784 792 /* Setup the PHY according to input speed */
785 - status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
793 + status = hw->phy.ops.setup_link_speed(hw, speed,
786 794 autoneg_wait_to_complete);
787 - if (status == IXGBE_SUCCESS) {
788 - /* Set up MAC */
789 - status =
790 - ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
791 - }
795 + /* Set up MAC */
796 + ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
792 797
793 798 return status;
794 799 }
795 800
796 801 /**
797 802 * ixgbe_reset_hw_82598 - Performs hardware reset
798 803 * @hw: pointer to hardware structure
799 804 *
800 805 * Resets the hardware by resetting the transmit and receive units, masks and
801 806 * clears all interrupts, performing a PHY reset, and performing a link (MAC)
802 807 * reset.
803 808 **/
804 809 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
805 810 {
806 811 s32 status = IXGBE_SUCCESS;
807 812 s32 phy_status = IXGBE_SUCCESS;
808 813 u32 ctrl;
809 814 u32 gheccr;
810 815 u32 i;
811 816 u32 autoc;
812 817 u8 analog_val;
813 818
814 819 DEBUGFUNC("ixgbe_reset_hw_82598");
815 820
816 821 /* Call adapter stop to disable tx/rx and clear interrupts */
817 822 status = hw->mac.ops.stop_adapter(hw);
818 823 if (status != IXGBE_SUCCESS)
819 824 goto reset_hw_out;
820 825
821 826 /*
822 827 * Power up the Atlas Tx lanes if they are currently powered down.
823 828 * Atlas Tx lanes are powered down for MAC loopback tests, but
824 829 * they are not automatically restored on reset.
825 830 */
826 831 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
827 832 if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
828 833 /* Enable Tx Atlas so packets can be transmitted again */
829 834 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
830 835 &analog_val);
831 836 analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
832 837 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
833 838 analog_val);
834 839
835 840 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
836 841 &analog_val);
837 842 analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
838 843 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
839 844 analog_val);
840 845
841 846 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
842 847 &analog_val);
843 848 analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
844 849 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
845 850 analog_val);
846 851
847 852 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
848 853 &analog_val);
849 854 analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
850 855 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
851 856 analog_val);
852 857 }
853 858
854 859 /* Reset PHY */
855 860 if (hw->phy.reset_disable == FALSE) {
856 861 /* PHY ops must be identified and initialized prior to reset */
857 862
858 863 /* Init PHY and function pointers, perform SFP setup */
859 864 phy_status = hw->phy.ops.init(hw);
860 865 if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
861 866 goto reset_hw_out;
862 867 if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
863 868 goto mac_reset_top;
864 869
865 870 hw->phy.ops.reset(hw);
866 871 }
867 872
868 873 mac_reset_top:
869 874 /*
870 875 * Issue global reset to the MAC. This needs to be a SW reset.
871 876 * If link reset is used, it might reset the MAC when mng is using it
872 877 */
873 878 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
874 879 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
875 880 IXGBE_WRITE_FLUSH(hw);
876 881
877 882 /* Poll for reset bit to self-clear indicating reset is complete */
878 883 for (i = 0; i < 10; i++) {
879 884 usec_delay(1);
880 885 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
881 886 if (!(ctrl & IXGBE_CTRL_RST))
882 887 break;
883 888 }
884 889 if (ctrl & IXGBE_CTRL_RST) {
885 890 status = IXGBE_ERR_RESET_FAILED;
886 891 DEBUGOUT("Reset polling failed to complete.\n");
887 892 }
888 893
889 894 msec_delay(50);
890 895
891 896 /*
892 897 * Double resets are required for recovery from certain error
893 898 * conditions. Between resets, it is necessary to stall to allow time
894 899 * for any pending HW events to complete.
895 900 */
896 901 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
897 902 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
898 903 goto mac_reset_top;
899 904 }
900 905
901 906 gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
902 907 gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
903 908 IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
904 909
905 910 /*
906 911 * Store the original AUTOC value if it has not been
907 912 * stored off yet. Otherwise restore the stored original
908 913 * AUTOC value since the reset operation sets back to deaults.
909 914 */
910 915 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
911 916 if (hw->mac.orig_link_settings_stored == FALSE) {
912 917 hw->mac.orig_autoc = autoc;
913 918 hw->mac.orig_link_settings_stored = TRUE;
914 919 } else if (autoc != hw->mac.orig_autoc) {
915 920 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
916 921 }
917 922
918 923 /* Store the permanent mac address */
919 924 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
920 925
921 926 /*
922 927 * Store MAC address from RAR0, clear receive address registers, and
923 928 * clear the multicast table
924 929 */
925 930 hw->mac.ops.init_rx_addrs(hw);
926 931
927 932 reset_hw_out:
928 933 if (phy_status != IXGBE_SUCCESS)
929 934 status = phy_status;
930 935
931 936 return status;
932 937 }
933 938
934 939 /**
935 940 * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
936 941 * @hw: pointer to hardware struct
937 942 * @rar: receive address register index to associate with a VMDq index
938 943 * @vmdq: VMDq set index
939 944 **/
940 945 s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
941 946 {
942 947 u32 rar_high;
943 948 u32 rar_entries = hw->mac.num_rar_entries;
944 949
945 950 DEBUGFUNC("ixgbe_set_vmdq_82598");
946 951
947 952 /* Make sure we are using a valid rar index range */
948 953 if (rar >= rar_entries) {
949 954 DEBUGOUT1("RAR index %d is out of range.\n", rar);
950 955 return IXGBE_ERR_INVALID_ARGUMENT;
951 956 }
952 957
953 958 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
954 959 rar_high &= ~IXGBE_RAH_VIND_MASK;
955 960 rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
956 961 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
957 962 return IXGBE_SUCCESS;
958 963 }
959 964
960 965 /**
961 966 * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
962 967 * @hw: pointer to hardware struct
963 968 * @rar: receive address register index to associate with a VMDq index
964 969 * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
965 970 **/
966 971 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
967 972 {
968 973 u32 rar_high;
969 974 u32 rar_entries = hw->mac.num_rar_entries;
970 975
971 976 UNREFERENCED_1PARAMETER(vmdq);
972 977
973 978 /* Make sure we are using a valid rar index range */
974 979 if (rar >= rar_entries) {
975 980 DEBUGOUT1("RAR index %d is out of range.\n", rar);
976 981 return IXGBE_ERR_INVALID_ARGUMENT;
977 982 }
978 983
979 984 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
980 985 if (rar_high & IXGBE_RAH_VIND_MASK) {
981 986 rar_high &= ~IXGBE_RAH_VIND_MASK;
982 987 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
983 988 }
984 989
985 990 return IXGBE_SUCCESS;
986 991 }
987 992
988 993 /**
989 994 * ixgbe_set_vfta_82598 - Set VLAN filter table
990 995 * @hw: pointer to hardware structure
991 996 * @vlan: VLAN id to write to VLAN filter
992 997 * @vind: VMDq output index that maps queue to VLAN id in VFTA
993 998 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
994 999 *
995 1000 * Turn on/off specified VLAN in the VLAN filter table.
996 1001 **/
997 1002 s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
998 1003 bool vlan_on)
999 1004 {
1000 1005 u32 regindex;
1001 1006 u32 bitindex;
1002 1007 u32 bits;
1003 1008 u32 vftabyte;
1004 1009
1005 1010 DEBUGFUNC("ixgbe_set_vfta_82598");
1006 1011
1007 1012 if (vlan > 4095)
1008 1013 return IXGBE_ERR_PARAM;
1009 1014
1010 1015 /* Determine 32-bit word position in array */
1011 1016 regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
1012 1017
1013 1018 /* Determine the location of the (VMD) queue index */
1014 1019 vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
1015 1020 bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
1016 1021
1017 1022 /* Set the nibble for VMD queue index */
1018 1023 bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
1019 1024 bits &= (~(0x0F << bitindex));
1020 1025 bits |= (vind << bitindex);
1021 1026 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
1022 1027
1023 1028 /* Determine the location of the bit for this VLAN id */
1024 1029 bitindex = vlan & 0x1F; /* lower five bits */
1025 1030
1026 1031 bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
1027 1032 if (vlan_on)
1028 1033 /* Turn on this VLAN id */
1029 1034 bits |= (1 << bitindex);
1030 1035 else
1031 1036 /* Turn off this VLAN id */
1032 1037 bits &= ~(1 << bitindex);
1033 1038 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
1034 1039
1035 1040 return IXGBE_SUCCESS;
1036 1041 }
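
The shifts and masks above carve a 12-bit VLAN id into a 32-bit word index, a byte-array selector for the VMDq nibble, and the bit positions used within those words. A worked sketch of the same arithmetic for an example VLAN id (values follow directly from the expressions in the function):

    #include <stdio.h>

    int main(void)
    {
        unsigned vlan = 100;                        /* example VLAN id */
        unsigned regindex = (vlan >> 5) & 0x7F;     /* 32-bit word in the table:   3 */
        unsigned vftabyte = (vlan >> 3) & 0x03;     /* VFTAVIND byte array:        0 */
        unsigned nibble   = (vlan & 0x7) << 2;      /* bit offset of VMDq nibble: 16 */
        unsigned bitindex = vlan & 0x1F;            /* bit within the VFTA word:   4 */

        printf("%u %u %u %u\n", regindex, vftabyte, nibble, bitindex);
        return 0;
    }
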
1037 1042
1038 1043 /**
1039 1044 * ixgbe_clear_vfta_82598 - Clear VLAN filter table
1040 1045 * @hw: pointer to hardware structure
1041 1046 *
1042 1047 * Clears the VLAN filter table, and the VMDq index associated with the filter
1043 1048 **/
1044 1049 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
1045 1050 {
1046 1051 u32 offset;
1047 1052 u32 vlanbyte;
1048 1053
1049 1054 DEBUGFUNC("ixgbe_clear_vfta_82598");
1050 1055
1051 1056 for (offset = 0; offset < hw->mac.vft_size; offset++)
1052 1057 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
1053 1058
1054 1059 for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
1055 1060 for (offset = 0; offset < hw->mac.vft_size; offset++)
1056 1061 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
1057 1062 0);
1058 1063
1059 1064 return IXGBE_SUCCESS;
1060 1065 }
1061 1066
1062 1067 /**
1063 1068 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
1064 1069 * @hw: pointer to hardware structure
1065 1070 * @reg: analog register to read
1066 1071 * @val: read value
1067 1072 *
1068 1073 * Performs read operation to Atlas analog register specified.
1069 1074 **/
1070 1075 s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
1071 1076 {
1072 1077 u32 atlas_ctl;
1073 1078
1074 1079 DEBUGFUNC("ixgbe_read_analog_reg8_82598");
1075 1080
1076 1081 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
1077 1082 IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
1078 1083 IXGBE_WRITE_FLUSH(hw);
1079 1084 usec_delay(10);
1080 1085 atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
1081 1086 *val = (u8)atlas_ctl;
1082 1087
1083 1088 return IXGBE_SUCCESS;
1084 1089 }
1085 1090
1086 1091 /**
1087 1092 * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
1088 1093 * @hw: pointer to hardware structure
1089 1094 * @reg: atlas register to write
1090 1095 * @val: value to write
1091 1096 *
1092 1097 * Performs write operation to Atlas analog register specified.
1093 1098 **/
1094 1099 s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
1095 1100 {
1096 1101 u32 atlas_ctl;
1097 1102
1098 1103 DEBUGFUNC("ixgbe_write_analog_reg8_82598");
1099 1104
1100 1105 atlas_ctl = (reg << 8) | val;
1101 1106 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
1102 1107 IXGBE_WRITE_FLUSH(hw);
1103 1108 usec_delay(10);
1104 1109
1105 1110 return IXGBE_SUCCESS;
1106 1111 }
1107 1112
1108 1113 /**
1109 - * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
1114 + * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
1110 1115 * @hw: pointer to hardware structure
1111 - * @byte_offset: EEPROM byte offset to read
1116 + * @dev_addr: address to read from
1117 + * @byte_offset: byte offset to read from dev_addr
1112 1118 * @eeprom_data: value read
1113 1119 *
1114 1120 * Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
1115 1121 **/
1116 -s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1117 - u8 *eeprom_data)
1122 +static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
1123 + u8 byte_offset, u8 *eeprom_data)
1118 1124 {
1119 1125 s32 status = IXGBE_SUCCESS;
1120 1126 u16 sfp_addr = 0;
1121 1127 u16 sfp_data = 0;
1122 1128 u16 sfp_stat = 0;
1129 + u16 gssr;
1123 1130 u32 i;
1124 1131
1125 - DEBUGFUNC("ixgbe_read_i2c_eeprom_82598");
1132 + DEBUGFUNC("ixgbe_read_i2c_phy_82598");
1126 1133
1134 + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
1135 + gssr = IXGBE_GSSR_PHY1_SM;
1136 + else
1137 + gssr = IXGBE_GSSR_PHY0_SM;
1138 +
1139 + if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS)
1140 + return IXGBE_ERR_SWFW_SYNC;
1141 +
1127 1142 if (hw->phy.type == ixgbe_phy_nl) {
1128 1143 /*
1129 1144 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
1130 1145 * 0xC30D. These registers are used to talk to the SFP+
1131 1146 * module's EEPROM through the SDA/SCL (I2C) interface.
1132 1147 */
1133 - sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
1148 + sfp_addr = (dev_addr << 8) + byte_offset;
1134 1149 sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
1135 - hw->phy.ops.write_reg(hw,
1136 - IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
1137 - IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1138 - sfp_addr);
1150 + hw->phy.ops.write_reg_mdi(hw,
1151 + IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
1152 + IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1153 + sfp_addr);
1139 1154
1140 1155 /* Poll status */
1141 1156 for (i = 0; i < 100; i++) {
1142 - hw->phy.ops.read_reg(hw,
1143 - IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
1144 - IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1145 - &sfp_stat);
1157 + hw->phy.ops.read_reg_mdi(hw,
1158 + IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
1159 + IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1160 + &sfp_stat);
1146 1161 sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
1147 1162 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
1148 1163 break;
1149 1164 msec_delay(10);
1150 1165 }
1151 1166
1152 1167 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
1153 1168 DEBUGOUT("EEPROM read did not pass.\n");
1154 1169 status = IXGBE_ERR_SFP_NOT_PRESENT;
1155 1170 goto out;
1156 1171 }
1157 1172
1158 1173 /* Read data */
1159 - hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
1160 - IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
1174 + hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
1175 + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
1161 1176
1162 1177 *eeprom_data = (u8)(sfp_data >> 8);
1163 1178 } else {
1164 1179 status = IXGBE_ERR_PHY;
1165 - goto out;
1166 1180 }
1167 1181
1168 1182 out:
1183 + hw->mac.ops.release_swfw_sync(hw, gssr);
1169 1184 return status;
1170 1185 }
1171 1186
1172 1187 /**
1188 + * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
1189 + * @hw: pointer to hardware structure
1190 + * @byte_offset: EEPROM byte offset to read
1191 + * @eeprom_data: value read
1192 + *
1193 + * Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
1194 + **/
1195 +s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1196 + u8 *eeprom_data)
1197 +{
1198 + return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
1199 + byte_offset, eeprom_data);
1200 +}
1201 +
1202 +/**
1203 + * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
1204 + * @hw: pointer to hardware structure
1205 + * @byte_offset: byte offset at address 0xA2
1206 + * @eeprom_data: value read
1207 + *
1208 + * Performs 8 byte read operation to SFP module's SFF-8472 data over I2C
1209 + **/
1210 +static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
1211 + u8 *sff8472_data)
1212 +{
1213 + return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
1214 + byte_offset, sff8472_data);
1215 +}
1216 +
1217 +/**
1173 1218 * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
1174 1219 * @hw: pointer to hardware structure
1175 1220 *
1176 1221 * Determines physical layer capabilities of the current configuration.
1177 1222 **/
1178 1223 u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1179 1224 {
1180 1225 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1181 1226 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1182 1227 u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1183 1228 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1184 1229 u16 ext_ability = 0;
1185 1230
1186 1231 DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");
1187 1232
1188 1233 hw->phy.ops.identify(hw);
1189 1234
1190 1235 /* Copper PHY must be checked before AUTOC LMS to determine correct
1191 1236 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
1192 1237 switch (hw->phy.type) {
1193 1238 case ixgbe_phy_tn:
1194 1239 case ixgbe_phy_cu_unknown:
1195 1240 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
1196 1241 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
1197 1242 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
1198 1243 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1199 1244 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
1200 1245 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
1201 1246 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
1202 1247 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1203 1248 goto out;
1204 1249 default:
1205 1250 break;
1206 1251 }
1207 1252
1208 1253 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
1209 1254 case IXGBE_AUTOC_LMS_1G_AN:
1210 1255 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
1211 1256 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
1212 1257 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1213 1258 else
1214 1259 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1215 1260 break;
1216 1261 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
1217 1262 if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
1218 1263 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
1219 1264 else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
1220 1265 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1221 1266 else /* XAUI */
1222 1267 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1223 1268 break;
1224 1269 case IXGBE_AUTOC_LMS_KX4_AN:
1225 1270 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
1226 1271 if (autoc & IXGBE_AUTOC_KX_SUPP)
1227 1272 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1228 1273 if (autoc & IXGBE_AUTOC_KX4_SUPP)
1229 1274 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1230 1275 break;
1231 1276 default:
1232 1277 break;
1233 1278 }
1234 1279
1235 1280 if (hw->phy.type == ixgbe_phy_nl) {
1236 1281 hw->phy.ops.identify_sfp(hw);
1237 1282
1238 1283 switch (hw->phy.sfp_type) {
1239 1284 case ixgbe_sfp_type_da_cu:
1240 1285 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1241 1286 break;
1242 1287 case ixgbe_sfp_type_sr:
1243 1288 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1244 1289 break;
1245 1290 case ixgbe_sfp_type_lr:
1246 1291 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1247 1292 break;
1248 1293 default:
1249 1294 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1250 1295 break;
1251 1296 }
1252 1297 }
1253 1298
1254 1299 switch (hw->device_id) {
1255 1300 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
1256 1301 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1257 1302 break;
1258 1303 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
1259 1304 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
1260 1305 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
1261 1306 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1262 1307 break;
1263 1308 case IXGBE_DEV_ID_82598EB_XF_LR:
1264 1309 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1265 1310 break;
1266 1311 default:
1267 1312 break;
1268 1313 }
1269 1314
1270 1315 out:
1271 1316 return physical_layer;
1272 1317 }
1273 1318
1274 1319 /**
1275 1320 * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
1276 1321 * port devices.
1277 1322 * @hw: pointer to the HW structure
1278 1323 *
1279 1324 * Calls common function and corrects issue with some single port devices
1280 1325 * that enable LAN1 but not LAN0.
1281 1326 **/
1282 1327 void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
1283 1328 {
1284 1329 struct ixgbe_bus_info *bus = &hw->bus;
1285 1330 u16 pci_gen = 0;
1286 1331 u16 pci_ctrl2 = 0;
1287 1332
1288 1333 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");
1289 1334
1290 1335 ixgbe_set_lan_id_multi_port_pcie(hw);
1291 1336
1292 1337 /* check if LAN0 is disabled */
1293 1338 hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
1294 1339 if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
1295 1340
1296 1341 hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
1297 1342
1298 1343 /* if LAN0 is completely disabled force function to 0 */
1299 1344 if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
1300 1345 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
1301 1346 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
1302 1347
1303 1348 bus->func = 0;
1304 1349 }
1305 1350 }
1306 1351 }
1307 1352
1308 1353 /**
1309 1354 * ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
1310 1355 * @hw: pointer to hardware structure
1311 1356 *
1312 1357 **/
1313 1358 void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
1314 1359 {
1315 1360 u32 regval;
1316 1361 u32 i;
1317 1362
1318 1363 DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");
1319 1364
1320 1365 /* Enable relaxed ordering */
1321 1366 for (i = 0; ((i < hw->mac.max_tx_queues) &&
1322 1367 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1323 1368 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
1324 1369 regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1325 1370 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
1326 1371 }
1327 1372
1328 1373 for (i = 0; ((i < hw->mac.max_rx_queues) &&
1329 1374 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1330 1375 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
1331 1376 regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
1332 1377 IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
1333 1378 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
1334 1379 }
1335 1380
1336 1381 }
1337 1382
1338 1383 /**
1339 1384 * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
1340 1385 * @hw: pointer to hardware structure
1341 1386 * @num_pb: number of packet buffers to allocate
1342 1387 * @headroom: reserve n KB of headroom
1343 1388 * @strategy: packet buffer allocation strategy
1344 1389 **/
1345 1390 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
1346 1391 u32 headroom, int strategy)
1347 1392 {
1348 1393 u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
1349 1394 u8 i = 0;
1350 1395 UNREFERENCED_1PARAMETER(headroom);
1351 1396
1352 1397 if (!num_pb)
1353 1398 return;
1354 1399
1355 1400 /* Setup Rx packet buffer sizes */
1356 1401 switch (strategy) {
1357 1402 case PBA_STRATEGY_WEIGHTED:
1358 1403 /* Setup the first four at 80KB */
1359 1404 rxpktsize = IXGBE_RXPBSIZE_80KB;
1360 1405 for (; i < 4; i++)
1361 1406 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1362 1407 /* Setup the last four at 48KB...don't re-init i */
1363 1408 rxpktsize = IXGBE_RXPBSIZE_48KB;
1364 1409 /* Fall Through */
1365 - /* FALLTHRU */
1366 1410 case PBA_STRATEGY_EQUAL:
1367 1411 default:
1368 1412 /* Divide the remaining Rx packet buffer evenly among the TCs */
1369 1413 for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1370 1414 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1371 1415 break;
1372 1416 }
1373 1417
1374 1418 /* Setup Tx packet buffer sizes */
1375 1419 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1376 1420 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
1421 +}
1422 +
1423 +/**
1424 + * ixgbe_enable_rx_dma_82598 - Enable the Rx DMA unit
1425 + * @hw: pointer to hardware structure
1426 + * @regval: register value to write to RXCTRL
1427 + *
1428 + * Enables the Rx DMA unit
1429 + **/
1430 +s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval)
1431 +{
1432 + DEBUGFUNC("ixgbe_enable_rx_dma_82598");
1433 +
1434 + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
1435 +
1436 + return IXGBE_SUCCESS;
1377 1437 }