6064 ixgbe needs X550 support

          --- old/usr/src/uts/common/io/ixgbe/core/ixgbe_82599.c
          +++ new/usr/src/uts/common/io/ixgbe/core/ixgbe_82599.c
   1    1  /******************************************************************************
   2    2  
   3      -  Copyright (c) 2001-2012, Intel Corporation 
        3 +  Copyright (c) 2001-2015, Intel Corporation 
   4    4    All rights reserved.
   5    5    
   6    6    Redistribution and use in source and binary forms, with or without 
   7    7    modification, are permitted provided that the following conditions are met:
   8    8    
   9    9     1. Redistributions of source code must retain the above copyright notice, 
  10   10        this list of conditions and the following disclaimer.
  11   11    
  12   12     2. Redistributions in binary form must reproduce the above copyright 
  13   13        notice, this list of conditions and the following disclaimer in the 
          (9 lines elided)
  23   23    ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
  24   24    LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
  25   25    CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
  26   26    SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
  27   27    INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
  28   28    CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
  29   29    ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  30   30    POSSIBILITY OF SUCH DAMAGE.
  31   31  
  32   32  ******************************************************************************/
  33      -/*$FreeBSD: src/sys/dev/ixgbe/ixgbe_82599.c,v 1.8 2012/07/05 20:51:44 jfv Exp $*/
       33 +/*$FreeBSD$*/
  34   34  
  35   35  #include "ixgbe_type.h"
  36   36  #include "ixgbe_82599.h"
  37   37  #include "ixgbe_api.h"
  38   38  #include "ixgbe_common.h"
  39   39  #include "ixgbe_phy.h"
  40   40  
       41 +#define IXGBE_82599_MAX_TX_QUEUES 128
       42 +#define IXGBE_82599_MAX_RX_QUEUES 128
       43 +#define IXGBE_82599_RAR_ENTRIES   128
       44 +#define IXGBE_82599_MC_TBL_SIZE   128
       45 +#define IXGBE_82599_VFT_TBL_SIZE  128
       46 +#define IXGBE_82599_RX_PB_SIZE    512
       47 +
  41   48  static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
  42   49                                           ixgbe_link_speed speed,
  43      -                                         bool autoneg,
  44   50                                           bool autoneg_wait_to_complete);
  45   51  static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
  46   52  static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
  47   53                                     u16 offset, u16 *data);
  48   54  static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
  49   55                                            u16 words, u16 *data);
       56 +static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
       57 +                                        u8 dev_addr, u8 *data);
       58 +static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
       59 +                                        u8 dev_addr, u8 data);
  50   60  
  51   61  void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
  52   62  {
  53   63          struct ixgbe_mac_info *mac = &hw->mac;
  54   64  
  55   65          DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
  56   66  
  57      -        /* enable the laser control functions for SFP+ fiber */
  58      -        if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
       67 +        /*
       68 +         * enable the laser control functions for SFP+ fiber
       69 +         * and MNG not enabled
       70 +         */
       71 +        if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
       72 +            !ixgbe_mng_enabled(hw)) {
  59   73                  mac->ops.disable_tx_laser =
  60      -                                       &ixgbe_disable_tx_laser_multispeed_fiber;
       74 +                                       ixgbe_disable_tx_laser_multispeed_fiber;
  61   75                  mac->ops.enable_tx_laser =
  62      -                                        &ixgbe_enable_tx_laser_multispeed_fiber;
  63      -                mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
       76 +                                        ixgbe_enable_tx_laser_multispeed_fiber;
       77 +                mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber;
  64   78  
  65   79          } else {
  66   80                  mac->ops.disable_tx_laser = NULL;
  67   81                  mac->ops.enable_tx_laser = NULL;
  68   82                  mac->ops.flap_tx_laser = NULL;
  69   83          }
  70   84  
  71   85          if (hw->phy.multispeed_fiber) {
  72   86                  /* Set up dual speed SFP+ support */
  73      -                mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
       87 +                mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
       88 +                mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
       89 +                mac->ops.set_rate_select_speed =
       90 +                                               ixgbe_set_hard_rate_select_speed;
       91 +                if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber_fixed)
       92 +                        mac->ops.set_rate_select_speed =
       93 +                                               ixgbe_set_soft_rate_select_speed;
  74   94          } else {
  75   95                  if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
  76   96                       (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
  77   97                        hw->phy.smart_speed == ixgbe_smart_speed_on) &&
  78   98                        !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
  79      -                        mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
       99 +                        mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed;
  80  100                  } else {
  81      -                        mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
      101 +                        mac->ops.setup_link = ixgbe_setup_mac_link_82599;
  82  102                  }
  83  103          }
  84  104  }
  85  105  
  86  106  /**
  87  107   *  ixgbe_init_phy_ops_82599 - PHY/SFP specific init
  88  108   *  @hw: pointer to hardware structure
  89  109   *
  90  110   *  Initialize any function pointers that were not able to be
  91  111   *  set during init_shared_code because the PHY/SFP type was
  92  112   *  not known.  Perform the SFP init if necessary.
  93  113   *
  94  114   **/
  95  115  s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
  96  116  {
  97  117          struct ixgbe_mac_info *mac = &hw->mac;
  98  118          struct ixgbe_phy_info *phy = &hw->phy;
  99  119          s32 ret_val = IXGBE_SUCCESS;
      120 +        u32 esdp;
 100  121  
 101  122          DEBUGFUNC("ixgbe_init_phy_ops_82599");
 102  123  
      124 +        if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
      125 +                /* Store flag indicating I2C bus access control unit. */
      126 +                hw->phy.qsfp_shared_i2c_bus = TRUE;
      127 +
      128 +                /* Initialize access to QSFP+ I2C bus */
      129 +                esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
      130 +                esdp |= IXGBE_ESDP_SDP0_DIR;
      131 +                esdp &= ~IXGBE_ESDP_SDP1_DIR;
      132 +                esdp &= ~IXGBE_ESDP_SDP0;
      133 +                esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
      134 +                esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
      135 +                IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
      136 +                IXGBE_WRITE_FLUSH(hw);
      137 +
      138 +                phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599;
      139 +                phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599;
      140 +        }
 103  141          /* Identify the PHY or SFP module */
 104  142          ret_val = phy->ops.identify(hw);
 105  143          if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
 106  144                  goto init_phy_ops_out;
 107  145  
 108  146          /* Setup function pointers based on detected SFP module and speeds */
 109  147          ixgbe_init_mac_link_ops_82599(hw);
 110  148          if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
 111  149                  hw->phy.ops.reset = NULL;
 112  150  
 113  151          /* If copper media, overwrite with copper function pointers */
 114  152          if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
 115      -                mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
      153 +                mac->ops.setup_link = ixgbe_setup_copper_link_82599;
 116  154                  mac->ops.get_link_capabilities =
 117      -                                  &ixgbe_get_copper_link_capabilities_generic;
      155 +                                  ixgbe_get_copper_link_capabilities_generic;
 118  156          }
 119  157  
 120      -        /* Set necessary function pointers based on phy type */
      158 +        /* Set necessary function pointers based on PHY type */
 121  159          switch (hw->phy.type) {
 122  160          case ixgbe_phy_tn:
 123      -                phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
 124      -                phy->ops.check_link = &ixgbe_check_phy_link_tnx;
      161 +                phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
      162 +                phy->ops.check_link = ixgbe_check_phy_link_tnx;
 125  163                  phy->ops.get_firmware_version =
 126      -                             &ixgbe_get_phy_firmware_version_tnx;
      164 +                             ixgbe_get_phy_firmware_version_tnx;
 127  165                  break;
 128  166          default:
 129  167                  break;
 130  168          }
 131  169  init_phy_ops_out:
 132  170          return ret_val;
 133  171  }
 134  172  
 135  173  s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
 136  174  {
 137  175          s32 ret_val = IXGBE_SUCCESS;
 138      -        u32 reg_anlp1 = 0;
 139      -        u32 i = 0;
 140  176          u16 list_offset, data_offset, data_value;
 141  177  
 142  178          DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
 143  179  
 144  180          if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
 145  181                  ixgbe_init_mac_link_ops_82599(hw);
 146  182  
 147  183                  hw->phy.ops.reset = NULL;
 148  184  
 149  185                  ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
          (2 lines elided)
 152  188                          goto setup_sfp_out;
 153  189  
 154  190                  /* PHY config will finish before releasing the semaphore */
 155  191                  ret_val = hw->mac.ops.acquire_swfw_sync(hw,
 156  192                                                          IXGBE_GSSR_MAC_CSR_SM);
 157  193                  if (ret_val != IXGBE_SUCCESS) {
 158  194                          ret_val = IXGBE_ERR_SWFW_SYNC;
 159  195                          goto setup_sfp_out;
 160  196                  }
 161  197  
 162      -                hw->eeprom.ops.read(hw, ++data_offset, &data_value);
      198 +                if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
      199 +                        goto setup_sfp_err;
 163  200                  while (data_value != 0xffff) {
 164  201                          IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
 165  202                          IXGBE_WRITE_FLUSH(hw);
 166      -                        hw->eeprom.ops.read(hw, ++data_offset, &data_value);
      203 +                        if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
      204 +                                goto setup_sfp_err;
 167  205                  }
 168  206  
 169  207                  /* Release the semaphore */
 170  208                  hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
 171      -                /* Delay obtaining semaphore again to allow FW access */
      209 +                /* Delay obtaining semaphore again to allow FW access
      210 +                 * prot_autoc_write uses the semaphore too.
      211 +                 */
 172  212                  msec_delay(hw->eeprom.semaphore_delay);
 173  213  
 174      -                /* Now restart DSP by setting Restart_AN and clearing LMS */
 175      -                IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
 176      -                                IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
 177      -                                IXGBE_AUTOC_AN_RESTART));
      214 +                /* Restart DSP and set SFI mode */
      215 +                ret_val = hw->mac.ops.prot_autoc_write(hw,
      216 +                        hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
      217 +                        FALSE);
 178  218  
 179      -                /* Wait for AN to leave state 0 */
 180      -                for (i = 0; i < 10; i++) {
 181      -                        msec_delay(4);
 182      -                        reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
 183      -                        if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
 184      -                                break;
 185      -                }
 186      -                if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
      219 +                if (ret_val) {
 187  220                          DEBUGOUT("sfp module setup not complete\n");
 188  221                          ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
 189  222                          goto setup_sfp_out;
 190  223                  }
 191  224  
 192      -                /* Restart DSP by setting Restart_AN and return to SFI mode */
 193      -                IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
 194      -                                IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
 195      -                                IXGBE_AUTOC_AN_RESTART));
 196  225          }
 197  226  
 198  227  setup_sfp_out:
 199  228          return ret_val;
      229 +
      230 +setup_sfp_err:
      231 +        /* Release the semaphore */
      232 +        hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
      233 +        /* Delay obtaining semaphore again to allow FW access */
      234 +        msec_delay(hw->eeprom.semaphore_delay);
      235 +        ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
      236 +                      "eeprom read at offset %d failed", data_offset);
      237 +        return IXGBE_ERR_PHY;
 200  238  }
 201  239  
 202  240  /**
      241 + *  prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
      242 + *  @hw: pointer to hardware structure
       243 + *  @locked: Return whether we took the lock for this read.
      244 + *  @reg_val: Value we read from AUTOC
      245 + *
      246 + *  For this part (82599) we need to wrap read-modify-writes with a possible
      247 + *  FW/SW lock.  It is assumed this lock will be freed with the next
      248 + *  prot_autoc_write_82599().
      249 + */
      250 +s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
      251 +{
      252 +        s32 ret_val;
      253 +
      254 +        *locked = FALSE;
       255 +        /* If LESM is on then we need to hold the SW/FW semaphore. */
      256 +        if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
      257 +                ret_val = hw->mac.ops.acquire_swfw_sync(hw,
      258 +                                        IXGBE_GSSR_MAC_CSR_SM);
      259 +                if (ret_val != IXGBE_SUCCESS)
      260 +                        return IXGBE_ERR_SWFW_SYNC;
      261 +
      262 +                *locked = TRUE;
      263 +        }
      264 +
      265 +        *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
      266 +        return IXGBE_SUCCESS;
      267 +}
      268 +
      269 +/**
      270 + * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
      271 + * @hw: pointer to hardware structure
      272 + * @reg_val: value to write to AUTOC
      273 + * @locked: bool to indicate whether the SW/FW lock was already taken by
       274 + *           previous prot_autoc_read_82599.
      275 + *
      276 + * This part (82599) may need to hold the SW/FW lock around all writes to
      277 + * AUTOC. Likewise after a write we need to do a pipeline reset.
      278 + */
      279 +s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
      280 +{
      281 +        s32 ret_val = IXGBE_SUCCESS;
      282 +
      283 +        /* Blocked by MNG FW so bail */
      284 +        if (ixgbe_check_reset_blocked(hw))
      285 +                goto out;
      286 +
      287 +        /* We only need to get the lock if:
      288 +         *  - We didn't do it already (in the read part of a read-modify-write)
      289 +         *  - LESM is enabled.
      290 +         */
      291 +        if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
      292 +                ret_val = hw->mac.ops.acquire_swfw_sync(hw,
      293 +                                        IXGBE_GSSR_MAC_CSR_SM);
      294 +                if (ret_val != IXGBE_SUCCESS)
      295 +                        return IXGBE_ERR_SWFW_SYNC;
      296 +
      297 +                locked = TRUE;
      298 +        }
      299 +
      300 +        IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
      301 +        ret_val = ixgbe_reset_pipeline_82599(hw);
      302 +
      303 +out:
      304 +        /* Free the SW/FW semaphore as we either grabbed it here or
      305 +         * already had it when this function was called.
      306 +         */
      307 +        if (locked)
      308 +                hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
      309 +
      310 +        return ret_val;
      311 +}
      312 +
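The two helpers above are intended to be used as a pair: prot_autoc_read_82599() may take the SW/FW semaphore (when LESM is enabled), and prot_autoc_write_82599() releases it and performs the required pipeline reset. A minimal caller sketch, assuming the mac.ops layout from the shared ixgbe code; the helper name example_autoc_rmw is hypothetical and not part of this change:

        /*
         * Hypothetical caller: locked AUTOC read-modify-write.  The read may
         * take the SW/FW semaphore; the write releases it (via the locked
         * flag returned by the read) and resets the pipeline.
         */
        static s32 example_autoc_rmw(struct ixgbe_hw *hw, u32 set_bits)
        {
                bool locked = FALSE;
                u32 autoc;
                s32 ret_val;

                ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc);
                if (ret_val != IXGBE_SUCCESS)
                        return ret_val;

                return hw->mac.ops.prot_autoc_write(hw, autoc | set_bits, locked);
        }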
      313 +/**
 203  314   *  ixgbe_init_ops_82599 - Inits func ptrs and MAC type
 204  315   *  @hw: pointer to hardware structure
 205  316   *
 206  317   *  Initialize the function pointers and assign the MAC type for 82599.
 207  318   *  Does not touch the hardware.
 208  319   **/
 209  320  
 210  321  s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
 211  322  {
 212  323          struct ixgbe_mac_info *mac = &hw->mac;
 213  324          struct ixgbe_phy_info *phy = &hw->phy;
 214  325          struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
 215  326          s32 ret_val;
 216  327  
 217  328          DEBUGFUNC("ixgbe_init_ops_82599");
 218  329  
 219      -        ret_val = ixgbe_init_phy_ops_generic(hw);
      330 +        ixgbe_init_phy_ops_generic(hw);
 220  331          ret_val = ixgbe_init_ops_generic(hw);
 221  332  
 222  333          /* PHY */
 223      -        phy->ops.identify = &ixgbe_identify_phy_82599;
 224      -        phy->ops.init = &ixgbe_init_phy_ops_82599;
      334 +        phy->ops.identify = ixgbe_identify_phy_82599;
      335 +        phy->ops.init = ixgbe_init_phy_ops_82599;
 225  336  
 226  337          /* MAC */
 227      -        mac->ops.reset_hw = &ixgbe_reset_hw_82599;
 228      -        mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
 229      -        mac->ops.get_media_type = &ixgbe_get_media_type_82599;
      338 +        mac->ops.reset_hw = ixgbe_reset_hw_82599;
      339 +        mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
      340 +        mac->ops.get_media_type = ixgbe_get_media_type_82599;
 230  341          mac->ops.get_supported_physical_layer =
 231      -                                    &ixgbe_get_supported_physical_layer_82599;
 232      -        mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
 233      -        mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
 234      -        mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
 235      -        mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
 236      -        mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
 237      -        mac->ops.start_hw = &ixgbe_start_hw_82599;
 238      -        mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
 239      -        mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
 240      -        mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
 241      -        mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
 242      -        mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
      342 +                                    ixgbe_get_supported_physical_layer_82599;
      343 +        mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
      344 +        mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
      345 +        mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599;
      346 +        mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599;
      347 +        mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599;
      348 +        mac->ops.start_hw = ixgbe_start_hw_82599;
      349 +        mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
      350 +        mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
      351 +        mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
      352 +        mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
      353 +        mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
      354 +        mac->ops.prot_autoc_read = prot_autoc_read_82599;
      355 +        mac->ops.prot_autoc_write = prot_autoc_write_82599;
 243  356  
 244  357          /* RAR, Multicast, VLAN */
 245      -        mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
 246      -        mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
 247      -        mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
 248      -        mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
      358 +        mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
      359 +        mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
      360 +        mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
      361 +        mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
 249  362          mac->rar_highwater = 1;
 250      -        mac->ops.set_vfta = &ixgbe_set_vfta_generic;
 251      -        mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
 252      -        mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
 253      -        mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
 254      -        mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
 255      -        mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
 256      -        mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
      363 +        mac->ops.set_vfta = ixgbe_set_vfta_generic;
      364 +        mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
      365 +        mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
      366 +        mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
      367 +        mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599;
      368 +        mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
      369 +        mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;
 257  370  
 258  371          /* Link */
 259      -        mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
 260      -        mac->ops.check_link = &ixgbe_check_mac_link_generic;
 261      -        mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
      372 +        mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599;
      373 +        mac->ops.check_link = ixgbe_check_mac_link_generic;
      374 +        mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
 262  375          ixgbe_init_mac_link_ops_82599(hw);
 263  376  
 264      -        mac->mcft_size          = 128;
 265      -        mac->vft_size           = 128;
 266      -        mac->num_rar_entries    = 128;
 267      -        mac->rx_pb_size         = 512;
 268      -        mac->max_tx_queues      = 128;
 269      -        mac->max_rx_queues      = 128;
      377 +        mac->mcft_size          = IXGBE_82599_MC_TBL_SIZE;
      378 +        mac->vft_size           = IXGBE_82599_VFT_TBL_SIZE;
      379 +        mac->num_rar_entries    = IXGBE_82599_RAR_ENTRIES;
      380 +        mac->rx_pb_size         = IXGBE_82599_RX_PB_SIZE;
      381 +        mac->max_rx_queues      = IXGBE_82599_MAX_RX_QUEUES;
      382 +        mac->max_tx_queues      = IXGBE_82599_MAX_TX_QUEUES;
 270  383          mac->max_msix_vectors   = ixgbe_get_pcie_msix_count_generic(hw);
 271  384  
 272      -        mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
 273      -                                   IXGBE_FWSM_MODE_MASK) ? TRUE : FALSE;
      385 +        mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw))
      386 +                                      & IXGBE_FWSM_MODE_MASK);
 274  387  
 275  388          hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
 276  389  
 277  390          /* EEPROM */
 278      -        eeprom->ops.read = &ixgbe_read_eeprom_82599;
 279      -        eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599;
      391 +        eeprom->ops.read = ixgbe_read_eeprom_82599;
      392 +        eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599;
 280  393  
 281  394          /* Manageability interface */
 282      -        mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
      395 +        mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;
 283  396  
 284  397  
      398 +        mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
      399 +
 285  400          return ret_val;
 286  401  }
 287  402  
 288  403  /**
 289  404   *  ixgbe_get_link_capabilities_82599 - Determines link capabilities
 290  405   *  @hw: pointer to hardware structure
 291  406   *  @speed: pointer to link speed
 292      - *  @negotiation: TRUE when autoneg or autotry is enabled
      407 + *  @autoneg: TRUE when autoneg or autotry is enabled
 293  408   *
 294  409   *  Determines the link capabilities by reading the AUTOC register.
 295  410   **/
 296  411  s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
 297  412                                        ixgbe_link_speed *speed,
 298      -                                      bool *negotiation)
      413 +                                      bool *autoneg)
 299  414  {
 300  415          s32 status = IXGBE_SUCCESS;
 301  416          u32 autoc = 0;
 302  417  
 303  418          DEBUGFUNC("ixgbe_get_link_capabilities_82599");
 304  419  
 305  420  
 306  421          /* Check if 1G SFP module. */
 307  422          if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
 308  423              hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
 309  424              hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
 310  425              hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
 311  426              hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
 312  427              hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
 313  428                  *speed = IXGBE_LINK_SPEED_1GB_FULL;
 314      -                *negotiation = TRUE;
      429 +                *autoneg = TRUE;
 315  430                  goto out;
 316  431          }
 317  432  
 318  433          /*
 319  434           * Determine link capabilities based on the stored value of AUTOC,
 320  435           * which represents EEPROM defaults.  If AUTOC value has not
 321  436           * been stored, use the current register values.
 322  437           */
 323  438          if (hw->mac.orig_link_settings_stored)
 324  439                  autoc = hw->mac.orig_autoc;
 325  440          else
 326  441                  autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
 327  442  
 328  443          switch (autoc & IXGBE_AUTOC_LMS_MASK) {
 329  444          case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
 330  445                  *speed = IXGBE_LINK_SPEED_1GB_FULL;
 331      -                *negotiation = FALSE;
      446 +                *autoneg = FALSE;
 332  447                  break;
 333  448  
 334  449          case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
 335  450                  *speed = IXGBE_LINK_SPEED_10GB_FULL;
 336      -                *negotiation = FALSE;
      451 +                *autoneg = FALSE;
 337  452                  break;
 338  453  
 339  454          case IXGBE_AUTOC_LMS_1G_AN:
 340  455                  *speed = IXGBE_LINK_SPEED_1GB_FULL;
 341      -                *negotiation = TRUE;
      456 +                *autoneg = TRUE;
 342  457                  break;
 343  458  
 344  459          case IXGBE_AUTOC_LMS_10G_SERIAL:
 345  460                  *speed = IXGBE_LINK_SPEED_10GB_FULL;
 346      -                *negotiation = FALSE;
      461 +                *autoneg = FALSE;
 347  462                  break;
 348  463  
 349  464          case IXGBE_AUTOC_LMS_KX4_KX_KR:
 350  465          case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
 351  466                  *speed = IXGBE_LINK_SPEED_UNKNOWN;
 352  467                  if (autoc & IXGBE_AUTOC_KR_SUPP)
 353  468                          *speed |= IXGBE_LINK_SPEED_10GB_FULL;
 354  469                  if (autoc & IXGBE_AUTOC_KX4_SUPP)
 355  470                          *speed |= IXGBE_LINK_SPEED_10GB_FULL;
 356  471                  if (autoc & IXGBE_AUTOC_KX_SUPP)
 357  472                          *speed |= IXGBE_LINK_SPEED_1GB_FULL;
 358      -                *negotiation = TRUE;
      473 +                *autoneg = TRUE;
 359  474                  break;
 360  475  
 361  476          case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
 362  477                  *speed = IXGBE_LINK_SPEED_100_FULL;
 363  478                  if (autoc & IXGBE_AUTOC_KR_SUPP)
 364  479                          *speed |= IXGBE_LINK_SPEED_10GB_FULL;
 365  480                  if (autoc & IXGBE_AUTOC_KX4_SUPP)
 366  481                          *speed |= IXGBE_LINK_SPEED_10GB_FULL;
 367  482                  if (autoc & IXGBE_AUTOC_KX_SUPP)
 368  483                          *speed |= IXGBE_LINK_SPEED_1GB_FULL;
 369      -                *negotiation = TRUE;
      484 +                *autoneg = TRUE;
 370  485                  break;
 371  486  
 372  487          case IXGBE_AUTOC_LMS_SGMII_1G_100M:
 373  488                  *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
 374      -                *negotiation = FALSE;
      489 +                *autoneg = FALSE;
 375  490                  break;
 376  491  
 377  492          default:
 378  493                  status = IXGBE_ERR_LINK_SETUP;
 379  494                  goto out;
      495 +                break;
 380  496          }
 381  497  
 382  498          if (hw->phy.multispeed_fiber) {
 383  499                  *speed |= IXGBE_LINK_SPEED_10GB_FULL |
 384  500                            IXGBE_LINK_SPEED_1GB_FULL;
 385      -                *negotiation = TRUE;
      501 +
      502 +                /* QSFP must not enable full auto-negotiation
      503 +                 * Limited autoneg is enabled at 1G
      504 +                 */
      505 +                if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
      506 +                        *autoneg = FALSE;
      507 +                else
      508 +                        *autoneg = TRUE;
 386  509          }
 387  510  
 388  511  out:
 389  512          return status;
 390  513  }
 391  514  
 392  515  /**
 393  516   *  ixgbe_get_media_type_82599 - Get media type
 394  517   *  @hw: pointer to hardware structure
 395  518   *
          (22 lines elided)
 418  541          case IXGBE_DEV_ID_82599_KR:
 419  542          case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
 420  543          case IXGBE_DEV_ID_82599_XAUI_LOM:
 421  544                  /* Default device ID is mezzanine card KX/KX4 */
 422  545                  media_type = ixgbe_media_type_backplane;
 423  546                  break;
 424  547          case IXGBE_DEV_ID_82599_SFP:
 425  548          case IXGBE_DEV_ID_82599_SFP_FCOE:
 426  549          case IXGBE_DEV_ID_82599_SFP_EM:
 427  550          case IXGBE_DEV_ID_82599_SFP_SF2:
      551 +        case IXGBE_DEV_ID_82599_SFP_SF_QP:
 428  552          case IXGBE_DEV_ID_82599EN_SFP:
 429  553                  media_type = ixgbe_media_type_fiber;
 430  554                  break;
 431  555          case IXGBE_DEV_ID_82599_CX4:
 432  556                  media_type = ixgbe_media_type_cx4;
 433  557                  break;
 434  558          case IXGBE_DEV_ID_82599_T3_LOM:
 435  559                  media_type = ixgbe_media_type_copper;
 436  560                  break;
      561 +        case IXGBE_DEV_ID_82599_QSFP_SF_QP:
      562 +                media_type = ixgbe_media_type_fiber_qsfp;
      563 +                break;
      564 +        case IXGBE_DEV_ID_82599_BYPASS:
      565 +                media_type = ixgbe_media_type_fiber_fixed;
      566 +                hw->phy.multispeed_fiber = TRUE;
      567 +                break;
 437  568          default:
 438  569                  media_type = ixgbe_media_type_unknown;
 439  570                  break;
 440  571          }
 441  572  out:
 442  573          return media_type;
 443  574  }
 444  575  
 445  576  /**
      577 + *  ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
      578 + *  @hw: pointer to hardware structure
      579 + *
      580 + *  Disables link during D3 power down sequence.
      581 + *
      582 + **/
      583 +void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
      584 +{
      585 +        u32 autoc2_reg;
      586 +        u16 ee_ctrl_2 = 0;
      587 +
      588 +        DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
      589 +        ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
      590 +
      591 +        if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
      592 +            ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
      593 +                autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
      594 +                autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
      595 +                IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
      596 +        }
      597 +}
      598 +
      599 +/**
 446  600   *  ixgbe_start_mac_link_82599 - Setup MAC link settings
 447  601   *  @hw: pointer to hardware structure
 448  602   *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 449  603   *
 450  604   *  Configures link settings based on values in the ixgbe_hw struct.
 451  605   *  Restarts the link.  Performs autonegotiation if needed.
 452  606   **/
 453  607  s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
 454  608                                 bool autoneg_wait_to_complete)
 455  609  {
 456  610          u32 autoc_reg;
 457  611          u32 links_reg;
 458  612          u32 i;
 459  613          s32 status = IXGBE_SUCCESS;
      614 +        bool got_lock = FALSE;
 460  615  
 461  616          DEBUGFUNC("ixgbe_start_mac_link_82599");
 462  617  
 463  618  
      619 +        /*  reset_pipeline requires us to hold this lock as it writes to
      620 +         *  AUTOC.
      621 +         */
      622 +        if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
      623 +                status = hw->mac.ops.acquire_swfw_sync(hw,
      624 +                                                       IXGBE_GSSR_MAC_CSR_SM);
      625 +                if (status != IXGBE_SUCCESS)
      626 +                        goto out;
      627 +
      628 +                got_lock = TRUE;
      629 +        }
      630 +
 464  631          /* Restart link */
 465      -        autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
 466      -        autoc_reg |= IXGBE_AUTOC_AN_RESTART;
 467      -        IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
      632 +        ixgbe_reset_pipeline_82599(hw);
 468  633  
      634 +        if (got_lock)
      635 +                hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
      636 +
 469  637          /* Only poll for autoneg to complete if specified to do so */
 470  638          if (autoneg_wait_to_complete) {
      639 +                autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
 471  640                  if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
 472  641                       IXGBE_AUTOC_LMS_KX4_KX_KR ||
 473  642                      (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
 474  643                       IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
 475  644                      (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
 476  645                       IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
 477  646                          links_reg = 0; /* Just in case Autoneg time = 0 */
 478  647                          for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
 479  648                                  links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
 480  649                                  if (links_reg & IXGBE_LINKS_KX_AN_COMP)
          (3 lines elided)
 484  653                          if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
 485  654                                  status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
 486  655                                  DEBUGOUT("Autoneg did not complete.\n");
 487  656                          }
 488  657                  }
 489  658          }
 490  659  
 491  660          /* Add delay to filter out noises during initial link setup */
 492  661          msec_delay(50);
 493  662  
      663 +out:
 494  664          return status;
 495  665  }
 496  666  
 497  667  /**
 498  668   *  ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
 499  669   *  @hw: pointer to hardware structure
 500  670   *
 501  671   *  The base drivers may require better control over SFP+ module
 502  672   *  PHY states.  This includes selectively shutting down the Tx
 503  673   *  laser on the PHY, effectively halting physical link.
 504  674   **/
 505  675  void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
 506  676  {
 507  677          u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
 508  678  
 509      -        /* Disable tx laser; allow 100us to go dark per spec */
      679 +        /* Blocked by MNG FW so bail */
      680 +        if (ixgbe_check_reset_blocked(hw))
      681 +                return;
      682 +
      683 +        /* Disable Tx laser; allow 100us to go dark per spec */
 510  684          esdp_reg |= IXGBE_ESDP_SDP3;
 511  685          IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
 512  686          IXGBE_WRITE_FLUSH(hw);
 513  687          usec_delay(100);
 514  688  }
 515  689  
 516  690  /**
 517  691   *  ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
 518  692   *  @hw: pointer to hardware structure
 519  693   *
 520  694   *  The base drivers may require better control over SFP+ module
 521  695   *  PHY states.  This includes selectively turning on the Tx
 522  696   *  laser on the PHY, effectively starting physical link.
 523  697   **/
 524  698  void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
 525  699  {
 526  700          u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
 527  701  
 528      -        /* Enable tx laser; allow 100ms to light up */
      702 +        /* Enable Tx laser; allow 100ms to light up */
 529  703          esdp_reg &= ~IXGBE_ESDP_SDP3;
 530  704          IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
 531  705          IXGBE_WRITE_FLUSH(hw);
 532  706          msec_delay(100);
 533  707  }
 534  708  
 535  709  /**
 536  710   *  ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
 537  711   *  @hw: pointer to hardware structure
 538  712   *
 539  713   *  When the driver changes the link speeds that it can support,
 540  714   *  it sets autotry_restart to TRUE to indicate that we need to
 541  715   *  initiate a new autotry session with the link partner.  To do
 542      - *  so, we set the speed then disable and re-enable the tx laser, to
      716 + *  so, we set the speed then disable and re-enable the Tx laser, to
 543  717   *  alert the link partner that it also needs to restart autotry on its
 544  718   *  end.  This is consistent with TRUE clause 37 autoneg, which also
 545  719   *  involves a loss of signal.
 546  720   **/
 547  721  void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
 548  722  {
 549  723          DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
 550  724  
      725 +        /* Blocked by MNG FW so bail */
      726 +        if (ixgbe_check_reset_blocked(hw))
      727 +                return;
      728 +
 551  729          if (hw->mac.autotry_restart) {
 552  730                  ixgbe_disable_tx_laser_multispeed_fiber(hw);
 553  731                  ixgbe_enable_tx_laser_multispeed_fiber(hw);
 554  732                  hw->mac.autotry_restart = FALSE;
 555  733          }
 556  734  }
 557  735  
 558  736  /**
 559      - *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
      737 + *  ixgbe_set_hard_rate_select_speed - Set module link speed
 560  738   *  @hw: pointer to hardware structure
 561      - *  @speed: new link speed
 562      - *  @autoneg: TRUE if autonegotiation enabled
 563      - *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
      739 + *  @speed: link speed to set
 564  740   *
 565      - *  Set the link speed in the AUTOC register and restarts link.
 566      - **/
 567      -s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
 568      -                                     ixgbe_link_speed speed, bool autoneg,
 569      -                                     bool autoneg_wait_to_complete)
      741 + *  Set module link speed via RS0/RS1 rate select pins.
      742 + */
      743 +void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw,
      744 +                                        ixgbe_link_speed speed)
 570  745  {
 571      -        s32 status = IXGBE_SUCCESS;
 572      -        ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
 573      -        ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
 574      -        u32 speedcnt = 0;
 575  746          u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
 576      -        u32 i = 0;
 577      -        bool link_up = FALSE;
 578      -        bool negotiation;
 579  747  
 580      -        DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
 581      -
 582      -        /* Mask off requested but non-supported speeds */
 583      -        status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
 584      -        if (status != IXGBE_SUCCESS)
 585      -                return status;
 586      -
 587      -        speed &= link_speed;
 588      -
 589      -        /*
 590      -         * Try each speed one by one, highest priority first.  We do this in
 591      -         * software because 10gb fiber doesn't support speed autonegotiation.
 592      -         */
 593      -        if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
 594      -                speedcnt++;
 595      -                highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
 596      -
 597      -                /* If we already have link at this speed, just jump out */
 598      -                status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
 599      -                if (status != IXGBE_SUCCESS)
 600      -                        return status;
 601      -
 602      -                if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
 603      -                        goto out;
 604      -
 605      -                /* Set the module link speed */
      748 +        switch (speed) {
      749 +        case IXGBE_LINK_SPEED_10GB_FULL:
 606  750                  esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
 607      -                IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
 608      -                IXGBE_WRITE_FLUSH(hw);
 609      -
 610      -                /* Allow module to change analog characteristics (1G->10G) */
 611      -                msec_delay(40);
 612      -
 613      -                status = ixgbe_setup_mac_link_82599(hw,
 614      -                                                    IXGBE_LINK_SPEED_10GB_FULL,
 615      -                                                    autoneg,
 616      -                                                    autoneg_wait_to_complete);
 617      -                if (status != IXGBE_SUCCESS)
 618      -                        return status;
 619      -
 620      -                /* Flap the tx laser if it has not already been done */
 621      -                ixgbe_flap_tx_laser(hw);
 622      -
 623      -                /*
 624      -                 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
 625      -                 * Section 73.10.2, we may have to wait up to 500ms if KR is
 626      -                 * attempted.  82599 uses the same timing for 10g SFI.
 627      -                 */
 628      -                for (i = 0; i < 5; i++) {
 629      -                        /* Wait for the link partner to also set speed */
 630      -                        msec_delay(100);
 631      -
 632      -                        /* If we have link, just jump out */
 633      -                        status = ixgbe_check_link(hw, &link_speed,
 634      -                                                  &link_up, FALSE);
 635      -                        if (status != IXGBE_SUCCESS)
 636      -                                return status;
 637      -
 638      -                        if (link_up)
 639      -                                goto out;
 640      -                }
 641      -        }
 642      -
 643      -        if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
 644      -                speedcnt++;
 645      -                if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
 646      -                        highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
 647      -
 648      -                /* If we already have link at this speed, just jump out */
 649      -                status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
 650      -                if (status != IXGBE_SUCCESS)
 651      -                        return status;
 652      -
 653      -                if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
 654      -                        goto out;
 655      -
 656      -                /* Set the module link speed */
      751 +                break;
      752 +        case IXGBE_LINK_SPEED_1GB_FULL:
 657  753                  esdp_reg &= ~IXGBE_ESDP_SDP5;
 658  754                  esdp_reg |= IXGBE_ESDP_SDP5_DIR;
 659      -                IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
 660      -                IXGBE_WRITE_FLUSH(hw);
 661      -
 662      -                /* Allow module to change analog characteristics (10G->1G) */
 663      -                msec_delay(40);
 664      -
 665      -                status = ixgbe_setup_mac_link_82599(hw,
 666      -                                                    IXGBE_LINK_SPEED_1GB_FULL,
 667      -                                                    autoneg,
 668      -                                                    autoneg_wait_to_complete);
 669      -                if (status != IXGBE_SUCCESS)
 670      -                        return status;
 671      -
 672      -                /* Flap the tx laser if it has not already been done */
 673      -                ixgbe_flap_tx_laser(hw);
 674      -
 675      -                /* Wait for the link partner to also set speed */
 676      -                msec_delay(100);
 677      -
 678      -                /* If we have link, just jump out */
 679      -                status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
 680      -                if (status != IXGBE_SUCCESS)
 681      -                        return status;
 682      -
 683      -                if (link_up)
 684      -                        goto out;
      755 +                break;
      756 +        default:
      757 +                DEBUGOUT("Invalid fixed module speed\n");
      758 +                return;
 685  759          }
 686  760  
 687      -        /*
 688      -         * We didn't get link.  Configure back to the highest speed we tried,
 689      -         * (if there was more than one).  We call ourselves back with just the
 690      -         * single highest speed that the user requested.
 691      -         */
 692      -        if (speedcnt > 1)
 693      -                status = ixgbe_setup_mac_link_multispeed_fiber(hw,
 694      -                        highest_link_speed, autoneg, autoneg_wait_to_complete);
 695      -
 696      -out:
 697      -        /* Set autoneg_advertised value based on input link speed */
 698      -        hw->phy.autoneg_advertised = 0;
 699      -
 700      -        if (speed & IXGBE_LINK_SPEED_10GB_FULL)
 701      -                hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
 702      -
 703      -        if (speed & IXGBE_LINK_SPEED_1GB_FULL)
 704      -                hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
 705      -
 706      -        return status;
      761 +        IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
      762 +        IXGBE_WRITE_FLUSH(hw);
 707  763  }
 708  764  
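ixgbe_set_hard_rate_select_speed() backs the new set_rate_select_speed op installed in ixgbe_init_mac_link_ops_82599() above; the multispeed-fiber setup path that consumes it now lives in the shared code and is not shown in this file. A rough sketch of the assumed caller flow, for illustration only (not code from this webrev):

        /* Assumed caller flow: select 10G on the module via RS0/RS1, give the
         * module time to change analog characteristics, then flap the Tx
         * laser so the link partner restarts autotry at the new rate. */
        if (hw->mac.ops.set_rate_select_speed != NULL)
                hw->mac.ops.set_rate_select_speed(hw, IXGBE_LINK_SPEED_10GB_FULL);
        msec_delay(40);
        ixgbe_flap_tx_laser(hw);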
 709  765  /**
 710  766   *  ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
 711  767   *  @hw: pointer to hardware structure
 712  768   *  @speed: new link speed
 713      - *  @autoneg: TRUE if autonegotiation enabled
 714  769   *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 715  770   *
 716  771   *  Implements the Intel SmartSpeed algorithm.
 717  772   **/
 718  773  s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
 719      -                                    ixgbe_link_speed speed, bool autoneg,
      774 +                                    ixgbe_link_speed speed,
 720  775                                      bool autoneg_wait_to_complete)
 721  776  {
 722  777          s32 status = IXGBE_SUCCESS;
 723  778          ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
 724  779          s32 i, j;
 725  780          bool link_up = FALSE;
 726  781          u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
 727  782  
 728  783          DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");
 729  784  
          (12 lines elided)
 742  797          /*
 743  798           * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
 744  799           * autoneg advertisement if link is unable to be established at the
 745  800           * highest negotiated rate.  This can sometimes happen due to integrity
 746  801           * issues with the physical media connection.
 747  802           */
 748  803  
 749  804          /* First, try to get link with full advertisement */
 750  805          hw->phy.smart_speed_active = FALSE;
 751  806          for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
 752      -                status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
      807 +                status = ixgbe_setup_mac_link_82599(hw, speed,
 753  808                                                      autoneg_wait_to_complete);
 754  809                  if (status != IXGBE_SUCCESS)
 755  810                          goto out;
 756  811  
 757  812                  /*
 758  813                   * Wait for the controller to acquire link.  Per IEEE 802.3ap,
 759  814                   * Section 73.10.2, we may have to wait up to 500ms if KR is
 760  815                   * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
 761  816                   * Table 9 in the AN MAS.
 762  817                   */
          (14 lines elided)
 777  832          /*
 778  833           * We didn't get link.  If we advertised KR plus one of KX4/KX
 779  834           * (or BX4/BX), then disable KR and try again.
 780  835           */
 781  836          if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
 782  837              ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
 783  838                  goto out;
 784  839  
 785  840          /* Turn SmartSpeed on to disable KR support */
 786  841          hw->phy.smart_speed_active = TRUE;
 787      -        status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
      842 +        status = ixgbe_setup_mac_link_82599(hw, speed,
 788  843                                              autoneg_wait_to_complete);
 789  844          if (status != IXGBE_SUCCESS)
 790  845                  goto out;
 791  846  
 792  847          /*
 793  848           * Wait for the controller to acquire link.  600ms will allow for
 794  849           * the AN link_fail_inhibit_timer as well for multiple cycles of
 795  850           * parallel detect, both 10g and 1g. This allows for the maximum
 796  851           * connect attempts as defined in the AN MAS table 73-7.
 797  852           */
          (4 lines elided)
 802  857                  status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
 803  858                  if (status != IXGBE_SUCCESS)
 804  859                          goto out;
 805  860  
 806  861                  if (link_up)
 807  862                          goto out;
 808  863          }
 809  864  
 810  865          /* We didn't get link.  Turn SmartSpeed back off. */
 811  866          hw->phy.smart_speed_active = FALSE;
 812      -        status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
      867 +        status = ixgbe_setup_mac_link_82599(hw, speed,
 813  868                                              autoneg_wait_to_complete);
 814  869  
 815  870  out:
 816  871          if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
 817  872                  DEBUGOUT("Smartspeed has downgraded the link speed "
 818  873                  "from the maximum advertised\n");
 819  874          return status;
 820  875  }
 821  876  
 822  877  /**
 823  878   *  ixgbe_setup_mac_link_82599 - Set MAC link speed
 824  879   *  @hw: pointer to hardware structure
 825  880   *  @speed: new link speed
 826      - *  @autoneg: TRUE if autonegotiation enabled
 827  881   *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 828  882   *
 829  883   *  Set the link speed in the AUTOC register and restarts link.
 830  884   **/
 831  885  s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
 832      -                               ixgbe_link_speed speed, bool autoneg,
      886 +                               ixgbe_link_speed speed,
 833  887                                 bool autoneg_wait_to_complete)
 834  888  {
      889 +        bool autoneg = FALSE;
 835  890          s32 status = IXGBE_SUCCESS;
 836      -        u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
      891 +        u32 pma_pmd_1g, link_mode;
      892 +        u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* holds the value of AUTOC register at this current point in time */
      893 +        u32 orig_autoc = 0; /* holds the cached value of AUTOC register */
      894 +        u32 autoc = current_autoc; /* Temporary variable used for comparison purposes */
 837  895          u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
 838      -        u32 start_autoc = autoc;
 839      -        u32 orig_autoc = 0;
 840      -        u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
 841      -        u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
 842  896          u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
 843  897          u32 links_reg;
 844  898          u32 i;
 845  899          ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
 846  900  
 847  901          DEBUGFUNC("ixgbe_setup_mac_link_82599");
 848  902  
 849  903          /* Check to see if speed passed in is supported. */
 850  904          status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
 851      -        if (status != IXGBE_SUCCESS)
      905 +        if (status)
 852  906                  goto out;
 853  907  
 854  908          speed &= link_capabilities;
 855  909  
 856  910          if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
 857  911                  status = IXGBE_ERR_LINK_SETUP;
 858  912                  goto out;
 859  913          }
 860  914  
 861  915          /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
 862  916          if (hw->mac.orig_link_settings_stored)
 863  917                  orig_autoc = hw->mac.orig_autoc;
 864  918          else
 865  919                  orig_autoc = autoc;
 866  920  
      921 +        link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
      922 +        pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
      923 +
 867  924          if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
 868  925              link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
 869  926              link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
 870  927                  /* Set KX4/KX/KR support according to speed requested */
 871  928                  autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
 872  929                  if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
 873  930                          if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
 874  931                                  autoc |= IXGBE_AUTOC_KX4_SUPP;
 875  932                          if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
 876  933                              (hw->phy.smart_speed_active == FALSE))
          [ 9 lines elided ]
 886  943                      (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
 887  944                          autoc &= ~IXGBE_AUTOC_LMS_MASK;
 888  945                          autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
 889  946                  }
 890  947          } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
 891  948                     (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
 892  949                  /* Switch from 10G SFI to 1G SFI if requested */
 893  950                  if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
 894  951                      (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
 895  952                          autoc &= ~IXGBE_AUTOC_LMS_MASK;
 896      -                        if (autoneg)
      953 +                        if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel)
 897  954                                  autoc |= IXGBE_AUTOC_LMS_1G_AN;
 898  955                          else
 899  956                                  autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
 900  957                  }
 901  958          }
 902  959  
 903      -        if (autoc != start_autoc) {
      960 +        if (autoc != current_autoc) {
 904  961                  /* Restart link */
 905      -                autoc |= IXGBE_AUTOC_AN_RESTART;
 906      -                IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
      962 +                status = hw->mac.ops.prot_autoc_write(hw, autoc, FALSE);
      963 +                if (status != IXGBE_SUCCESS)
      964 +                        goto out;
 907  965  
 908  966                  /* Only poll for autoneg to complete if specified to do so */
 909  967                  if (autoneg_wait_to_complete) {
 910  968                          if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
 911  969                              link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
 912  970                              link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
 913  971                                  links_reg = 0; /*Just in case Autoneg time=0*/
 914  972                                  for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
 915  973                                          links_reg =
 916  974                                                 IXGBE_READ_REG(hw, IXGBE_LINKS);
          [ 14 lines elided ]
 931  989          }
 932  990  
 933  991  out:
 934  992          return status;
 935  993  }
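The rewritten setup path reads AUTOC once, builds the desired value in a scratch copy, and only issues the protected write (which restarts the link) when the two differ. A minimal, self-contained sketch of that compare-before-write pattern; the field names, values, and protected_write() helper below are illustrative stand-ins, not the real AUTOC layout or driver API:

#include <stdint.h>
#include <stdio.h>

#define LMS_MASK        0x000E0000u     /* hypothetical link-mode-select field */
#define LMS_10G_SERIAL  0x00060000u     /* hypothetical 10G serial encoding */

static uint32_t reg_autoc = 0x00020000u;        /* stands in for a register read */

/* stands in for the semaphore-protected register write */
static void protected_write(uint32_t val)
{
        reg_autoc = val;
        printf("AUTOC rewritten: 0x%08x (link restart follows)\n", (unsigned)val);
}

int main(void)
{
        uint32_t current = reg_autoc;   /* what the MAC is running right now */
        uint32_t wanted = current;      /* scratch copy used for comparison */

        /* select the requested link mode, as the LMS logic above does */
        wanted &= ~LMS_MASK;
        wanted |= LMS_10G_SERIAL;

        /* only touch the hardware (and disturb the link) if something changed */
        if (wanted != current)
                protected_write(wanted);
        else
                printf("AUTOC already correct, nothing to do\n");
        return 0;
}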
 936  994  
 937  995  /**
 938  996   *  ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
 939  997   *  @hw: pointer to hardware structure
 940  998   *  @speed: new link speed
 941      - *  @autoneg: TRUE if autonegotiation enabled
 942  999   *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
 943 1000   *
 944 1001   *  Restarts link on PHY and MAC based on settings passed in.
 945 1002   **/
 946 1003  static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
 947 1004                                           ixgbe_link_speed speed,
 948      -                                         bool autoneg,
 949 1005                                           bool autoneg_wait_to_complete)
 950 1006  {
 951 1007          s32 status;
 952 1008  
 953 1009          DEBUGFUNC("ixgbe_setup_copper_link_82599");
 954 1010  
 955 1011          /* Setup the PHY according to input speed */
 956      -        status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
     1012 +        status = hw->phy.ops.setup_link_speed(hw, speed,
 957 1013                                                autoneg_wait_to_complete);
 958      -        if (status == IXGBE_SUCCESS) {
 959      -                /* Set up MAC */
 960      -                status =
 961      -                    ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
 962      -        }
     1014 +        /* Set up MAC */
     1015 +        ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
 963 1016  
 964 1017          return status;
 965 1018  }
 966 1019  
 967 1020  /**
 968 1021   *  ixgbe_reset_hw_82599 - Perform hardware reset
 969 1022   *  @hw: pointer to hardware structure
 970 1023   *
 971 1024   *  Resets the hardware by resetting the transmit and receive units, masks
 972 1025   *  and clears all interrupts, perform a PHY reset, and perform a link (MAC)
 973 1026   *  reset.
 974 1027   **/
 975 1028  s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
 976 1029  {
 977 1030          ixgbe_link_speed link_speed;
 978 1031          s32 status;
 979      -        u32 ctrl, i, autoc, autoc2;
     1032 +        u32 ctrl = 0;
     1033 +        u32 i, autoc, autoc2;
     1034 +        u32 curr_lms;
 980 1035          bool link_up = FALSE;
 981 1036  
 982 1037          DEBUGFUNC("ixgbe_reset_hw_82599");
 983 1038  
 984 1039          /* Call adapter stop to disable tx/rx and clear interrupts */
 985 1040          status = hw->mac.ops.stop_adapter(hw);
 986 1041          if (status != IXGBE_SUCCESS)
 987 1042                  goto reset_hw_out;
 988 1043  
 989 1044          /* flush pending Tx transactions */
          [ 13 lines elided ]
1003 1058                  hw->phy.sfp_setup_needed = FALSE;
1004 1059          }
1005 1060  
1006 1061          if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
1007 1062                  goto reset_hw_out;
1008 1063  
1009 1064          /* Reset PHY */
1010 1065          if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
1011 1066                  hw->phy.ops.reset(hw);
1012 1067  
     1068 +        /* remember AUTOC from before we reset */
     1069 +        curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;
     1070 +
1013 1071  mac_reset_top:
1014 1072          /*
1015 1073           * Issue global reset to the MAC.  Needs to be SW reset if link is up.
1016 1074           * If link reset is used when link is up, it might reset the PHY when
1017 1075           * mng is using it.  If link is down or the flag to force full link
1018 1076           * reset is set, then perform link reset.
1019 1077           */
1020 1078          ctrl = IXGBE_CTRL_LNK_RST;
1021 1079          if (!hw->force_full_reset) {
1022 1080                  hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
1023 1081                  if (link_up)
1024 1082                          ctrl = IXGBE_CTRL_RST;
1025 1083          }
1026 1084  
1027 1085          ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
1028 1086          IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
1029 1087          IXGBE_WRITE_FLUSH(hw);
1030 1088  
1031      -        /* Poll for reset bit to self-clear indicating reset is complete */
     1089 +        /* Poll for reset bit to self-clear meaning reset is complete */
1032 1090          for (i = 0; i < 10; i++) {
1033 1091                  usec_delay(1);
1034 1092                  ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
1035 1093                  if (!(ctrl & IXGBE_CTRL_RST_MASK))
1036 1094                          break;
1037 1095          }
1038 1096  
1039 1097          if (ctrl & IXGBE_CTRL_RST_MASK) {
1040 1098                  status = IXGBE_ERR_RESET_FAILED;
1041 1099                  DEBUGOUT("Reset polling failed to complete.\n");
1042 1100          }
1043 1101  
1044 1102          msec_delay(50);
1045 1103  
1046 1104          /*
1047 1105           * Double resets are required for recovery from certain error
1048      -         * conditions.  Between resets, it is necessary to stall to allow time
1049      -         * for any pending HW events to complete.
     1106 +         * conditions.  Between resets, it is necessary to stall to
     1107 +         * allow time for any pending HW events to complete.
1050 1108           */
1051 1109          if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
1052 1110                  hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
1053 1111                  goto mac_reset_top;
1054 1112          }
1055 1113  
1056 1114          /*
1057 1115           * Store the original AUTOC/AUTOC2 values if they have not been
1058 1116           * stored off yet.  Otherwise restore the stored original
1059 1117           * values since the reset operation sets back to defaults.
1060 1118           */
1061 1119          autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1062 1120          autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
     1121 +
     1122 +        /* Enable link if disabled in NVM */
     1123 +        if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
     1124 +                autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
     1125 +                IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
     1126 +                IXGBE_WRITE_FLUSH(hw);
     1127 +        }
     1128 +
1063 1129          if (hw->mac.orig_link_settings_stored == FALSE) {
1064 1130                  hw->mac.orig_autoc = autoc;
1065 1131                  hw->mac.orig_autoc2 = autoc2;
1066 1132                  hw->mac.orig_link_settings_stored = TRUE;
1067 1133          } else {
1068      -                if (autoc != hw->mac.orig_autoc)
1069      -                        IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
1070      -                                        IXGBE_AUTOC_AN_RESTART));
1071 1134  
     1135 +                /* If MNG FW is running on a multi-speed device that
      1136 +                 * doesn't autonegotiate without driver support, we
      1137 +                 * need to leave LMS in the state it was in before the
      1138 +                 * MAC reset.  Likewise, if we support WoL we don't
      1139 +                 * want to change the LMS state.
     1140 +                 */
     1141 +                if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
     1142 +                    hw->wol_enabled)
     1143 +                        hw->mac.orig_autoc =
     1144 +                                (hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
     1145 +                                curr_lms;
     1146 +
     1147 +                if (autoc != hw->mac.orig_autoc) {
     1148 +                        status = hw->mac.ops.prot_autoc_write(hw,
     1149 +                                                        hw->mac.orig_autoc,
     1150 +                                                        FALSE);
     1151 +                        if (status != IXGBE_SUCCESS)
     1152 +                                goto reset_hw_out;
     1153 +                }
     1154 +
1072 1155                  if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
1073 1156                      (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
1074 1157                          autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
1075 1158                          autoc2 |= (hw->mac.orig_autoc2 &
1076 1159                                     IXGBE_AUTOC2_UPPER_MASK);
1077 1160                          IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
1078 1161                  }
1079 1162          }
1080 1163  
1081 1164          /* Store the permanent mac address */
          [ 24 lines elided ]
1106 1189  
1107 1190          /* Store the alternative WWNN/WWPN prefix */
1108 1191          hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
1109 1192                                     &hw->mac.wwpn_prefix);
1110 1193  
1111 1194  reset_hw_out:
1112 1195          return status;
1113 1196  }
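ixgbe_reset_hw_82599() requests the reset, polls CTRL until the self-clearing reset bit drops, and repeats the whole sequence once when the double-reset flag is set. A standalone sketch of that shape against a simulated register; CTRL_RST, read_ctrl() and the countdown are invented for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CTRL_RST        0x04000000u     /* hypothetical self-clearing reset bit */

static uint32_t sim_ctrl;
static int sim_countdown;

static uint32_t read_ctrl(void)
{
        /* simulate hardware clearing the bit after a few polls */
        if (sim_countdown > 0 && --sim_countdown == 0)
                sim_ctrl &= ~CTRL_RST;
        return sim_ctrl;
}

static int do_reset(void)
{
        int i;

        sim_ctrl |= CTRL_RST;           /* "write" the reset request */
        sim_countdown = 3;

        /* poll for the bit to self-clear, bounded like the loop above */
        for (i = 0; i < 10; i++) {
                if (!(read_ctrl() & CTRL_RST))
                        return 0;
        }
        return -1;                      /* reset polling failed */
}

int main(void)
{
        bool double_reset = true;       /* like IXGBE_FLAGS_DOUBLE_RESET_REQUIRED */

again:
        if (do_reset() != 0) {
                printf("reset did not complete\n");
                return 1;
        }
        if (double_reset) {
                double_reset = false;   /* clear the flag, then reset once more */
                goto again;
        }
        printf("reset complete\n");
        return 0;
}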
1114 1197  
1115 1198  /**
     1199 + * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
     1200 + * @hw: pointer to hardware structure
     1201 + * @fdircmd: current value of FDIRCMD register
     1202 + */
     1203 +static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
     1204 +{
     1205 +        int i;
     1206 +
     1207 +        for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
     1208 +                *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
     1209 +                if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
     1210 +                        return IXGBE_SUCCESS;
     1211 +                usec_delay(10);
     1212 +        }
     1213 +
     1214 +        return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
     1215 +}
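This helper replaces the FDIRCMD polling loops that the reinit, write and erase paths below previously open-coded. A generic version of the same consolidation, compilable on its own; CMD_MASK, CMD_POLL and read_fdircmd() are stand-ins rather than the driver's definitions:

#include <stdint.h>
#include <stdio.h>

#define CMD_MASK        0x00000003u     /* hypothetical "command pending" bits */
#define CMD_POLL        10

/* stand-in for reading the command register */
static uint32_t read_fdircmd(void)
{
        static int polls;
        return (++polls < 4) ? CMD_MASK : 0;    /* pretend HW finishes on poll 4 */
}

/* returns 0 when the pending command bits clear, -1 on timeout */
static int check_cmd_complete(uint32_t *fdircmd)
{
        int i;

        for (i = 0; i < CMD_POLL; i++) {
                *fdircmd = read_fdircmd();
                if (!(*fdircmd & CMD_MASK))
                        return 0;
                /* the real code delays ~10us between polls */
        }
        return -1;
}

int main(void)
{
        uint32_t cmd;

        if (check_cmd_complete(&cmd) != 0)
                printf("command did not complete\n");
        else
                printf("command complete, FDIRCMD=0x%08x\n", (unsigned)cmd);
        return 0;
}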
     1216 +
     1217 +/**
1116 1218   *  ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
1117 1219   *  @hw: pointer to hardware structure
1118 1220   **/
1119 1221  s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1120 1222  {
     1223 +        s32 err;
1121 1224          int i;
1122 1225          u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
     1226 +        u32 fdircmd;
1123 1227          fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
1124 1228  
1125 1229          DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
1126 1230  
1127 1231          /*
1128 1232           * Before starting reinitialization process,
1129 1233           * FDIRCMD.CMD must be zero.
1130 1234           */
1131      -        for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
1132      -                if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1133      -                      IXGBE_FDIRCMD_CMD_MASK))
1134      -                        break;
1135      -                usec_delay(10);
     1235 +        err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
     1236 +        if (err) {
     1237 +                DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n");
     1238 +                return err;
1136 1239          }
1137      -        if (i >= IXGBE_FDIRCMD_CMD_POLL) {
1138      -                DEBUGOUT("Flow Director previous command isn't complete, "
1139      -                         "aborting table re-initialization.\n");
1140      -                return IXGBE_ERR_FDIR_REINIT_FAILED;
1141      -        }
1142 1240  
1143 1241          IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
1144 1242          IXGBE_WRITE_FLUSH(hw);
1145 1243          /*
1146 1244           * 82599 adapters flow director init flow cannot be restarted,
1147 1245           * Workaround 82599 silicon errata by performing the following steps
1148 1246           * before re-writing the FDIRCTRL control register with the same value.
1149 1247           * - write 1 to bit 8 of FDIRCMD register &
1150 1248           * - write 0 to bit 8 of FDIRCMD register
1151 1249           */
          [ 13 lines elided ]
1165 1263          IXGBE_WRITE_FLUSH(hw);
1166 1264  
1167 1265          IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1168 1266          IXGBE_WRITE_FLUSH(hw);
1169 1267  
1170 1268          /* Poll init-done after we write FDIRCTRL register */
1171 1269          for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1172 1270                  if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1173 1271                                     IXGBE_FDIRCTRL_INIT_DONE)
1174 1272                          break;
1175      -                usec_delay(10);
     1273 +                msec_delay(1);
1176 1274          }
1177 1275          if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
1178 1276                  DEBUGOUT("Flow Director Signature poll time exceeded!\n");
1179 1277                  return IXGBE_ERR_FDIR_REINIT_FAILED;
1180 1278          }
1181 1279  
1182 1280          /* Clear FDIR statistics registers (read to clear) */
1183      -        (void) IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
1184      -        (void) IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
1185      -        (void) IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
1186      -        (void) IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
1187      -        (void) IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
     1281 +        IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
     1282 +        IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
     1283 +        IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
     1284 +        IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
     1285 +        IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
1188 1286  
1189 1287          return IXGBE_SUCCESS;
1190 1288  }
1191 1289  
1192 1290  /**
1193 1291   *  ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
1194 1292   *  @hw: pointer to hardware structure
1195 1293   *  @fdirctrl: value to write to flow director control register
1196 1294   **/
1197 1295  static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
          [ 56 lines elided ]
1254 1352          ixgbe_fdir_enable_82599(hw, fdirctrl);
1255 1353  
1256 1354          return IXGBE_SUCCESS;
1257 1355  }
1258 1356  
1259 1357  /**
1260 1358   *  ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
1261 1359   *  @hw: pointer to hardware structure
1262 1360   *  @fdirctrl: value to write to flow director control register, initially
1263 1361   *           contains just the value of the Rx packet buffer allocation
     1362 + *  @cloud_mode: TRUE - cloud mode, FALSE - other mode
1264 1363   **/
1265      -s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
     1364 +s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
     1365 +                        bool cloud_mode)
1266 1366  {
1267 1367          DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
1268 1368  
1269 1369          /*
1270 1370           * Continue setup of fdirctrl register bits:
1271 1371           *  Turn perfect match filtering on
1272 1372           *  Report hash in RSS field of Rx wb descriptor
1273      -         *  Initialize the drop queue
     1373 +         *  Initialize the drop queue to queue 127
1274 1374           *  Move the flexible bytes to use the ethertype - shift 6 words
1275 1375           *  Set the maximum length per hash bucket to 0xA filters
1276 1376           *  Send interrupt when 64 (0x4 * 16) filters are left
1277 1377           */
1278 1378          fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
1279 1379                      IXGBE_FDIRCTRL_REPORT_STATUS |
1280 1380                      (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
1281 1381                      (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1282 1382                      (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1283 1383                      (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
     1384 +        if ((hw->mac.type == ixgbe_mac_X550) ||
     1385 +            (hw->mac.type == ixgbe_mac_X550EM_x))
     1386 +                fdirctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH;
1284 1387  
     1388 +        if (cloud_mode)
      1389 +                fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD <<
     1390 +                                        IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
     1391 +
1285 1392          /* write hashes and fdirctrl register, poll for completion */
1286 1393          ixgbe_fdir_enable_82599(hw, fdirctrl);
1287 1394  
1288 1395          return IXGBE_SUCCESS;
1289 1396  }
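fdirctrl is built up by OR-ing independently shifted fields (drop queue, flex-byte offset, bucket length, full threshold) into one 32-bit value. The same packing, reduced to a tiny standalone program; the shift positions below are made up and do not match the real FDIRCTRL layout:

#include <stdint.h>
#include <stdio.h>

/* hypothetical field positions, purely for illustration */
#define DROP_Q_SHIFT        8
#define FLEX_SHIFT         16
#define MAX_LEN_SHIFT      24
#define FULL_THRESH_SHIFT  28

int main(void)
{
        uint32_t fdirctrl = 0x00000020u;        /* packet-buffer allocation, say */

        /* OR each independent field into place, exactly one field per shift */
        fdirctrl |= (127u << DROP_Q_SHIFT)      /* drop queue 127 */
                 |  (0x6u << FLEX_SHIFT)        /* flex bytes at the ethertype */
                 |  (0xAu << MAX_LEN_SHIFT)     /* max filters per hash bucket */
                 |  (4u   << FULL_THRESH_SHIFT);/* interrupt at 64 filters left */

        printf("fdirctrl = 0x%08x\n", (unsigned)fdirctrl);
        return 0;
}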
1290 1397  
     1398 +/**
     1399 + *  ixgbe_set_fdir_drop_queue_82599 - Set Flow Director drop queue
     1400 + *  @hw: pointer to hardware structure
     1401 + *  @dropqueue: Rx queue index used for the dropped packets
     1402 + **/
     1403 +void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue)
     1404 +{
     1405 +        u32 fdirctrl;
     1406 +
     1407 +        DEBUGFUNC("ixgbe_set_fdir_drop_queue_82599");
     1408 +        /* Clear init done bit and drop queue field */
     1409 +        fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
     1410 +        fdirctrl &= ~(IXGBE_FDIRCTRL_DROP_Q_MASK | IXGBE_FDIRCTRL_INIT_DONE);
     1411 +
     1412 +        /* Set drop queue */
     1413 +        fdirctrl |= (dropqueue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
     1414 +        if ((hw->mac.type == ixgbe_mac_X550) ||
     1415 +            (hw->mac.type == ixgbe_mac_X550EM_x))
     1416 +                fdirctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH;
     1417 +
     1418 +        IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
     1419 +                        (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
     1420 +                         IXGBE_FDIRCMD_CLEARHT));
     1421 +        IXGBE_WRITE_FLUSH(hw);
     1422 +        IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
     1423 +                        (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
     1424 +                         ~IXGBE_FDIRCMD_CLEARHT));
     1425 +        IXGBE_WRITE_FLUSH(hw);
     1426 +
     1427 +        /* write hashes and fdirctrl register, poll for completion */
     1428 +        ixgbe_fdir_enable_82599(hw, fdirctrl);
     1429 +}
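Like the errata workaround in the reinit path, this routine pulses the CLEARHT bit in FDIRCMD: set it, flush, clear it, flush. Stripped of the ixgbe register macros, the pulse amounts to the following; the register variable and write_flush() are placeholders:

#include <stdint.h>
#include <stdio.h>

#define CLEARHT 0x00000100u     /* bit 8, as in the errata comment above */

static uint32_t fdircmd;        /* stand-in for the real MMIO register */

static void write_flush(void)
{
        /* in the driver this is IXGBE_WRITE_FLUSH(): a posted-write flush */
}

int main(void)
{
        /* set the bit ... */
        fdircmd |= CLEARHT;
        write_flush();

        /* ... and clear it again, each step pushed out to the device */
        fdircmd &= ~CLEARHT;
        write_flush();

        printf("FDIRCMD after pulse: 0x%08x\n", (unsigned)fdircmd);
        return 0;
}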
     1430 +
1291 1431  /*
1292 1432   * These defines allow us to quickly generate all of the necessary instructions
1293 1433   * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
1294 1434   * for values 0 through 15
1295 1435   */
1296 1436  #define IXGBE_ATR_COMMON_HASH_KEY \
1297 1437                  (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
1298      -#ifdef lint
1299      -#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n)
1300      -#else
1301 1438  #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
1302 1439  do { \
1303 1440          u32 n = (_n); \
1304 1441          if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
1305 1442                  common_hash ^= lo_hash_dword >> n; \
1306 1443          else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1307 1444                  bucket_hash ^= lo_hash_dword >> n; \
1308 1445          else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
1309 1446                  sig_hash ^= lo_hash_dword << (16 - n); \
1310 1447          if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
1311 1448                  common_hash ^= hi_hash_dword >> n; \
1312 1449          else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1313 1450                  bucket_hash ^= hi_hash_dword >> n; \
1314 1451          else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
1315 1452                  sig_hash ^= hi_hash_dword << (16 - n); \
1316      -} while (0);
1317      -#endif
     1453 +} while (0)
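Removing the trailing semicolon from the do { ... } while (0) wrapper (and the lint-only empty variant) matters because the semicolon belongs to the call site; with it baked into the macro, invoking the macro between if and else fails to compile. A small illustration of the idiom with an invented ACCUMULATE() macro:

#include <stdio.h>

/* the semicolon is supplied by the caller, so the macro works anywhere a
 * single statement does -- including between if and else */
#define ACCUMULATE(x)                   \
do {                                    \
        total += (x);                   \
        count++;                        \
} while (0)

static int total, count;

int main(void)
{
        int v = 5;

        if (v > 0)
                ACCUMULATE(v);          /* with "while (0);" in the macro,  */
        else                            /* this if/else would not compile  */
                ACCUMULATE(-v);

        printf("total=%d count=%d\n", total, count);
        return 0;
}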
1318 1454  
1319 1455  /**
1320 1456   *  ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
1321 1457   *  @stream: input bitstream to compute the hash on
1322 1458   *
1323 1459   *  This function is almost identical to the function above but contains
1324      - *  several optomizations such as unwinding all of the loops, letting the
     1460 + *  several optimizations such as unwinding all of the loops, letting the
1325 1461   *  compiler work out all of the conditional ifs since the keys are static
1326 1462   *  defines, and computing two keys at once since the hashed dword stream
1327 1463   *  will be the same for both keys.
1328 1464   **/
1329 1465  u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
1330 1466                                       union ixgbe_atr_hash_dword common)
1331 1467  {
1332 1468          u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1333 1469          u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
1334 1470  
          [ 8 lines elided ]
1343 1479  
1344 1480          /* apply flow ID/VM pool/VLAN ID bits to hash words */
1345 1481          hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1346 1482  
1347 1483          /* Process bits 0 and 16 */
1348 1484          IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
1349 1485  
1350 1486          /*
1351 1487           * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1352 1488           * delay this because bit 0 of the stream should not be processed
1353      -         * so we do not add the vlan until after bit 0 was processed
     1489 +         * so we do not add the VLAN until after bit 0 was processed
1354 1490           */
1355 1491          lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1356 1492  
1357 1493          /* Process remaining 30 bit of the key */
1358 1494          IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
1359 1495          IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
1360 1496          IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
1361 1497          IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
1362 1498          IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
1363 1499          IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
          [ 17 lines elided ]
1381 1517          /* return completed signature hash */
1382 1518          return sig_hash ^ bucket_hash;
1383 1519  }
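The body above unrolls all sixteen iterations so the constant key bits fold away at compile time and the signature and bucket hashes share one pass over the data. The plain loop it is equivalent to looks roughly like this; the key constants are illustrative, and the VLAN mix-in and final combining steps are omitted:

#include <stdint.h>
#include <stdio.h>

/* illustrative key values, not the real ATR hash keys */
#define BUCKET_KEY      0x3DAD14E2u
#define SIG_KEY         0x174D3614u
#define COMMON_KEY      (BUCKET_KEY & SIG_KEY)

int main(void)
{
        uint32_t lo = 0x12345678u, hi = 0x9ABCDEF0u;    /* hashed dword stream */
        uint32_t sig = 0, bucket = 0, common = 0;
        uint32_t n;

        for (n = 0; n < 16; n++) {
                /* low 16 key bits select contributions from the low dword */
                if (COMMON_KEY & (1u << n))
                        common ^= lo >> n;
                else if (BUCKET_KEY & (1u << n))
                        bucket ^= lo >> n;
                else if (SIG_KEY & (1u << n))
                        sig ^= lo << (16 - n);

                /* high 16 key bits select contributions from the high dword */
                if (COMMON_KEY & (1u << (n + 16)))
                        common ^= hi >> n;
                else if (BUCKET_KEY & (1u << (n + 16)))
                        bucket ^= hi >> n;
                else if (SIG_KEY & (1u << (n + 16)))
                        sig ^= hi << (16 - n);
        }

        printf("bucket=0x%08x sig=0x%08x common=0x%08x\n",
            (unsigned)bucket, (unsigned)sig, (unsigned)common);
        return 0;
}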
1384 1520  
1385 1521  /**
1386 1522   *  ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
1387 1523   *  @hw: pointer to hardware structure
1388 1524   *  @input: unique input dword
1389 1525   *  @common: compressed common input dword
1390 1526   *  @queue: queue index to direct traffic to
     1527 + *
     1528 + * Note that the tunnel bit in input must not be set when the hardware
      1529 + * does not support tunneling.
1391 1530   **/
1392      -s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1393      -                                          union ixgbe_atr_hash_dword input,
1394      -                                          union ixgbe_atr_hash_dword common,
1395      -                                          u8 queue)
     1531 +void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
     1532 +                                           union ixgbe_atr_hash_dword input,
     1533 +                                           union ixgbe_atr_hash_dword common,
     1534 +                                           u8 queue)
1396 1535  {
1397      -        u64  fdirhashcmd;
1398      -        u32  fdircmd;
     1536 +        u64 fdirhashcmd;
     1537 +        u8 flow_type;
     1538 +        bool tunnel;
     1539 +        u32 fdircmd;
1399 1540  
1400 1541          DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
1401 1542  
1402 1543          /*
1403 1544           * Get the flow_type in order to program FDIRCMD properly
1404 1545           * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
     1546 +         * fifth is FDIRCMD.TUNNEL_FILTER
1405 1547           */
1406      -        switch (input.formatted.flow_type) {
     1548 +        tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
     1549 +        flow_type = input.formatted.flow_type &
     1550 +                    (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
     1551 +        switch (flow_type) {
1407 1552          case IXGBE_ATR_FLOW_TYPE_TCPV4:
1408 1553          case IXGBE_ATR_FLOW_TYPE_UDPV4:
1409 1554          case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1410 1555          case IXGBE_ATR_FLOW_TYPE_TCPV6:
1411 1556          case IXGBE_ATR_FLOW_TYPE_UDPV6:
1412 1557          case IXGBE_ATR_FLOW_TYPE_SCTPV6:
1413 1558                  break;
1414 1559          default:
1415 1560                  DEBUGOUT(" Error on flow type input\n");
1416      -                return IXGBE_ERR_CONFIG;
     1561 +                return;
1417 1562          }
1418 1563  
1419 1564          /* configure FDIRCMD register */
1420 1565          fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1421 1566                    IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1422      -        fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
     1567 +        fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1423 1568          fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
     1569 +        if (tunnel)
     1570 +                fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
1424 1571  
1425 1572          /*
1426 1573           * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
1427 1574           * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
1428 1575           */
1429 1576          fdirhashcmd = (u64)fdircmd << 32;
1430 1577          fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
1431 1578          IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1432 1579  
1433 1580          DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
1434 1581  
1435      -        return IXGBE_SUCCESS;
     1582 +        return;
1436 1583  }
1437 1584  
1438      -#ifdef lint
1439      -#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n)
1440      -#else
1441 1585  #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
1442 1586  do { \
1443 1587          u32 n = (_n); \
1444 1588          if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1445 1589                  bucket_hash ^= lo_hash_dword >> n; \
1446 1590          if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1447 1591                  bucket_hash ^= hi_hash_dword >> n; \
1448      -} while (0);
1449      -#endif
     1592 +} while (0)
     1593 +
1450 1594  /**
1451 1595   *  ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
1452 1596   *  @atr_input: input bitstream to compute the hash on
1453 1597   *  @input_mask: mask for the input bitstream
1454 1598   *
1455      - *  This function serves two main purposes.  First it applys the input_mask
     1599 + *  This function serves two main purposes.  First it applies the input_mask
1456 1600   *  to the atr_input resulting in a cleaned up atr_input data stream.
1457 1601   *  Secondly it computes the hash and stores it in the bkt_hash field at
1458 1602   *  the end of the input byte stream.  This way it will be available for
1459 1603   *  future use without needing to recompute the hash.
1460 1604   **/
1461 1605  void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
1462 1606                                            union ixgbe_atr_input *input_mask)
1463 1607  {
1464 1608  
1465 1609          u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1466 1610          u32 bucket_hash = 0;
     1611 +        u32 hi_dword = 0;
     1612 +        u32 i = 0;
1467 1613  
1468 1614          /* Apply masks to input data */
1469      -        input->dword_stream[0]  &= input_mask->dword_stream[0];
1470      -        input->dword_stream[1]  &= input_mask->dword_stream[1];
1471      -        input->dword_stream[2]  &= input_mask->dword_stream[2];
1472      -        input->dword_stream[3]  &= input_mask->dword_stream[3];
1473      -        input->dword_stream[4]  &= input_mask->dword_stream[4];
1474      -        input->dword_stream[5]  &= input_mask->dword_stream[5];
1475      -        input->dword_stream[6]  &= input_mask->dword_stream[6];
1476      -        input->dword_stream[7]  &= input_mask->dword_stream[7];
1477      -        input->dword_stream[8]  &= input_mask->dword_stream[8];
1478      -        input->dword_stream[9]  &= input_mask->dword_stream[9];
1479      -        input->dword_stream[10] &= input_mask->dword_stream[10];
     1615 +        for (i = 0; i < 14; i++)
     1616 +                input->dword_stream[i]  &= input_mask->dword_stream[i];
1480 1617  
1481 1618          /* record the flow_vm_vlan bits as they are a key part to the hash */
1482 1619          flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
1483 1620  
1484 1621          /* generate common hash dword */
1485      -        hi_hash_dword = IXGBE_NTOHL(input->dword_stream[1] ^
1486      -                                    input->dword_stream[2] ^
1487      -                                    input->dword_stream[3] ^
1488      -                                    input->dword_stream[4] ^
1489      -                                    input->dword_stream[5] ^
1490      -                                    input->dword_stream[6] ^
1491      -                                    input->dword_stream[7] ^
1492      -                                    input->dword_stream[8] ^
1493      -                                    input->dword_stream[9] ^
1494      -                                    input->dword_stream[10]);
     1622 +        for (i = 1; i <= 13; i++)
     1623 +                hi_dword ^= input->dword_stream[i];
     1624 +        hi_hash_dword = IXGBE_NTOHL(hi_dword);
1495 1625  
1496 1626          /* low dword is word swapped version of common */
1497 1627          lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1498 1628  
1499 1629          /* apply flow ID/VM pool/VLAN ID bits to hash words */
1500 1630          hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1501 1631  
1502 1632          /* Process bits 0 and 16 */
1503 1633          IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
1504 1634  
1505 1635          /*
1506 1636           * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1507 1637           * delay this because bit 0 of the stream should not be processed
1508      -         * so we do not add the vlan until after bit 0 was processed
     1638 +         * so we do not add the VLAN until after bit 0 was processed
1509 1639           */
1510 1640          lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1511 1641  
1512 1642          /* Process remaining 30 bit of the key */
1513      -        IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
1514      -        IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
1515      -        IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
1516      -        IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
1517      -        IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
1518      -        IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
1519      -        IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
1520      -        IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
1521      -        IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
1522      -        IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
1523      -        IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
1524      -        IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
1525      -        IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
1526      -        IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
1527      -        IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
     1643 +        for (i = 1; i <= 15; i++)
     1644 +                IXGBE_COMPUTE_BKT_HASH_ITERATION(i);
1528 1645  
1529 1646          /*
1530 1647           * Limit hash to 13 bits since max bucket count is 8K.
1531 1648           * Store result at the end of the input stream.
1532 1649           */
1533 1650          input->formatted.bkt_hash = bucket_hash & 0x1FFF;
1534 1651  }
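The final mask keeps 13 bits because the perfect-filter table holds at most 8K buckets (2^13 = 8192), so the stored value fits a bucket index:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t bucket_hash = 0xDEADBEEFu;     /* any computed hash value */
        uint32_t bkt = bucket_hash & 0x1FFF;    /* keep 13 low bits: 0..8191 */

        printf("bucket index %u of %u\n", (unsigned)bkt, 1u << 13);
        return 0;
}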
1535 1652  
1536 1653  /**
1537      - *  ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
     1654 + *  ixgbe_get_fdirtcpm_82599 - generate a TCP port from atr_input_masks
1538 1655   *  @input_mask: mask to be bit swapped
1539 1656   *
1540 1657   *  The source and destination port masks for flow director are bit swapped
1541 1658   *  in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc.  In order to
1542 1659   *  generate a correctly swapped value we need to bit swap the mask and that
1543 1660   *  is what is accomplished by this function.
1544 1661   **/
1545 1662  static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
1546 1663  {
1547 1664          u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
          [ 16 lines elided ]
1564 1681          (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
1565 1682           (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
1566 1683  
1567 1684  #define IXGBE_WRITE_REG_BE32(a, reg, value) \
1568 1685          IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
1569 1686  
1570 1687  #define IXGBE_STORE_AS_BE16(_value) \
1571 1688          IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
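The port masks go into FDIRTCPM bit-reversed (bit 15 lands in bit 0, and so on), while at the core of the IXGBE_STORE_AS_BE16/BE32 helpers is an ordinary byte swap. Both transforms, written out as small standalone functions; bitrev16() and store_as_be16() are illustrative helpers, not driver code:

#include <stdint.h>
#include <stdio.h>

/* reverse bit order: bit 15 <-> bit 0, bit 14 <-> bit 1, ... */
static uint16_t bitrev16(uint16_t v)
{
        uint16_t r = 0;
        int i;

        for (i = 0; i < 16; i++)
                if (v & (1u << i))
                        r |= (uint16_t)(1u << (15 - i));
        return r;
}

/* exchange the two bytes of a 16-bit value, e.g. 0x1234 -> 0x3412 */
static uint16_t store_as_be16(uint16_t v)
{
        return (uint16_t)((v >> 8) | (v << 8));
}

int main(void)
{
        printf("bit-reversed 0xFFF0 = 0x%04x\n", (unsigned)bitrev16(0xFFF0));
        printf("byte-swapped 0x1234 = 0x%04x\n", (unsigned)store_as_be16(0x1234));
        return 0;
}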
1572 1689  
1573 1690  s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
1574      -                                    union ixgbe_atr_input *input_mask)
     1691 +                                    union ixgbe_atr_input *input_mask, bool cloud_mode)
1575 1692  {
1576 1693          /* mask IPv6 since it is currently not supported */
1577 1694          u32 fdirm = IXGBE_FDIRM_DIPv6;
1578 1695          u32 fdirtcpm;
1579      -
     1696 +        u32 fdirip6m;
1580 1697          DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");
1581 1698  
1582 1699          /*
1583 1700           * Program the relevant mask registers.  If src/dst_port or src/dst_addr
1584 1701           * are zero, then assume a full mask for that field.  Also assume that
1585 1702           * a VLAN of 0 is unspecified, so mask that out as well.  L4type
1586 1703           * cannot be masked out in this implementation.
1587 1704           *
1588 1705           * This also assumes IPv4 only.  IPv6 masking isn't supported at this
1589 1706           * point in time.
1590 1707           */
1591 1708  
1592 1709          /* verify bucket hash is cleared on hash generation */
1593 1710          if (input_mask->formatted.bkt_hash)
1594 1711                  DEBUGOUT(" bucket hash should always be 0 in mask\n");
1595 1712  
1596 1713          /* Program FDIRM and verify partial masks */
1597 1714          switch (input_mask->formatted.vm_pool & 0x7F) {
1598 1715          case 0x0:
1599 1716                  fdirm |= IXGBE_FDIRM_POOL;
1600      -                /* FALLTHRU */
1601 1717          case 0x7F:
1602 1718                  break;
1603 1719          default:
1604 1720                  DEBUGOUT(" Error on vm pool mask\n");
1605 1721                  return IXGBE_ERR_CONFIG;
1606 1722          }
1607 1723  
1608 1724          switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
1609 1725          case 0x0:
1610 1726                  fdirm |= IXGBE_FDIRM_L4P;
1611 1727                  if (input_mask->formatted.dst_port ||
1612 1728                      input_mask->formatted.src_port) {
1613 1729                          DEBUGOUT(" Error on src/dst port mask\n");
1614 1730                          return IXGBE_ERR_CONFIG;
1615 1731                  }
1616      -                /* FALLTHRU */
1617 1732          case IXGBE_ATR_L4TYPE_MASK:
1618 1733                  break;
1619 1734          default:
1620 1735                  DEBUGOUT(" Error on flow type mask\n");
1621 1736                  return IXGBE_ERR_CONFIG;
1622 1737          }
1623 1738  
1624 1739          switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
1625 1740          case 0x0000:
1626 1741                  /* mask VLAN ID, fall through to mask VLAN priority */
1627 1742                  fdirm |= IXGBE_FDIRM_VLANID;
1628      -                /* FALLTHRU */
1629 1743          case 0x0FFF:
1630 1744                  /* mask VLAN priority */
1631 1745                  fdirm |= IXGBE_FDIRM_VLANP;
1632 1746                  break;
1633 1747          case 0xE000:
1634 1748                  /* mask VLAN ID only, fall through */
1635 1749                  fdirm |= IXGBE_FDIRM_VLANID;
1636      -                /* FALLTHRU */
1637 1750          case 0xEFFF:
1638 1751                  /* no VLAN fields masked */
1639 1752                  break;
1640 1753          default:
1641 1754                  DEBUGOUT(" Error on VLAN mask\n");
1642 1755                  return IXGBE_ERR_CONFIG;
1643 1756          }
1644 1757  
1645 1758          switch (input_mask->formatted.flex_bytes & 0xFFFF) {
1646 1759          case 0x0000:
1647 1760                  /* Mask Flex Bytes, fall through */
1648 1761                  fdirm |= IXGBE_FDIRM_FLEX;
1649      -                /* FALLTHRU */
1650 1762          case 0xFFFF:
1651 1763                  break;
1652 1764          default:
1653 1765                  DEBUGOUT(" Error on flexible byte mask\n");
1654 1766                  return IXGBE_ERR_CONFIG;
1655 1767          }
1656 1768  
     1769 +        if (cloud_mode) {
     1770 +                fdirm |= IXGBE_FDIRM_L3P;
     1771 +                fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
     1772 +                fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
     1773 +
     1774 +                switch (input_mask->formatted.inner_mac[0] & 0xFF) {
     1775 +                case 0x00:
     1776 +                        /* Mask inner MAC, fall through */
     1777 +                        fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC;
     1778 +                case 0xFF:
     1779 +                        break;
     1780 +                default:
     1781 +                        DEBUGOUT(" Error on inner_mac byte mask\n");
     1782 +                        return IXGBE_ERR_CONFIG;
     1783 +                }
     1784 +
     1785 +                switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) {
     1786 +                case 0x0:
     1787 +                        /* Mask vxlan id */
     1788 +                        fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI;
     1789 +                        break;
     1790 +                case 0x00FFFFFF:
     1791 +                        fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
     1792 +                        break;
     1793 +                case 0xFFFFFFFF:
     1794 +                        break;
     1795 +                default:
     1796 +                        DEBUGOUT(" Error on TNI/VNI byte mask\n");
     1797 +                        return IXGBE_ERR_CONFIG;
     1798 +                }
     1799 +
     1800 +                switch (input_mask->formatted.tunnel_type & 0xFFFF) {
     1801 +                case 0x0:
      1802 +                        /* Mask tunnel type, fall through */
     1803 +                        fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
     1804 +                case 0xFFFF:
     1805 +                        break;
     1806 +                default:
     1807 +                        DEBUGOUT(" Error on tunnel type byte mask\n");
     1808 +                        return IXGBE_ERR_CONFIG;
     1809 +                }
     1810 +                IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m);
     1811 +
     1812 +                /* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSIP4M and
     1813 +                 * FDIRDIP4M in cloud mode to allow L3/L3 packets to
     1814 +                 * tunnel.
     1815 +                 */
     1816 +                IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
     1817 +                IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
     1818 +                IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
     1819 +                IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
     1820 +        }
     1821 +
1657 1822          /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
1658 1823          IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
1659 1824  
1660      -        /* store the TCP/UDP port masks, bit reversed from port layout */
1661      -        fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
     1825 +        if (!cloud_mode) {
     1826 +                /* store the TCP/UDP port masks, bit reversed from port
     1827 +                 * layout */
     1828 +                fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
1662 1829  
1663      -        /* write both the same so that UDP and TCP use the same mask */
1664      -        IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
1665      -        IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
     1830 +                /* write both the same so that UDP and TCP use the same mask */
     1831 +                IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
     1832 +                IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
     1833 +                /* also use it for SCTP */
     1834 +                switch (hw->mac.type) {
     1835 +                case ixgbe_mac_X550:
     1836 +                case ixgbe_mac_X550EM_x:
     1837 +                        IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
     1838 +                        break;
     1839 +                default:
     1840 +                        break;
     1841 +                }
1666 1842  
1667      -        /* store source and destination IP masks (big-enian) */
1668      -        IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
1669      -                             ~input_mask->formatted.src_ip[0]);
1670      -        IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
1671      -                             ~input_mask->formatted.dst_ip[0]);
1672      -
      1843 +                /* store source and destination IP masks (big-endian) */
     1844 +                IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
     1845 +                                     ~input_mask->formatted.src_ip[0]);
     1846 +                IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
     1847 +                                     ~input_mask->formatted.dst_ip[0]);
     1848 +        }
1673 1849          return IXGBE_SUCCESS;
1674 1850  }
1675 1851  
1676 1852  s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
1677 1853                                            union ixgbe_atr_input *input,
1678      -                                          u16 soft_id, u8 queue)
     1854 +                                          u16 soft_id, u8 queue, bool cloud_mode)
1679 1855  {
1680 1856          u32 fdirport, fdirvlan, fdirhash, fdircmd;
     1857 +        u32 addr_low, addr_high;
     1858 +        u32 cloud_type = 0;
     1859 +        s32 err;
1681 1860  
1682 1861          DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
     1862 +        if (!cloud_mode) {
     1863 +                /* currently IPv6 is not supported, must be programmed with 0 */
     1864 +                IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
     1865 +                                     input->formatted.src_ip[0]);
     1866 +                IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
     1867 +                                     input->formatted.src_ip[1]);
     1868 +                IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
     1869 +                                     input->formatted.src_ip[2]);
1683 1870  
1684      -        /* currently IPv6 is not supported, must be programmed with 0 */
1685      -        IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
1686      -                             input->formatted.src_ip[0]);
1687      -        IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
1688      -                             input->formatted.src_ip[1]);
1689      -        IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
1690      -                             input->formatted.src_ip[2]);
     1871 +                /* record the source address (big-endian) */
     1872 +                IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA,
     1873 +                        input->formatted.src_ip[0]);
1691 1874  
1692      -        /* record the source address (big-endian) */
1693      -        IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
     1875 +                /* record the first 32 bits of the destination address
     1876 +                 * (big-endian) */
     1877 +                IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA,
     1878 +                        input->formatted.dst_ip[0]);
1694 1879  
1695      -        /* record the first 32 bits of the destination address (big-endian) */
1696      -        IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
     1880 +                /* record source and destination port (little-endian)*/
     1881 +                fdirport = IXGBE_NTOHS(input->formatted.dst_port);
     1882 +                fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
     1883 +                fdirport |= IXGBE_NTOHS(input->formatted.src_port);
     1884 +                IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
     1885 +        }
1697 1886  
1698      -        /* record source and destination port (little-endian)*/
1699      -        fdirport = IXGBE_NTOHS(input->formatted.dst_port);
1700      -        fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
1701      -        fdirport |= IXGBE_NTOHS(input->formatted.src_port);
1702      -        IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
1703      -
1704      -        /* record vlan (little-endian) and flex_bytes(big-endian) */
     1887 +        /* record VLAN (little-endian) and flex_bytes(big-endian) */
1705 1888          fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
1706 1889          fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
1707 1890          fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
1708 1891          IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
1709 1892  
     1893 +        if (cloud_mode) {
     1894 +                if (input->formatted.tunnel_type != 0)
     1895 +                        cloud_type = 0x80000000;
     1896 +
     1897 +                addr_low = ((u32)input->formatted.inner_mac[0] |
     1898 +                                ((u32)input->formatted.inner_mac[1] << 8) |
     1899 +                                ((u32)input->formatted.inner_mac[2] << 16) |
     1900 +                                ((u32)input->formatted.inner_mac[3] << 24));
     1901 +                addr_high = ((u32)input->formatted.inner_mac[4] |
     1902 +                                ((u32)input->formatted.inner_mac[5] << 8));
     1903 +                cloud_type |= addr_high;
     1904 +                IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low);
     1905 +                IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type);
     1906 +                IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.tni_vni);
     1907 +        }
     1908 +
1710 1909          /* configure FDIRHASH register */
1711 1910          fdirhash = input->formatted.bkt_hash;
1712 1911          fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1713 1912          IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1714 1913  
1715 1914          /*
1716 1915           * flush all previous writes to make certain registers are
1717 1916           * programmed prior to issuing the command
1718 1917           */
1719 1918          IXGBE_WRITE_FLUSH(hw);
1720 1919  
1721 1920          /* configure FDIRCMD register */
1722 1921          fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1723 1922                    IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1724 1923          if (queue == IXGBE_FDIR_DROP_QUEUE)
1725 1924                  fdircmd |= IXGBE_FDIRCMD_DROP;
     1925 +        if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK)
     1926 +                fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
1726 1927          fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1727 1928          fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1728 1929          fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
1729 1930  
1730 1931          IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
     1932 +        err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
     1933 +        if (err) {
     1934 +                DEBUGOUT("Flow Director command did not complete!\n");
     1935 +                return err;
     1936 +        }
1731 1937  
1732 1938          return IXGBE_SUCCESS;
1733 1939  }
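The write path programs every filter register first, flushes those posted writes, and only then issues FDIRCMD and polls for completion, so the command never races the data it consumes. The ordering, reduced to a toy program; the arrays, write_flush() and poll_cmd_done() are placeholders for the MMIO accessors:

#include <stdint.h>
#include <stdio.h>

static uint32_t filter_regs[4];         /* toy "registers" */
static uint32_t cmd_reg;

static void write_flush(void)
{
        /* a posted-write flush is typically a read of any device register */
        (void)cmd_reg;
}

static int poll_cmd_done(void)
{
        cmd_reg = 0;            /* pretend the hardware completed immediately */
        return 0;
}

int main(void)
{
        /* 1. program every field the command will consume */
        filter_regs[0] = 0x0a000001u;   /* e.g. source address */
        filter_regs[1] = 0x0a000002u;   /* e.g. destination address */
        filter_regs[2] = 0x00001f90u;   /* e.g. ports */
        filter_regs[3] = 0x00000123u;   /* e.g. hash / soft id */

        /* 2. make sure those writes reached the device */
        write_flush();

        /* 3. kick the command, then 4. wait for it to finish */
        cmd_reg = 1;
        if (poll_cmd_done() != 0) {
                printf("command did not complete\n");
                return 1;
        }
        printf("filter programmed\n");
        return 0;
}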
1734 1940  
1735 1941  s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
1736 1942                                            union ixgbe_atr_input *input,
1737 1943                                            u16 soft_id)
1738 1944  {
1739 1945          u32 fdirhash;
1740      -        u32 fdircmd = 0;
1741      -        u32 retry_count;
1742      -        s32 err = IXGBE_SUCCESS;
     1946 +        u32 fdircmd;
     1947 +        s32 err;
1743 1948  
1744 1949          /* configure FDIRHASH register */
1745 1950          fdirhash = input->formatted.bkt_hash;
1746 1951          fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1747 1952          IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1748 1953  
1749 1954          /* flush hash to HW */
1750 1955          IXGBE_WRITE_FLUSH(hw);
1751 1956  
1752 1957          /* Query if filter is present */
1753 1958          IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
1754 1959  
1755      -        for (retry_count = 10; retry_count; retry_count--) {
1756      -                /* allow 10us for query to process */
1757      -                usec_delay(10);
1758      -                /* verify query completed successfully */
1759      -                fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
1760      -                if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
1761      -                        break;
     1960 +        err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
     1961 +        if (err) {
     1962 +                DEBUGOUT("Flow Director command did not complete!\n");
     1963 +                return err;
1762 1964          }
1763 1965  
1764      -        if (!retry_count)
1765      -                err = IXGBE_ERR_FDIR_REINIT_FAILED;
1766      -
1767 1966          /* if filter exists in hardware then remove it */
1768 1967          if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
1769 1968                  IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1770 1969                  IXGBE_WRITE_FLUSH(hw);
1771 1970                  IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1772 1971                                  IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
1773 1972          }
1774 1973  
1775      -        return err;
     1974 +        return IXGBE_SUCCESS;
1776 1975  }
1777 1976  
1778 1977  /**
1779 1978   *  ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
1780 1979   *  @hw: pointer to hardware structure
1781 1980   *  @input: input bitstream
1782 1981   *  @input_mask: mask for the input bitstream
1783 1982   *  @soft_id: software index for the filters
1784 1983   *  @queue: queue index to direct traffic to
1785 1984   *
1786 1985   *  Note that the caller to this function must lock before calling, since the
1787 1986   *  hardware writes must be protected from one another.
1788 1987   **/
1789 1988  s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1790 1989                                          union ixgbe_atr_input *input,
1791 1990                                          union ixgbe_atr_input *input_mask,
1792      -                                        u16 soft_id, u8 queue)
     1991 +                                        u16 soft_id, u8 queue, bool cloud_mode)
1793 1992  {
1794 1993          s32 err = IXGBE_ERR_CONFIG;
1795 1994  
1796 1995          DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
1797 1996  
1798 1997          /*
1799 1998           * Check flow_type formatting, and bail out before we touch the hardware
1800 1999           * if there's a configuration issue
1801 2000           */
1802 2001          switch (input->formatted.flow_type) {
1803 2002          case IXGBE_ATR_FLOW_TYPE_IPV4:
     2003 +        case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4:
1804 2004                  input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
1805 2005                  if (input->formatted.dst_port || input->formatted.src_port) {
1806 2006                          DEBUGOUT(" Error on src/dst port\n");
1807 2007                          return IXGBE_ERR_CONFIG;
1808 2008                  }
1809 2009                  break;
1810 2010          case IXGBE_ATR_FLOW_TYPE_SCTPV4:
     2011 +        case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4:
1811 2012                  if (input->formatted.dst_port || input->formatted.src_port) {
1812 2013                          DEBUGOUT(" Error on src/dst port\n");
1813 2014                          return IXGBE_ERR_CONFIG;
1814 2015                  }
1815      -                /* FALLTHRU */
1816 2016          case IXGBE_ATR_FLOW_TYPE_TCPV4:
     2017 +        case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4:
1817 2018          case IXGBE_ATR_FLOW_TYPE_UDPV4:
     2019 +        case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4:
1818 2020                  input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
1819 2021                                                    IXGBE_ATR_L4TYPE_MASK;
1820 2022                  break;
1821 2023          default:
1822 2024                  DEBUGOUT(" Error on flow type input\n");
1823 2025                  return err;
1824 2026          }
1825 2027  
1826 2028          /* program input mask into the HW */
1827      -        err = ixgbe_fdir_set_input_mask_82599(hw, input_mask);
     2029 +        err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode);
1828 2030          if (err)
1829 2031                  return err;
1830 2032  
1831 2033          /* apply mask and compute/store hash */
1832 2034          ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
1833 2035  
1834 2036          /* program filters to filter memory */
1835 2037          return ixgbe_fdir_write_perfect_filter_82599(hw, input,
1836      -                                                     soft_id, queue);
     2038 +                                                     soft_id, queue, cloud_mode);
1837 2039  }
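
The locking note above is the main usage constraint. Below is a minimal, illustrative caller sketch (not part of this change): it programs a single TCP/IPv4 perfect filter steering one 4-tuple to RX queue 3 while holding a hypothetical fdir_lock mutex. The addresses, ports, soft_id and queue values are arbitrary, and byte-order conversion is elided for brevity.

#include <sys/mutex.h>
#include "ixgbe_type.h"
#include "ixgbe_82599.h"

static s32
example_add_tcp_perfect_filter(struct ixgbe_hw *hw, kmutex_t *fdir_lock)
{
        union ixgbe_atr_input input = { 0 };
        union ixgbe_atr_input input_mask = { 0 };
        s32 err;

        /* Describe the flow to match: one TCP/IPv4 4-tuple. */
        input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
        input.formatted.src_ip[0] = 0xC0A80001;         /* 192.168.0.1 */
        input.formatted.dst_ip[0] = 0xC0A80002;         /* 192.168.0.2 */
        input.formatted.src_port = 0x3039;              /* 12345 */
        input.formatted.dst_port = 0x0050;              /* 80 */

        /* Match the full 4-tuple; the flow_type mask is set by the callee. */
        input_mask.formatted.src_ip[0] = 0xFFFFFFFF;
        input_mask.formatted.dst_ip[0] = 0xFFFFFFFF;
        input_mask.formatted.src_port = 0xFFFF;
        input_mask.formatted.dst_port = 0xFFFF;

        /* Hardware writes must be serialized by the caller. */
        mutex_enter(fdir_lock);
        err = ixgbe_fdir_add_perfect_filter_82599(hw, &input, &input_mask,
            0, 3, FALSE);               /* soft_id 0, queue 3, no cloud mode */
        mutex_exit(fdir_lock);

        return (err);
}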
1838 2040  
1839 2041  /**
1840 2042   *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
1841 2043   *  @hw: pointer to hardware structure
1842 2044   *  @reg: analog register to read
1843 2045   *  @val: read value
1844 2046   *
1845 2047   *  Performs read operation to Omer analog register specified.
1846 2048   **/
↓ open down ↓ 69 lines elided ↑ open up ↑
1916 2118  /**
1917 2119   *  ixgbe_identify_phy_82599 - Get physical layer module
1918 2120   *  @hw: pointer to hardware structure
1919 2121   *
1920 2122   *  Determines the physical layer module found on the current adapter.
1921 2123   *  If PHY already detected, maintains current PHY type in hw struct,
1922 2124   *  otherwise executes the PHY detection routine.
1923 2125   **/
1924 2126  s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
1925 2127  {
1926      -        s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
     2128 +        s32 status;
1927 2129  
1928 2130          DEBUGFUNC("ixgbe_identify_phy_82599");
1929 2131  
1930 2132          /* Detect PHY if not unknown - returns success if already detected. */
1931 2133          status = ixgbe_identify_phy_generic(hw);
1932 2134          if (status != IXGBE_SUCCESS) {
1933 2135                  /* 82599 10GBASE-T requires an external PHY */
1934 2136                  if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
1935      -                        goto out;
     2137 +                        return status;
1936 2138                  else
1937 2139                          status = ixgbe_identify_module_generic(hw);
1938 2140          }
1939 2141  
1940 2142          /* Set PHY type none if no PHY detected */
1941 2143          if (hw->phy.type == ixgbe_phy_unknown) {
1942 2144                  hw->phy.type = ixgbe_phy_none;
1943      -                status = IXGBE_SUCCESS;
     2145 +                return IXGBE_SUCCESS;
1944 2146          }
1945 2147  
1946 2148          /* Return error if SFP module has been detected but is not supported */
1947 2149          if (hw->phy.type == ixgbe_phy_sfp_unsupported)
1948      -                status = IXGBE_ERR_SFP_NOT_SUPPORTED;
     2150 +                return IXGBE_ERR_SFP_NOT_SUPPORTED;
1949 2151  
1950      -out:
1951 2152          return status;
1952 2153  }
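
As a usage note (illustrative only, not part of this change): callers generally treat IXGBE_ERR_SFP_NOT_SUPPORTED as fatal, while an otherwise successful return with hw->phy.type == ixgbe_phy_none simply means no module is currently present. A hypothetical helper showing that split:

static s32
example_check_phy(struct ixgbe_hw *hw)
{
        s32 status = ixgbe_identify_phy_82599(hw);

        if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) {
                /* A module was found but is not supported: fail setup. */
                return (status);
        }

        /*
         * IXGBE_SUCCESS with hw->phy.type == ixgbe_phy_none means no PHY
         * or module was detected; that is not treated as an error here.
         */
        return (status);
}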
1953 2154  
1954 2155  /**
1955 2156   *  ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
1956 2157   *  @hw: pointer to hardware structure
1957 2158   *
1958 2159   *  Determines physical layer capabilities of the current configuration.
1959 2160   **/
1960 2161  u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
1961 2162  {
1962 2163          u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1963 2164          u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1964 2165          u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
1965 2166          u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
1966 2167          u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1967 2168          u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1968 2169          u16 ext_ability = 0;
1969      -        u8 comp_codes_10g = 0;
1970      -        u8 comp_codes_1g = 0;
1971 2170  
1972 2171          DEBUGFUNC("ixgbe_get_supported_physical_layer_82599");
1973 2172  
1974 2173          hw->phy.ops.identify(hw);
1975 2174  
1976 2175          switch (hw->phy.type) {
1977 2176          case ixgbe_phy_tn:
1978 2177          case ixgbe_phy_cu_unknown:
1979 2178                  hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
1980 2179                  IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
↓ open down ↓ 8 lines elided ↑ open up ↑
1989 2188                  break;
1990 2189          }
1991 2190  
1992 2191          switch (autoc & IXGBE_AUTOC_LMS_MASK) {
1993 2192          case IXGBE_AUTOC_LMS_1G_AN:
1994 2193          case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
1995 2194                  if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
1996 2195                          physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
1997 2196                              IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1998 2197                          goto out;
1999      -                }
2000      -                /* SFI mode so read SFP module */
2001      -                goto sfp_check;
     2198 +                } else
     2199 +                        /* SFI mode so read SFP module */
     2200 +                        goto sfp_check;
     2201 +                break;
2002 2202          case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
2003 2203                  if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
2004 2204                          physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
2005 2205                  else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
2006 2206                          physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2007 2207                  else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
2008 2208                          physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
2009 2209                  goto out;
     2210 +                break;
2010 2211          case IXGBE_AUTOC_LMS_10G_SERIAL:
2011 2212                  if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
2012 2213                          physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2013 2214                          goto out;
2014 2215                  } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
2015 2216                          goto sfp_check;
2016 2217                  break;
2017 2218          case IXGBE_AUTOC_LMS_KX4_KX_KR:
2018 2219          case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
2019 2220                  if (autoc & IXGBE_AUTOC_KX_SUPP)
2020 2221                          physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2021 2222                  if (autoc & IXGBE_AUTOC_KX4_SUPP)
2022 2223                          physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2023 2224                  if (autoc & IXGBE_AUTOC_KR_SUPP)
2024 2225                          physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2025 2226                  goto out;
     2227 +                break;
2026 2228          default:
2027 2229                  goto out;
     2230 +                break;
2028 2231          }
2029 2232  
2030 2233  sfp_check:
2031 2234          /* SFP check must be done last since DA modules are sometimes used to
2032 2235           * test KR mode -  we need to id KR mode correctly before SFP module.
2033 2236           * Call identify_sfp because the pluggable module may have changed */
2034      -        hw->phy.ops.identify_sfp(hw);
2035      -        if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
2036      -                goto out;
2037      -
2038      -        switch (hw->phy.type) {
2039      -        case ixgbe_phy_sfp_passive_tyco:
2040      -        case ixgbe_phy_sfp_passive_unknown:
2041      -                physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
2042      -                break;
2043      -        case ixgbe_phy_sfp_ftl_active:
2044      -        case ixgbe_phy_sfp_active_unknown:
2045      -                physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
2046      -                break;
2047      -        case ixgbe_phy_sfp_avago:
2048      -        case ixgbe_phy_sfp_ftl:
2049      -        case ixgbe_phy_sfp_intel:
2050      -        case ixgbe_phy_sfp_unknown:
2051      -                hw->phy.ops.read_i2c_eeprom(hw,
2052      -                      IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
2053      -                hw->phy.ops.read_i2c_eeprom(hw,
2054      -                      IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
2055      -                if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
2056      -                        physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
2057      -                else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
2058      -                        physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
2059      -                else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
2060      -                        physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
2061      -                else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE)
2062      -                        physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX;
2063      -                break;
2064      -        default:
2065      -                break;
2066      -        }
2067      -
     2237 +        physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
2068 2238  out:
2069 2239          return physical_layer;
2070 2240  }
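
The value returned above is a bitmask of IXGBE_PHYSICAL_LAYER_* flags, so more than one bit may be set (for example KX | KX4 | KR in backplane modes). A small illustrative decoder, using a hypothetical helper name:

static void
example_report_physical_layer(struct ixgbe_hw *hw)
{
        u32 layer = ixgbe_get_supported_physical_layer_82599(hw);

        if (layer == IXGBE_PHYSICAL_LAYER_UNKNOWN) {
                DEBUGOUT("physical layer unknown\n");
                return;
        }
        if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
                DEBUGOUT("10GBASE-KR supported\n");
        if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
                DEBUGOUT("10GBASE-KX4 supported\n");
        if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
                DEBUGOUT("1000BASE-KX supported\n");
        if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
                DEBUGOUT("10GBASE-SR supported\n");
}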
2071 2241  
2072 2242  /**
2073 2243   *  ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
2074 2244   *  @hw: pointer to hardware structure
2075 2245   *  @regval: register value to write to RXCTRL
2076 2246   *
2077 2247   *  Enables the Rx DMA unit for 82599
↓ open down ↓ 5 lines elided ↑ open up ↑
2083 2253  
2084 2254          /*
2085 2255           * Workaround for 82599 silicon errata when enabling the Rx datapath.
2086 2256           * If traffic is incoming before we enable the Rx unit, it could hang
2087 2257           * the Rx DMA unit.  Therefore, make sure the security engine is
2088 2258           * completely disabled prior to enabling the Rx unit.
2089 2259           */
2090 2260  
2091 2261          hw->mac.ops.disable_sec_rx_path(hw);
2092 2262  
2093      -        IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
     2263 +        if (regval & IXGBE_RXCTRL_RXEN)
     2264 +                ixgbe_enable_rx(hw);
     2265 +        else
     2266 +                ixgbe_disable_rx(hw);
2094 2267  
2095 2268          hw->mac.ops.enable_sec_rx_path(hw);
2096 2269  
2097 2270          return IXGBE_SUCCESS;
2098 2271  }
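
The intended call pattern (illustrative, not part of this change) is to read RXCTRL, toggle RXEN in that value, and pass the result here instead of writing RXCTRL directly, so the security-engine quiesce above always brackets the Rx enable:

static void
example_start_rx_dma(struct ixgbe_hw *hw)
{
        u32 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);

        /* Enable Rx through the errata-safe path. */
        (void) ixgbe_enable_rx_dma_82599(hw, rxctrl | IXGBE_RXCTRL_RXEN);
}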
2099 2272  
2100 2273  /**
2101      - *  ixgbe_verify_fw_version_82599 - verify fw version for 82599
     2274 + *  ixgbe_verify_fw_version_82599 - verify FW version for 82599
2102 2275   *  @hw: pointer to hardware structure
2103 2276   *
2104 2277   *  Verifies that the installed firmware version is 0.6 or higher
2105 2278   *  for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
2106 2279   *
2107 2280   *  Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
2108 2281   *  if the FW version is not supported.
2109 2282   **/
2110 2283  static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2111 2284  {
2112 2285          s32 status = IXGBE_ERR_EEPROM_VERSION;
2113 2286          u16 fw_offset, fw_ptp_cfg_offset;
2114      -        u16 fw_version = 0;
     2287 +        u16 fw_version;
2115 2288  
2116 2289          DEBUGFUNC("ixgbe_verify_fw_version_82599");
2117 2290  
2118 2291          /* firmware check is only necessary for SFI devices */
2119 2292          if (hw->phy.media_type != ixgbe_media_type_fiber) {
2120 2293                  status = IXGBE_SUCCESS;
2121 2294                  goto fw_version_out;
2122 2295          }
2123 2296  
2124 2297          /* get the offset to the Firmware Module block */
2125      -        hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
     2298 +        if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) {
     2299 +                ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
     2300 +                              "eeprom read at offset %d failed", IXGBE_FW_PTR);
     2301 +                return IXGBE_ERR_EEPROM_VERSION;
     2302 +        }
2126 2303  
2127 2304          if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2128 2305                  goto fw_version_out;
2129 2306  
2130 2307          /* get the offset to the Pass Through Patch Configuration block */
2131      -        hw->eeprom.ops.read(hw, (fw_offset +
     2308 +        if (hw->eeprom.ops.read(hw, (fw_offset +
2132 2309                                   IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
2133      -                                 &fw_ptp_cfg_offset);
     2310 +                                 &fw_ptp_cfg_offset)) {
     2311 +                ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
     2312 +                              "eeprom read at offset %d failed",
     2313 +                              fw_offset +
     2314 +                              IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR);
     2315 +                return IXGBE_ERR_EEPROM_VERSION;
     2316 +        }
2134 2317  
2135 2318          if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2136 2319                  goto fw_version_out;
2137 2320  
2138 2321          /* get the firmware version */
2139      -        hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
2140      -                            IXGBE_FW_PATCH_VERSION_4), &fw_version);
     2322 +        if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
     2323 +                            IXGBE_FW_PATCH_VERSION_4), &fw_version)) {
     2324 +                ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
     2325 +                              "eeprom read at offset %d failed",
     2326 +                              fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4);
     2327 +                return IXGBE_ERR_EEPROM_VERSION;
     2328 +        }
2141 2329  
2142 2330          if (fw_version > 0x5)
2143 2331                  status = IXGBE_SUCCESS;
2144 2332  
2145 2333  fw_version_out:
2146 2334          return status;
2147 2335  }
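
The version check above follows the usual EEPROM pointer-chain pattern: each word read is a pointer to the next block (with 0x0000 and 0xFFFF meaning "block absent") until the final word holds the value of interest. A hypothetical helper sketching one hop of that chain:

static s32
example_follow_eeprom_ptr(struct ixgbe_hw *hw, u16 ptr_offset,
    u16 *blk_offset)
{
        /* Read the pointer word itself. */
        if (hw->eeprom.ops.read(hw, ptr_offset, blk_offset) != IXGBE_SUCCESS)
                return (IXGBE_ERR_EEPROM_VERSION);

        /* 0x0000 or 0xFFFF means the referenced block is not present. */
        if (*blk_offset == 0 || *blk_offset == 0xFFFF)
                return (IXGBE_ERR_EEPROM_VERSION);

        return (IXGBE_SUCCESS);
}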
2148 2336  
2149 2337  /**
2150 2338   *  ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
↓ open down ↓ 19 lines elided ↑ open up ↑
2170 2358  
2171 2359          /* get the offset to the LESM Parameters block */
2172 2360          status = hw->eeprom.ops.read(hw, (fw_offset +
2173 2361                                       IXGBE_FW_LESM_PARAMETERS_PTR),
2174 2362                                       &fw_lesm_param_offset);
2175 2363  
2176 2364          if ((status != IXGBE_SUCCESS) ||
2177 2365              (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
2178 2366                  goto out;
2179 2367  
2180      -        /* get the lesm state word */
     2368 +        /* get the LESM state word */
2181 2369          status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
2182 2370                                       IXGBE_FW_LESM_STATE_1),
2183 2371                                       &fw_lesm_state);
2184 2372  
2185 2373          if ((status == IXGBE_SUCCESS) &&
2186 2374              (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
2187 2375                  lesm_enabled = TRUE;
2188 2376  
2189 2377  out:
2190 2378          return lesm_enabled;
↓ open down ↓ 58 lines elided ↑ open up ↑
2249 2437           */
2250 2438          if ((eeprom->type == ixgbe_eeprom_spi) &&
2251 2439              (offset <= IXGBE_EERD_MAX_ADDR))
2252 2440                  ret_val = ixgbe_read_eerd_generic(hw, offset, data);
2253 2441          else
2254 2442                  ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
2255 2443  
2256 2444          return ret_val;
2257 2445  }
2258 2446  
     2447 +/**
     2448 + * ixgbe_reset_pipeline_82599 - perform pipeline reset
     2449 + *
     2450 + *  @hw: pointer to hardware structure
     2451 + *
     2452 + * Reset pipeline by asserting Restart_AN together with LMS change to ensure
     2453 + * full pipeline reset.  This function assumes the SW/FW lock is held.
     2454 + **/
     2455 +s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
     2456 +{
     2457 +        s32 ret_val;
     2458 +        u32 anlp1_reg = 0;
     2459 +        u32 i, autoc_reg, autoc2_reg;
2259 2460  
     2461 +        /* Enable link if disabled in NVM */
     2462 +        autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
     2463 +        if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
     2464 +                autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
     2465 +                IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
     2466 +                IXGBE_WRITE_FLUSH(hw);
     2467 +        }
     2468 +
     2469 +        autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
     2470 +        autoc_reg |= IXGBE_AUTOC_AN_RESTART;
     2471 +        /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
     2472 +        IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
     2473 +                        autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
     2474 +        /* Wait for AN to leave state 0 */
     2475 +        for (i = 0; i < 10; i++) {
     2476 +                msec_delay(4);
     2477 +                anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
     2478 +                if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
     2479 +                        break;
     2480 +        }
     2481 +
     2482 +        if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
     2483 +                DEBUGOUT("auto negotiation not completed\n");
     2484 +                ret_val = IXGBE_ERR_RESET_FAILED;
     2485 +                goto reset_pipeline_out;
     2486 +        }
     2487 +
     2488 +        ret_val = IXGBE_SUCCESS;
     2489 +
     2490 +reset_pipeline_out:
     2491 +        /* Write AUTOC register with original LMS field and Restart_AN */
     2492 +        IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
     2493 +        IXGBE_WRITE_FLUSH(hw);
     2494 +
     2495 +        return ret_val;
     2496 +}
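
Because the routine assumes the SW/FW lock is held, a caller brackets it with the MAC CSR semaphore. An illustrative sketch (not part of this change), assuming the generic acquire/release_swfw_sync ops are installed:

static s32
example_reset_pipeline_locked(struct ixgbe_hw *hw)
{
        s32 ret_val;

        /* Take the software/firmware semaphore covering MAC CSR access. */
        ret_val = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
        if (ret_val != IXGBE_SUCCESS)
                return (ret_val);

        ret_val = ixgbe_reset_pipeline_82599(hw);

        hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

        return (ret_val);
}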
     2497 +
     2498 +/**
     2499 + *  ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
     2500 + *  @hw: pointer to hardware structure
     2501 + *  @byte_offset: byte offset to read
     2502 + *  @data: value read
     2503 + *
     2504 + *  Performs byte read operation to SFP module's EEPROM over I2C interface at
     2505 + *  a specified device address.
     2506 + **/
     2507 +static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
     2508 +                                u8 dev_addr, u8 *data)
     2509 +{
     2510 +        u32 esdp;
     2511 +        s32 status;
     2512 +        s32 timeout = 200;
     2513 +
     2514 +        DEBUGFUNC("ixgbe_read_i2c_byte_82599");
     2515 +
     2516 +        if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
     2517 +                /* Acquire I2C bus ownership. */
     2518 +                esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
     2519 +                esdp |= IXGBE_ESDP_SDP0;
     2520 +                IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
     2521 +                IXGBE_WRITE_FLUSH(hw);
     2522 +
     2523 +                while (timeout) {
     2524 +                        esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
     2525 +                        if (esdp & IXGBE_ESDP_SDP1)
     2526 +                                break;
     2527 +
     2528 +                        msec_delay(5);
     2529 +                        timeout--;
     2530 +                }
     2531 +
     2532 +                if (!timeout) {
     2533 +                        DEBUGOUT("Driver can't access resource,"
     2534 +                                 " acquiring I2C bus timeout.\n");
     2535 +                        status = IXGBE_ERR_I2C;
     2536 +                        goto release_i2c_access;
     2537 +                }
     2538 +        }
     2539 +
     2540 +        status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
     2541 +
     2542 +release_i2c_access:
     2543 +
     2544 +        if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
     2545 +                /* Release I2C bus ownership. */
     2546 +                esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
     2547 +                esdp &= ~IXGBE_ESDP_SDP0;
     2548 +                IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
     2549 +                IXGBE_WRITE_FLUSH(hw);
     2550 +        }
     2551 +
     2552 +        return status;
     2553 +}
     2554 +
     2555 +/**
     2556 + *  ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
     2557 + *  @hw: pointer to hardware structure
     2558 + *  @byte_offset: byte offset to write
     2559 + *  @data: value to write
     2560 + *
     2561 + *  Performs byte write operation to SFP module's EEPROM over I2C interface at
     2562 + *  a specified device address.
     2563 + **/
     2564 +static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
     2565 +                                 u8 dev_addr, u8 data)
     2566 +{
     2567 +        u32 esdp;
     2568 +        s32 status;
     2569 +        s32 timeout = 200;
     2570 +
     2571 +        DEBUGFUNC("ixgbe_write_i2c_byte_82599");
     2572 +
     2573 +        if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
     2574 +                /* Acquire I2C bus ownership. */
     2575 +                esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
     2576 +                esdp |= IXGBE_ESDP_SDP0;
     2577 +                IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
     2578 +                IXGBE_WRITE_FLUSH(hw);
     2579 +
     2580 +                while (timeout) {
     2581 +                        esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
     2582 +                        if (esdp & IXGBE_ESDP_SDP1)
     2583 +                                break;
     2584 +
     2585 +                        msec_delay(5);
     2586 +                        timeout--;
     2587 +                }
     2588 +
     2589 +                if (!timeout) {
     2590 +                        DEBUGOUT("Driver can't access resource,"
     2591 +                                 " acquiring I2C bus timeout.\n");
     2592 +                        status = IXGBE_ERR_I2C;
     2593 +                        goto release_i2c_access;
     2594 +                }
     2595 +        }
     2596 +
     2597 +        status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
     2598 +
     2599 +release_i2c_access:
     2600 +
     2601 +        if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
     2602 +                /* Release I2C bus ownership. */
     2603 +                esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
     2604 +                esdp &= ~IXGBE_ESDP_SDP0;
     2605 +                IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
     2606 +                IXGBE_WRITE_FLUSH(hw);
     2607 +        }
     2608 +
     2609 +        return status;
     2610 +}
    