On Thu, Nov 17, 2016 at 23:23 +0100, Mike Belopuhov wrote:
> On Thu, Nov 17, 2016 at 22:24 +0100, Mike Belopuhov wrote:
> > On Wed, Nov 16, 2016 at 23:04 +0100, Mike Belopuhov wrote:
> > > Hi,
> > > 
> > > I've done a massive update of our ix(4) driver that brings
> > > support for the X550 family of controllers, including those
> > > integrated into new Xeon chips, as well as QSFP support for
> > > the X520 (82599), but this needs thorough testing.  If you're
> > > using Intel 10Gb controllers, please test either (or both!)
> > > the complete diff found at this URL:
> > > http://gir.theapt.org/~mike/ixgbe.diff or the next few
> > > snapshots that will (hopefully) contain bits of this monster
> > > diff.
> > > 
> > > To test the monster diff, make sure that you are running a
> > > recent snapshot and your kernel source code is up-to-date,
> > > then reset a few files to the specified revisions and
> > > remove the support file for X550:
> > > 
> > >     % pwd
> > >     /usr/src
> > >     % cvs up -r1.326 sys/dev/pci/files.pci
> > >     % cvs up -r1.133 sys/dev/pci/if_ix.c
> > >     % cvs up -r1.14 sys/dev/pci/ixgbe.c
> > >     % cvs up -r1.23 sys/dev/pci/ixgbe.h
> > >     % cvs up -r1.11 sys/dev/pci/ixgbe_82598.c
> > >     % cvs up -r1.12 sys/dev/pci/ixgbe_82599.c
> > >     % cvs up -r1.13 sys/dev/pci/ixgbe_phy.c
> > >     % cvs up -r1.22 sys/dev/pci/ixgbe_type.h
> > >     % cvs up -r1.4 sys/dev/pci/ixgbe_x540.c
> > >     % rm -f sys/dev/pci/ixgbe_x550.c
> > > 
> > > To verify that files have been reset:
> > > 
> > >     % pwd
> > >     /usr/src
> > >     % fgrep "//T1" sys/dev/pci/CVS/Entries
> > >     /files.pci/1.326/Mon Sep 12 09:45:53 2016//T1.326
> > >     /if_ix.c/1.133/Thu Oct 27 05:00:50 2016//T1.133
> > >     /ixgbe.c/1.14/Wed Nov 26 17:03:52 2014//T1.14
> > >     /ixgbe.h/1.23/Tue Oct  4 09:24:02 2016//T1.23
> > >     /ixgbe_82598.c/1.11/Mon Aug  5 19:58:06 2013//T1.11
> > >     /ixgbe_82599.c/1.12/Fri May  1 04:15:00 2015//T1.12
> > >     /ixgbe_phy.c/1.13/Fri May  1 04:15:00 2015//T1.13
> > >     /ixgbe_type.h/1.22/Wed Nov 16 21:53:57 2016//T1.22
> > >     /ixgbe_x540.c/1.4/Wed May 20 14:34:27 2015//T1.4
> > > 
> > > And then test and apply the diff:
> > > 
> > >     % pwd
> > >     /usr/src
> > >     % patch -Csp0 </tmp/ixgbe.diff && patch -sp0 </tmp/ixgbe.diff
> > > 
> > > Make sure to reset files every time the source tree gets updated.
> > > 
> > > Cheers,
> > > Mike
> > 
> > As of today, with the file revisions below, most of the
> > boilerplate code is now committed.  I've tried very hard not to
> > introduce any noticeable changes in behavior so far.
> > 
> >     if_ix.c/1.133
> >     ixgbe_82598.c/1.15
> >     ixgbe_x550.c/1.1
> >     ixgbe.c/1.19
> >     ixgbe.h/1.26
> >     ixgbe_82599.c/1.16
> >     ixgbe_x540.c/1.9
> >     ixgbe_type.h/1.29
> >     ixgbe_phy.c/1.18
> > 
> > The remaining bits are below.  I'll start picking off the
> > low-hanging fruit, but won't mind another pair of eyes.
> > 
> 
> I've just realised I forgot to commit one small bit that is
> also not part of this diff and might break the kernel build.
> I'll post an update in about 12 hours.

OK, the issue is resolved; ixgbe_x550.c should now be at revision 1.2.
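
To double-check that your checkout has it (a quick sketch, assuming an
up-to-date /usr/src tree):

    % cd /usr/src
    % cvs status sys/dev/pci/ixgbe_x550.c | fgrep "Working revision"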

Here's an updated diff (Hrvoje Popovski found out that the
enable_tx_laser function pointer may not be set on X550).
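
The fix is simply to test such optional ops pointers before calling
through them, as the attach, stop and config_link paths now do, for
example (excerpted from the diff below):

    /* Enable the optics for 82599 SFP+ fiber */
    if (sc->hw.mac.ops.enable_tx_laser)
            sc->hw.mac.ops.enable_tx_laser(&sc->hw);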

diff --git sys/dev/pci/files.pci sys/dev/pci/files.pci
index a6b91fb..34ce9bf 100644
--- sys/dev/pci/files.pci
+++ sys/dev/pci/files.pci
@@ -356,10 +356,11 @@ attach    ix at pci
 file   dev/pci/if_ix.c                 ix
 file   dev/pci/ixgbe.c                 ix
 file   dev/pci/ixgbe_82598.c           ix
 file   dev/pci/ixgbe_82599.c           ix
 file   dev/pci/ixgbe_x540.c            ix
+file   dev/pci/ixgbe_x550.c            ix
 file   dev/pci/ixgbe_phy.c             ix
 
 # Neterion Xframe 10 Gigabit ethernet 
 device xge: ether, ifnet, ifmedia
 attach xge  at pci
diff --git sys/dev/pci/if_ix.c sys/dev/pci/if_ix.c
index d13baea..c07758b 100644
--- sys/dev/pci/if_ix.c
+++ sys/dev/pci/if_ix.c
@@ -69,14 +69,24 @@ const struct pci_matchid ixgbe_devices[] = {
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_BPLANE_FCOE },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_CX4 },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_T3_LOM },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_EM },
+       { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_SF_QP },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_SF2 },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_FCOE },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599EN_SFP },
+       { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_QSFP_SF_QP },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X540T },
+       { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X540T1 },
+       { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550T },
+       { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550T1 },
+       { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_KX4 },
+       { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_KR },
+       { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_SFP },
+       { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_10G_T },
+       { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_1G_T },
 };
 
 /*********************************************************************
  *  Function prototypes
  *********************************************************************/
@@ -96,11 +106,13 @@ int        ixgbe_allocate_pci_resources(struct ix_softc *);
 int    ixgbe_allocate_legacy(struct ix_softc *);
 int    ixgbe_allocate_queues(struct ix_softc *);
 void   ixgbe_free_pci_resources(struct ix_softc *);
 void   ixgbe_local_timer(void *);
 void   ixgbe_setup_interface(struct ix_softc *);
-void   ixgbe_config_link(struct ix_softc *sc);
+void   ixgbe_config_gpie(struct ix_softc *);
+void   ixgbe_config_delay_values(struct ix_softc *);
+void   ixgbe_config_link(struct ix_softc *);
 
 int    ixgbe_allocate_transmit_buffers(struct tx_ring *);
 int    ixgbe_setup_transmit_structures(struct ix_softc *);
 int    ixgbe_setup_transmit_ring(struct tx_ring *);
 void   ixgbe_initialize_transmit_units(struct ix_softc *);
@@ -111,10 +123,11 @@ int       ixgbe_allocate_receive_buffers(struct rx_ring *);
 int    ixgbe_setup_receive_structures(struct ix_softc *);
 int    ixgbe_setup_receive_ring(struct rx_ring *);
 void   ixgbe_initialize_receive_units(struct ix_softc *);
 void   ixgbe_free_receive_structures(struct ix_softc *);
 void   ixgbe_free_receive_buffers(struct rx_ring *);
+void   ixgbe_initialize_rss_mapping(struct ix_softc *);
 int    ixgbe_rxfill(struct rx_ring *);
 void   ixgbe_rxrefill(void *);
 
 void   ixgbe_enable_intr(struct ix_softc *);
 void   ixgbe_disable_intr(struct ix_softc *);
@@ -144,10 +157,11 @@ void      ixgbe_setup_vlan_hw_support(struct ix_softc *);
 
 /* Support for pluggable optic modules */
 void   ixgbe_setup_optics(struct ix_softc *);
 void   ixgbe_handle_mod(struct ix_softc *);
 void   ixgbe_handle_msf(struct ix_softc *);
+void   ixgbe_handle_phy(struct ix_softc *);
 
 /* Legacy (single vector interrupt handler */
 int    ixgbe_intr(void *);
 void   ixgbe_enable_queue(struct ix_softc *, uint32_t);
 void   ixgbe_disable_queue(struct ix_softc *, uint32_t);
@@ -252,11 +266,11 @@ ixgbe_attach(struct device *parent, struct device *self, void *aux)
 
        error = ixgbe_init_hw(hw);
        if (error == IXGBE_ERR_EEPROM_VERSION) {
                printf(": This device is a pre-production adapter/"
                    "LOM.  Please be aware there may be issues associated "
-                   "with your hardware.\n If you are experiencing problems "
+                   "with your hardware.\nIf you are experiencing problems "
                    "please contact your Intel or hardware representative "
                    "who provided you with this hardware.\n");
        } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
                printf(": Unsupported SFP+ Module\n");
        }
@@ -274,10 +288,18 @@ ixgbe_attach(struct device *parent, struct device *self, void *aux)
 
        error = ixgbe_allocate_legacy(sc);
        if (error)
                goto err_late;
 
+       /* Enable the optics for 82599 SFP+ fiber */
+       if (sc->hw.mac.ops.enable_tx_laser)
+               sc->hw.mac.ops.enable_tx_laser(&sc->hw);
+
+       /* Enable power to the phy */
+       if (hw->phy.ops.set_phy_power)
+               hw->phy.ops.set_phy_power(&sc->hw, TRUE);
+
        /* Setup OS specific network interface */
        ixgbe_setup_interface(sc);
 
        /* Initialize statistics */
        ixgbe_update_stats_counters(sc);
@@ -583,11 +605,11 @@ void
 ixgbe_init(void *arg)
 {
        struct ix_softc *sc = (struct ix_softc *)arg;
        struct ifnet    *ifp = &sc->arpcom.ac_if;
        struct rx_ring  *rxr = sc->rx_rings;
-       uint32_t         k, txdctl, rxdctl, rxctrl, mhadd, gpie, itr;
+       uint32_t         k, txdctl, rxdctl, rxctrl, mhadd, itr;
        int              i, s, err;
 
        INIT_DEBUGOUT("ixgbe_init: begin");
 
        s = splnet();
@@ -628,56 +650,23 @@ ixgbe_init(void *arg)
        }
 
        /* Configure RX settings */
        ixgbe_initialize_receive_units(sc);
 
+       /* Enable SDP & MSIX interrupts based on adapter */
+       ixgbe_config_gpie(sc);
+
        /* Program promiscuous mode and multicast filters. */
        ixgbe_iff(sc);
 
-       gpie = IXGBE_READ_REG(&sc->hw, IXGBE_GPIE);
-
-       /* Enable Fan Failure Interrupt */
-       gpie |= IXGBE_SDP1_GPIEN;
-
-       if (sc->hw.mac.type == ixgbe_mac_82599EB) {
-               /* Add for Module detection */
-               gpie |= IXGBE_SDP2_GPIEN;
-
-               /*
-                * Set LL interval to max to reduce the number of low latency
-                * interrupts hitting the card when the ring is getting full.
-                */
-               gpie |= 0xf << IXGBE_GPIE_LLI_DELAY_SHIFT;
-       }
-
-       if (sc->hw.mac.type == ixgbe_mac_X540) {
-               /* Thermal Failure Detection */
-               gpie |= IXGBE_SDP0_GPIEN;
-
-               /*
-                * Set LL interval to max to reduce the number of low latency
-                * interrupts hitting the card when the ring is getting full.
-                */
-               gpie |= 0xf << IXGBE_GPIE_LLI_DELAY_SHIFT;
-       }
-
-       if (sc->msix > 1) {
-               /* Enable Enhanced MSIX mode */
-               gpie |= IXGBE_GPIE_MSIX_MODE;
-               gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
-                   IXGBE_GPIE_OCD;
-       }
-       IXGBE_WRITE_REG(&sc->hw, IXGBE_GPIE, gpie);
-
        /* Set MRU size */
        mhadd = IXGBE_READ_REG(&sc->hw, IXGBE_MHADD);
        mhadd &= ~IXGBE_MHADD_MFS_MASK;
        mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
        IXGBE_WRITE_REG(&sc->hw, IXGBE_MHADD, mhadd);
 
        /* Now enable all the queues */
-
        for (i = 0; i < sc->num_queues; i++) {
                txdctl = IXGBE_READ_REG(&sc->hw, IXGBE_TXDCTL(i));
                txdctl |= IXGBE_TXDCTL_ENABLE;
                /* Set WTHRESH to 8, burst writeback */
                txdctl |= (8 << 16);
@@ -756,39 +745,20 @@ ixgbe_init(void *arg)
        itr = (4000000 / IXGBE_INTS_PER_SEC) & 0xff8;
        if (sc->hw.mac.type != ixgbe_mac_82598EB)
                itr |= IXGBE_EITR_LLI_MOD | IXGBE_EITR_CNT_WDIS;
        IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(0), itr);
 
+       /* Enable power to the phy */
+       if (sc->hw.phy.ops.set_phy_power)
+               sc->hw.phy.ops.set_phy_power(&sc->hw, TRUE);
+
        /* Config/Enable Link */
        ixgbe_config_link(sc);
 
        /* Hardware Packet Buffer & Flow Control setup */
-       {
-               uint32_t rxpb, frame, size, tmp;
-
-               frame = sc->max_frame_size;
-
-               /* Calculate High Water */
-               if (sc->hw.mac.type == ixgbe_mac_X540)
-                       tmp = IXGBE_DV_X540(frame, frame);
-               else
-                       tmp = IXGBE_DV(frame, frame);
-               size = IXGBE_BT2KB(tmp);
-               rxpb = IXGBE_READ_REG(&sc->hw, IXGBE_RXPBSIZE(0)) >> 10;
-               sc->hw.fc.high_water[0] = rxpb - size;
-
-               /* Now calculate Low Water */
-               if (sc->hw.mac.type == ixgbe_mac_X540)
-                       tmp = IXGBE_LOW_DV_X540(frame);
-               else
-                       tmp = IXGBE_LOW_DV(frame);
-               sc->hw.fc.low_water[0] = IXGBE_BT2KB(tmp);
+       ixgbe_config_delay_values(sc);
 
-               sc->hw.fc.requested_mode = sc->fc;
-               sc->hw.fc.pause_time = IXGBE_FC_PAUSE;
-               sc->hw.fc.send_xon = TRUE;
-       }
        /* Initialize the FC settings */
        sc->hw.mac.ops.start_hw(&sc->hw);
 
        /* And now turn on interrupts */
        ixgbe_enable_intr(sc);
@@ -798,10 +768,107 @@ ixgbe_init(void *arg)
        ifq_clr_oactive(&ifp->if_snd);
 
        splx(s);
 }
 
+void
+ixgbe_config_gpie(struct ix_softc *sc)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+       uint32_t gpie;
+
+       gpie = IXGBE_READ_REG(&sc->hw, IXGBE_GPIE);
+
+       /* Fan Failure Interrupt */
+       if (hw->device_id == IXGBE_DEV_ID_82598AT)
+               gpie |= IXGBE_SDP1_GPIEN;
+
+       if (sc->hw.mac.type == ixgbe_mac_82599EB) {
+               /* Add for Module detection */
+               gpie |= IXGBE_SDP2_GPIEN;
+
+               /* Media ready */
+               if (hw->device_id != IXGBE_DEV_ID_82599_QSFP_SF_QP)
+                       gpie |= IXGBE_SDP1_GPIEN;
+
+               /*
+                * Set LL interval to max to reduce the number of low latency
+                * interrupts hitting the card when the ring is getting full.
+                */
+               gpie |= 0xf << IXGBE_GPIE_LLI_DELAY_SHIFT;
+       }
+
+       if (sc->hw.mac.type == ixgbe_mac_X540 ||
+           hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
+           hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
+               /*
+                * Thermal Failure Detection (X540)
+                * Link Detection (X552 SFP+, X552/X557-AT)
+                */
+               gpie |= IXGBE_SDP0_GPIEN_X540;
+
+               /*
+                * Set LL interval to max to reduce the number of low latency
+                * interrupts hitting the card when the ring is getting full.
+                */
+               gpie |= 0xf << IXGBE_GPIE_LLI_DELAY_SHIFT;
+       }
+
+       if (sc->msix > 1) {
+               /* Enable Enhanced MSIX mode */
+               gpie |= IXGBE_GPIE_MSIX_MODE;
+               gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
+                   IXGBE_GPIE_OCD;
+       }
+
+       IXGBE_WRITE_REG(&sc->hw, IXGBE_GPIE, gpie);
+}
+
+/*
+ * Requires sc->max_frame_size to be set.
+ */
+void
+ixgbe_config_delay_values(struct ix_softc *sc)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+       uint32_t rxpb, frame, size, tmp;
+
+       frame = sc->max_frame_size;
+
+       /* Calculate High Water */
+       switch (hw->mac.type) {
+       case ixgbe_mac_X540:
+       case ixgbe_mac_X550:
+       case ixgbe_mac_X550EM_x:
+               tmp = IXGBE_DV_X540(frame, frame);
+               break;
+       default:
+               tmp = IXGBE_DV(frame, frame);
+               break;
+       }
+       size = IXGBE_BT2KB(tmp);
+       rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
+       hw->fc.high_water[0] = rxpb - size;
+
+       /* Now calculate Low Water */
+       switch (hw->mac.type) {
+       case ixgbe_mac_X540:
+       case ixgbe_mac_X550:
+       case ixgbe_mac_X550EM_x:
+               tmp = IXGBE_LOW_DV_X540(frame);
+               break;
+       default:
+               tmp = IXGBE_LOW_DV(frame);
+               break;
+       }
+       hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
+
+       hw->fc.requested_mode = sc->fc;
+       hw->fc.pause_time = IXGBE_FC_PAUSE;
+       hw->fc.send_xon = TRUE;
+}
+
 /*
  * MSIX Interrupt Handlers
  */
 void
 ixgbe_enable_queue(struct ix_softc *sc, uint32_t vector)
@@ -853,11 +920,11 @@ ixgbe_intr(void *arg)
        struct ix_softc *sc = (struct ix_softc *)arg;
        struct ix_queue *que = sc->queues;
        struct ifnet    *ifp = &sc->arpcom.ac_if;
        struct tx_ring  *txr = sc->tx_rings;
        struct ixgbe_hw *hw = &sc->hw;
-       uint32_t         reg_eicr;
+       uint32_t         reg_eicr, mod_mask, msf_mask;
        int              i, refill = 0;
 
        reg_eicr = IXGBE_READ_REG(&sc->hw, IXGBE_EICR);
        if (reg_eicr == 0) {
                ixgbe_enable_intr(sc);
@@ -885,25 +952,47 @@ ixgbe_intr(void *arg)
                ixgbe_update_link_status(sc);
                KERNEL_UNLOCK();
                ifq_start(&ifp->if_snd);
        }
 
-       /* ... more link status change */
        if (hw->mac.type != ixgbe_mac_82598EB) {
-               if (reg_eicr & IXGBE_EICR_GPI_SDP2) {
+               if (reg_eicr & IXGBE_EICR_ECC) {
+                       printf("%s: CRITICAL: ECC ERROR!! "
+                           "Please Reboot!!\n", sc->dev.dv_xname);
+                       IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
+               }
+               /* Check for over temp condition */
+               if (reg_eicr & IXGBE_EICR_TS) {
+                       printf("%s: CRITICAL: OVER TEMP!! "
+                           "PHY IS SHUT DOWN!!\n", ifp->if_xname);
+                       IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
+               }
+       }
+
+       /* Pluggable optics-related interrupt */
+       if (ixgbe_is_sfp(hw)) {
+               if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
+                       mod_mask = IXGBE_EICR_GPI_SDP0_X540;
+                       msf_mask = IXGBE_EICR_GPI_SDP1_X540;
+               } else if (hw->mac.type == ixgbe_mac_X540 ||
+                   hw->mac.type == ixgbe_mac_X550 ||
+                   hw->mac.type == ixgbe_mac_X550EM_x) {
+                       mod_mask = IXGBE_EICR_GPI_SDP2_X540;
+                       msf_mask = IXGBE_EICR_GPI_SDP1_X540;
+               } else {
+                       mod_mask = IXGBE_EICR_GPI_SDP2;
+                       msf_mask = IXGBE_EICR_GPI_SDP1;
+               }
+               if (reg_eicr & mod_mask) {
                        /* Clear the interrupt */
-                       IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
-                       KERNEL_LOCK();
+                       IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
                        ixgbe_handle_mod(sc);
-                       KERNEL_UNLOCK();
                } else if ((hw->phy.media_type != ixgbe_media_type_copper) &&
-                   (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
+                   (reg_eicr & msf_mask)) {
                        /* Clear the interrupt */
-                       IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
-                       KERNEL_LOCK();
+                       IXGBE_WRITE_REG(hw, IXGBE_EICR, msf_mask);
                        ixgbe_handle_msf(sc);
-                       KERNEL_UNLOCK();
                }
        }
 
        /* Check for fan failure */
        if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
@@ -911,16 +1000,16 @@ ixgbe_intr(void *arg)
                printf("%s: CRITICAL: FAN FAILURE!! "
                    "REPLACE IMMEDIATELY!!\n", ifp->if_xname);
                IXGBE_WRITE_REG(&sc->hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
        }
 
-       /* Check for over temp condition */
-       if ((hw->mac.type == ixgbe_mac_X540) &&
-           (reg_eicr & IXGBE_EICR_TS)) {
-               printf("%s: CRITICAL: OVER TEMP!! "
-                   "PHY IS SHUT DOWN!!\n", ifp->if_xname);
-               IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
+       /* External PHY interrupt */
+       if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
+           (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
+               /* Clear the interrupt */
+               IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
+               ixgbe_handle_phy(sc);
        }
 
        for (i = 0; i < sc->num_queues; i++, que++)
                ixgbe_enable_queue(sc, que->msix);
 
@@ -1284,11 +1373,11 @@ ixgbe_stop(void *arg)
        sc->hw.adapter_stopped = FALSE;
        sc->hw.mac.ops.stop_adapter(&sc->hw);
        if (sc->hw.mac.type == ixgbe_mac_82599EB)
                sc->hw.mac.ops.stop_mac_link_on_d3(&sc->hw);
        /* Turn off the laser */
-       if (sc->hw.phy.multispeed_fiber)
+       if (sc->hw.mac.ops.disable_tx_laser)
                sc->hw.mac.ops.disable_tx_laser(&sc->hw);
        timeout_del(&sc->timer);
        timeout_del(&sc->rx_refill);
 
        /* reprogram the RAR[0] in case user changed it. */
@@ -1328,54 +1417,19 @@ ixgbe_identify_hardware(struct ix_softc *sc)
 
        reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
        sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
        sc->hw.subsystem_device_id = PCI_PRODUCT(reg);
 
-       switch (sc->hw.device_id) {
-       case PCI_PRODUCT_INTEL_82598:
-       case PCI_PRODUCT_INTEL_82598AF_DUAL:
-       case PCI_PRODUCT_INTEL_82598_DA_DUAL:
-       case PCI_PRODUCT_INTEL_82598AF:
-       case PCI_PRODUCT_INTEL_82598_SR_DUAL_EM:
-       case PCI_PRODUCT_INTEL_82598EB_SFP:
-       case PCI_PRODUCT_INTEL_82598EB_CX4_DUAL:
-       case PCI_PRODUCT_INTEL_82598EB_CX4:
-       case PCI_PRODUCT_INTEL_82598EB_XF_LR:
-       case PCI_PRODUCT_INTEL_82598AT:
-       case PCI_PRODUCT_INTEL_82598AT2:
-       case PCI_PRODUCT_INTEL_82598AT_DUAL:
-       case PCI_PRODUCT_INTEL_82598_BX:
-               sc->hw.mac.type = ixgbe_mac_82598EB;
-               break;
-       case PCI_PRODUCT_INTEL_82599EN_SFP:
-       case PCI_PRODUCT_INTEL_82599_SFP:
-       case PCI_PRODUCT_INTEL_82599_SFP_EM:
-       case PCI_PRODUCT_INTEL_82599_SFP_FCOE:
-       case PCI_PRODUCT_INTEL_82599_SFP_SF2:
-       case PCI_PRODUCT_INTEL_82599_KX4:
-       case PCI_PRODUCT_INTEL_82599_KX4_MEZZ:
-       case PCI_PRODUCT_INTEL_82599_CX4:
-       case PCI_PRODUCT_INTEL_82599_T3_LOM:
-       case PCI_PRODUCT_INTEL_82599_XAUI:
-       case PCI_PRODUCT_INTEL_82599_COMBO_BP:
-       case PCI_PRODUCT_INTEL_82599_BPLANE_FCOE:
-               sc->hw.mac.type = ixgbe_mac_82599EB;
-               break;
-       case PCI_PRODUCT_INTEL_82599VF:
-               sc->hw.mac.type = ixgbe_mac_82599_vf;
-               break;
-       case PCI_PRODUCT_INTEL_X540T:
-               sc->hw.mac.type = ixgbe_mac_X540;
-               break;
-       default:
-               break;
-       }
+       /* We need this here to set the num_segs below */
+       ixgbe_set_mac_type(&sc->hw);
 
        /* Pick up the 82599 and VF settings */
-       if (sc->hw.mac.type != ixgbe_mac_82598EB)
+       if (sc->hw.mac.type != ixgbe_mac_82598EB) {
                sc->hw.phy.smart_speed = ixgbe_smart_speed;
-       sc->num_segs = IXGBE_82599_SCATTER;
+               sc->num_segs = IXGBE_82599_SCATTER;
+       } else
+               sc->num_segs = IXGBE_82598_SCATTER;
 }
 
 /*********************************************************************
  *
  *  Determine optic type
@@ -1407,10 +1461,12 @@ ixgbe_setup_optics(struct ix_softc *sc)
                sc->optics = IFM_10G_CX4;
        else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
                sc->optics = IFM_1000_SX;
        else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_LX)
                sc->optics = IFM_1000_LX;
+       /* If we get here just set the default */
+       sc->optics = IFM_ETHER | IFM_AUTO;
 }
 
 /*********************************************************************
  *
  *  Setup the Legacy or MSI Interrupt handler
@@ -1566,30 +1622,17 @@ ixgbe_setup_interface(struct ix_softc *sc)
 
 void
 ixgbe_config_link(struct ix_softc *sc)
 {
        uint32_t        autoneg, err = 0;
-       bool            sfp, negotiate;
-
-       switch (sc->hw.phy.type) {
-       case ixgbe_phy_sfp_avago:
-       case ixgbe_phy_sfp_ftl:
-       case ixgbe_phy_sfp_intel:
-       case ixgbe_phy_sfp_unknown:
-       case ixgbe_phy_sfp_passive_tyco:
-       case ixgbe_phy_sfp_passive_unknown:
-               sfp = TRUE;
-               break;
-       default:
-               sfp = FALSE;
-               break;
-       }
+       bool            negotiate;
 
-       if (sfp) {
+       if (ixgbe_is_sfp(&sc->hw)) {
                if (sc->hw.phy.multispeed_fiber) {
                        sc->hw.mac.ops.setup_sfp(&sc->hw);
-                       sc->hw.mac.ops.enable_tx_laser(&sc->hw);
+                       if (sc->hw.mac.ops.enable_tx_laser)
+                               sc->hw.mac.ops.enable_tx_laser(&sc->hw);
                        ixgbe_handle_msf(sc);
                } else
                        ixgbe_handle_mod(sc);
        } else {
                if (sc->hw.mac.ops.check_link)
@@ -1602,11 +1645,11 @@ ixgbe_config_link(struct ix_softc *sc)
                        err = sc->hw.mac.ops.get_link_capabilities(&sc->hw,
                            &autoneg, &negotiate);
                if (err)
                        return;
                if (sc->hw.mac.ops.setup_link)
-                       err = sc->hw.mac.ops.setup_link(&sc->hw,
+                       sc->hw.mac.ops.setup_link(&sc->hw,
                            autoneg, sc->link_up);
        }
 }
 
 /********************************************************************
@@ -2188,133 +2231,10 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
        atomic_dec_int(&txr->tx_avail);
 
        return (0);
 }
 
-#ifdef notyet
-/**********************************************************************
- *
- *  Setup work for hardware segmentation offload (TSO) on
- *  adapters using advanced tx descriptors
- *
- **********************************************************************/
-int
-ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp,
-    uint32_t *cmd_type_len, uint32_t *olinfo_status)
-{
-       struct ix_softc *sc = txr->sc;
-       struct ixgbe_adv_tx_context_desc *TXD;
-       uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
-       uint32_t mss_l4len_idx = 0, paylen;
-       int ctxd, ehdrlen, ip_hlen, tcp_hlen;
-       uint16_t etype;
-#if NVLAN > 0
-       uint16_t vtag = 0;
-       struct ether_vlan_header *eh;
-#else
-       struct ether_header *eh;
-#endif
-       struct ip *ip;
-       struct ip6_hdr *ip6;
-       struct tcphdr *th;
-
-       /*
-        * Determine where frame payload starts.
-        * Jump over vlan headers if already present
-        */
-#if NVLAN > 0
-       eh = mtod(mp, struct ether_vlan_header *);
-       if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
-               etype = ntohs(eh->evl_proto);
-               ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
-       } else {
-               etype = ntohs(eh->evl_encap_proto);
-               ehdrlen = ETHER_HDR_LEN;
-       }
-#else
-       eh = mtod(mp, struct ether_header *);
-       etype = ntohs(eh->ether_type);
-       ehdrlen = ETHER_HDR_LEN;
-#endif
-
-       switch (etype) {
-       case ETHERTYPE_IPV6:
-               ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
-               /* XXX-BZ For now we do not pretend to support ext. hdrs. */
-               if (ip6->ip6_nxt != IPPROTO_TCP)
-                       return (ENXIO);
-               ip_hlen = sizeof(struct ip6_hdr);
-               ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
-               th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
-               th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
-               type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
-               break;
-       case ETHERTYPE_IP:
-               ip = (struct ip *)(mp->m_data + ehdrlen);
-               if (ip->ip_p != IPPROTO_TCP)
-                       return (ENXIO);
-               ip->ip_sum = 0;
-               ip_hlen = ip->ip_hl << 2;
-               th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
-               th->th_sum = in_pseudo(ip->ip_src.s_addr,
-                   ip->ip_dst.s_addr, htons(IPPROTO_TCP));
-               type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
-               /* Tell transmit desc to also do IPv4 checksum. */
-               *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
-               break;
-       default:
-               panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
-                   __func__, ntohs(etype));
-               break;
-       }
-
-       ctxd = txr->next_avail_desc;
-       TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
-
-       tcp_hlen = th->th_off << 2;
-
-       /* This is used in the transmit desc in encap */
-       paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;
-
-#if NVLAN > 0
-       /* VLAN MACLEN IPLEN */
-       if (mp->m_flags & M_VLANTAG) {
-               vtag = mp->m_pkthdr.ether_vtag;
-               vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
-       }
-#endif
-
-       vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
-       vlan_macip_lens |= ip_hlen;
-       TXD->vlan_macip_lens = htole32(vlan_macip_lens);
-
-       /* ADV DTYPE TUCMD */
-       type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
-       type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
-       TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
-
-       /* MSS L4LEN IDX */
-       mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
-       mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
-       TXD->mss_l4len_idx = htole32(mss_l4len_idx);
-
-       TXD->seqnum_seed = htole32(0);
-
-       membar_producer();
-
-       if (++ctxd == sc->num_tx_desc)
-               ctxd = 0;
-
-       atomic_dec_int(&txr->tx_avail);
-       txr->next_avail_desc = ctxd;
-       *cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
-       *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
-       *olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
-       return TRUE;
-}
-#endif
-
 /**********************************************************************
  *
  *  Examine each tx_buffer in the used queue. If the hardware is done
  *  processing the packet then free associated resources. The
  *  tx_buffer is put back on the free queue.
@@ -2490,21 +2410,19 @@ int
 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
 {
        struct ix_softc         *sc = rxr->sc;
        struct ifnet            *ifp = &sc->arpcom.ac_if;
        struct ixgbe_rx_buf     *rxbuf;
-       int                     i, bsize, error;
+       int                     i, error;
 
-       bsize = sizeof(struct ixgbe_rx_buf) * sc->num_rx_desc;
        if (!(rxr->rx_buffers = mallocarray(sc->num_rx_desc,
            sizeof(struct ixgbe_rx_buf), M_DEVBUF, M_NOWAIT | M_ZERO))) {
                printf("%s: Unable to allocate rx_buffer memory\n",
                    ifp->if_xname);
                error = ENOMEM;
                goto fail;
        }
-       bsize = sizeof(struct ixgbe_rx_buf) * sc->num_rx_desc;
 
        rxbuf = rxr->rx_buffers;
        for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
                error = bus_dmamap_create(rxr->rxdma.dma_tag, 16 * 1024, 1,
                    16 * 1024, 0, BUS_DMA_NOWAIT, &rxbuf->map);
@@ -2633,109 +2551,144 @@ fail:
 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
 
 void
 ixgbe_initialize_receive_units(struct ix_softc *sc)
 {
-       struct  rx_ring *rxr = sc->rx_rings;
-       uint32_t        bufsz, rxctrl, fctrl, srrctl, rxcsum;
-       uint32_t        reta, mrqc = 0, hlreg;
-       uint32_t        random[10];
+       struct rx_ring  *rxr = sc->rx_rings;
+       struct ixgbe_hw *hw = &sc->hw;
+       uint32_t        bufsz, fctrl, srrctl, rxcsum;
+       uint32_t        hlreg;
        int             i;
 
        /*
         * Make sure receives are disabled while
         * setting up the descriptor ring
         */
-       rxctrl = IXGBE_READ_REG(&sc->hw, IXGBE_RXCTRL);
-       IXGBE_WRITE_REG(&sc->hw, IXGBE_RXCTRL,
-           rxctrl & ~IXGBE_RXCTRL_RXEN);
+       ixgbe_disable_rx(hw);
 
        /* Enable broadcasts */
-       fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
+       fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
        fctrl |= IXGBE_FCTRL_BAM;
-       fctrl |= IXGBE_FCTRL_DPF;
-       fctrl |= IXGBE_FCTRL_PMCF;
-       IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
+       if (sc->hw.mac.type == ixgbe_mac_82598EB) {
+               fctrl |= IXGBE_FCTRL_DPF;
+               fctrl |= IXGBE_FCTRL_PMCF;
+       }
+       IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
 
        /* Always enable jumbo frame reception */
-       hlreg = IXGBE_READ_REG(&sc->hw, IXGBE_HLREG0);
+       hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
        hlreg |= IXGBE_HLREG0_JUMBOEN;
-       IXGBE_WRITE_REG(&sc->hw, IXGBE_HLREG0, hlreg);
+       IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
 
        bufsz = (sc->rx_mbuf_sz - ETHER_ALIGN) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 
        for (i = 0; i < sc->num_queues; i++, rxr++) {
                uint64_t rdba = rxr->rxdma.dma_map->dm_segs[0].ds_addr;
 
                /* Setup the Base and Length of the Rx Descriptor Ring */
-               IXGBE_WRITE_REG(&sc->hw, IXGBE_RDBAL(i),
+               IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
                               (rdba & 0x00000000ffffffffULL));
-               IXGBE_WRITE_REG(&sc->hw, IXGBE_RDBAH(i), (rdba >> 32));
-               IXGBE_WRITE_REG(&sc->hw, IXGBE_RDLEN(i),
+               IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
+               IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
                    sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
 
                /* Set up the SRRCTL register */
                srrctl = bufsz | IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
-               IXGBE_WRITE_REG(&sc->hw, IXGBE_SRRCTL(i), srrctl);
+               IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
 
                /* Setup the HW Rx Head and Tail Descriptor Pointers */
-               IXGBE_WRITE_REG(&sc->hw, IXGBE_RDH(i), 0);
-               IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(i), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
        }
 
        if (sc->hw.mac.type != ixgbe_mac_82598EB) {
                uint32_t psrtype = IXGBE_PSRTYPE_TCPHDR |
                              IXGBE_PSRTYPE_UDPHDR |
                              IXGBE_PSRTYPE_IPV4HDR |
                              IXGBE_PSRTYPE_IPV6HDR;
-               IXGBE_WRITE_REG(&sc->hw, IXGBE_PSRTYPE(0), psrtype);
+               IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
        }
 
-       rxcsum = IXGBE_READ_REG(&sc->hw, IXGBE_RXCSUM);
-       rxcsum &= ~IXGBE_RXCSUM_PCSD;
-
-       /* Setup RSS */
-       if (sc->num_queues > 1) {
-               int j;
-               reta = 0;
-               /* set up random bits */
-               arc4random_buf(&random, sizeof(random));
-
-               /* Set up the redirection table */
-               for (i = 0, j = 0; i < 128; i++, j++) {
-                       if (j == sc->num_queues)
-                               j = 0;
-                       reta = (reta << 8) | (j * 0x11);
-                       if ((i & 3) == 3)
-                               IXGBE_WRITE_REG(&sc->hw, IXGBE_RETA(i >> 2), reta);
-               }
+       rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
 
-               /* Now fill our hash function seeds */
-               for (i = 0; i < 10; i++)
-                       IXGBE_WRITE_REG(&sc->hw, IXGBE_RSSRK(i), random[i]);
-
-               /* Perform hash on these packet types */
-               mrqc = IXGBE_MRQC_RSSEN
-                   | IXGBE_MRQC_RSS_FIELD_IPV4
-                   | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
-                   | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
-                   | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
-                   | IXGBE_MRQC_RSS_FIELD_IPV6_EX
-                   | IXGBE_MRQC_RSS_FIELD_IPV6
-                   | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
-                   | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
-                   | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
-               IXGBE_WRITE_REG(&sc->hw, IXGBE_MRQC, mrqc);
+       ixgbe_initialize_rss_mapping(sc);
 
+       if (sc->num_queues > 1) {
                /* RSS and RX IPP Checksum are mutually exclusive */
                rxcsum |= IXGBE_RXCSUM_PCSD;
        }
 
+       /* This is useful for calculating UDP/IP fragment checksums */
        if (!(rxcsum & IXGBE_RXCSUM_PCSD))
                rxcsum |= IXGBE_RXCSUM_IPPCSE;
 
-       IXGBE_WRITE_REG(&sc->hw, IXGBE_RXCSUM, rxcsum);
+       IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
+}
+
+void
+ixgbe_initialize_rss_mapping(struct ix_softc *sc)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+       uint32_t reta = 0, mrqc, rss_key[10];
+       int i, j, queue_id, table_size, index_mult;
+
+       /* set up random bits */
+       arc4random_buf(&rss_key, sizeof(rss_key));
+
+       /* Set multiplier for RETA setup and table size based on MAC */
+       index_mult = 0x1;
+       table_size = 128;
+       switch (sc->hw.mac.type) {
+       case ixgbe_mac_82598EB:
+               index_mult = 0x11;
+               break;
+       case ixgbe_mac_X550:
+       case ixgbe_mac_X550EM_x:
+               table_size = 512;
+               break;
+       default:
+               break;
+       }
+
+       /* Set up the redirection table */
+       for (i = 0, j = 0; i < table_size; i++, j++) {
+               if (j == sc->num_queues) j = 0;
+               queue_id = (j * index_mult);
+               /*
+                * The low 8 bits are for hash value (n+0);
+                * The next 8 bits are for hash value (n+1), etc.
+                */
+               reta = reta >> 8;
+               reta = reta | ( ((uint32_t) queue_id) << 24);
+               if ((i & 3) == 3) {
+                       if (i < 128)
+                               IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
+                       else
+                               IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
+                                   reta);
+                       reta = 0;
+               }
+       }
+
+       /* Now fill our hash function seeds */
+       for (i = 0; i < 10; i++)
+               IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
+
+       /*
+        * Disable UDP - IP fragments aren't currently being handled
+        * and so we end up with a mix of 2-tuple and 4-tuple
+        * traffic.
+        */
+       mrqc = IXGBE_MRQC_RSSEN
+            | IXGBE_MRQC_RSS_FIELD_IPV4
+            | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
+            | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
+            | IXGBE_MRQC_RSS_FIELD_IPV6_EX
+            | IXGBE_MRQC_RSS_FIELD_IPV6
+            | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
+       ;
+       IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
 }
 
 /*********************************************************************
  *
  *  Free all receive rings.
@@ -3018,34 +2971,45 @@ ixgbe_enable_intr(struct ix_softc *sc)
                    mask |= IXGBE_EIMS_GPI_SDP1;
 
        switch (sc->hw.mac.type) {
        case ixgbe_mac_82599EB:
                mask |= IXGBE_EIMS_ECC;
+               /* Temperature sensor on some adapters */
                mask |= IXGBE_EIMS_GPI_SDP0;
+               /* SFP+ (RX_LOS_N & MOD_ABS_N) */
                mask |= IXGBE_EIMS_GPI_SDP1;
                mask |= IXGBE_EIMS_GPI_SDP2;
                break;
        case ixgbe_mac_X540:
                mask |= IXGBE_EIMS_ECC;
                /* Detect if Thermal Sensor is enabled */
                fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
                if (fwsm & IXGBE_FWSM_TS_ENABLED)
                        mask |= IXGBE_EIMS_TS;
                break;
+       case ixgbe_mac_X550:
+       case ixgbe_mac_X550EM_x:
+               mask |= IXGBE_EIMS_ECC;
+               /* MAC thermal sensor is automatically enabled */
+               mask |= IXGBE_EIMS_TS;
+               /* Some devices use SDP0 for important information */
+               if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
+                   hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
+                       mask |= IXGBE_EIMS_GPI_SDP0_X540;
        default:
                break;
        }
 
        IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
 
-       /* With RSS we use auto clear */
+       /* With MSI-X we use auto clear */
        if (sc->msix > 1) {
                mask = IXGBE_EIMS_ENABLE_MASK;
                /* Don't autoclear Link */
                mask &= ~IXGBE_EIMS_OTHER;
                mask &= ~IXGBE_EIMS_LSC;
-               IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, mask);
+               IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
        }
 
        /*
         * Now enable all queues, this is done separately to
         * allow for handling the extended (beyond 32) MSIX
@@ -3142,10 +3106,12 @@ ixgbe_set_ivar(struct ix_softc *sc, uint8_t entry, uint8_t vector, int8_t type)
                IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
                break;
 
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
+       case ixgbe_mac_X550:
+       case ixgbe_mac_X550EM_x:
                if (type == -1) { /* MISC IVAR */
                        index = (entry & 1) * 8;
                        ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
                        ivar &= ~(0xFF << index);
                        ivar |= (vector << index);
@@ -3196,12 +3162,32 @@ ixgbe_configure_ivars(struct ix_softc *sc)
  */
 void
 ixgbe_handle_mod(struct ix_softc *sc)
 {
        struct ixgbe_hw *hw = &sc->hw;
+       enum ixgbe_phy_type orig_type = hw->phy.type;
        uint32_t err;
 
+       /* Check to see if the PHY type changed */
+       if (hw->phy.ops.identify) {
+               hw->phy.type = ixgbe_phy_unknown;
+               hw->phy.ops.identify(hw);
+       }
+
+       if (hw->phy.type != orig_type) {
+               if (hw->phy.type == ixgbe_phy_none) {
+                       hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+                       goto out;
+               }
+
+               /* Try to do the initialization that was skipped before */
+               if (hw->phy.ops.init)
+                       hw->phy.ops.init(hw);
+               if (hw->phy.ops.reset)
+                       hw->phy.ops.reset(hw);
+       }
+
        err = hw->phy.ops.identify_sfp(hw);
        if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
                printf("%s: Unsupported SFP+ module type was detected!\n",
                    sc->dev.dv_xname);
                return;
@@ -3210,10 +3196,11 @@ ixgbe_handle_mod(struct ix_softc *sc)
        if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
                printf("%s: Setup failure - unsupported SFP+ module type!\n",
                    sc->dev.dv_xname);
                return;
        }
+ out:
        /* Set the optics type so system reports correctly */
        ixgbe_setup_optics(sc);
 
        ixgbe_handle_msf(sc);
 }
@@ -3236,10 +3223,30 @@ ixgbe_handle_msf(struct ix_softc *sc)
        }
        if (hw->mac.ops.setup_link)
                hw->mac.ops.setup_link(hw, autoneg, TRUE);
 }
 
+/*
+ * External PHY interrupts handler
+ */
+void
+ixgbe_handle_phy(struct ix_softc *sc)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+       int error;
+
+       error = hw->phy.ops.handle_lasi(hw);
+       if (error == IXGBE_ERR_OVERTEMP)
+               printf("%s: CRITICAL: EXTERNAL PHY OVER TEMP!! "
+                   " PHY will downshift to lower power state!\n",
+                   sc->dev.dv_xname);
+       else if (error)
+               printf("%s: Error handling LASI interrupt: %d\n",
+                   sc->dev.dv_xname, error);
+
+}
+
 /**********************************************************************
  *
  *  Update the board statistics counters.
  *
  **********************************************************************/
diff --git sys/dev/pci/ixgbe.c sys/dev/pci/ixgbe.c
index 6304f04..14325aa 100644
--- sys/dev/pci/ixgbe.c
+++ sys/dev/pci/ixgbe.c
@@ -654,11 +654,12 @@ int32_t ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, uint8_t *mac_addr)
 void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw,
                                       uint16_t link_status)
 {
        struct ixgbe_mac_info *mac = &hw->mac;
 
-       hw->bus.type = ixgbe_bus_type_pci_express;
+       if (hw->bus.type == ixgbe_bus_type_unknown)
+               hw->bus.type = ixgbe_bus_type_pci_express;
 
        switch (link_status & IXGBE_PCI_LINK_WIDTH) {
        case IXGBE_PCI_LINK_WIDTH_1:
                hw->bus.width = ixgbe_bus_width_pcie_x1;
                break;
@@ -2285,14 +2286,15 @@ int32_t ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
                } else {
                        IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
                        /*
                         * In order to prevent Tx hangs when the internal Tx
                         * switch is enabled we must set the high water mark
-                        * to the maximum FCRTH value.  This allows the Tx
-                        * switch to function even under heavy Rx workloads.
+                        * to the Rx packet buffer size - 24KB.  This allows
+                        * the Tx switch to function even under heavy Rx
+                        * workloads.
                         */
-                       fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
+                       fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
                }
 
                IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
        }
 
@@ -3567,11 +3569,12 @@ int32_t ixgbe_host_interface_command(struct ixgbe_hw *hw, uint32_t *buffer,
  * when a reset occurs.  This function prevents this by flushing the PCIe
  * buffers on the system.
  **/
 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
 {
-       uint32_t gcr_ext, hlreg0;
+       uint32_t gcr_ext, hlreg0, i, poll;
+       uint16_t value;
 
        /*
         * If double reset is not requested then all transactions should
         * already be clear and as such there is no work to do
         */
@@ -3584,10 +3587,27 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
         * has already been cleared.
         */
        hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
 
+       /* Wait for a last completion before clearing buffers */
+       IXGBE_WRITE_FLUSH(hw);
+       msec_delay(3);
+
+       /*
+        * Before proceeding, make sure that the PCIe block does not have
+        * transactions pending.
+        */
+       poll = ixgbe_pcie_timeout_poll(hw);
+       for (i = 0; i < poll; i++) {
+               usec_delay(100);
+               value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
+               if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
+                       goto out;
+       }
+
+out:
        /* initiate cleaning flow for buffers in the PCIe transaction layer */
        gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
        IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
                        gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
 
@@ -3933,10 +3953,16 @@ int32_t ixgbe_init_shared_code(struct ixgbe_hw *hw)
                status = ixgbe_init_ops_82599(hw);
                break;
        case ixgbe_mac_X540:
                status = ixgbe_init_ops_X540(hw);
                break;
+       case ixgbe_mac_X550:
+               status = ixgbe_init_ops_X550(hw);
+               break;
+       case ixgbe_mac_X550EM_x:
+               status = ixgbe_init_ops_X550EM(hw);
+               break;
        default:
                status = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
                break;
        }
        hw->mac.max_link_up_time = IXGBE_LINK_UP_TIME;
diff --git sys/dev/pci/ixgbe_phy.c sys/dev/pci/ixgbe_phy.c
index 0787534..e6869a3 100644
--- sys/dev/pci/ixgbe_phy.c
+++ sys/dev/pci/ixgbe_phy.c
@@ -1002,31 +1002,19 @@ int32_t ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw)
  **/
 int32_t ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
                                                   ixgbe_link_speed *speed,
                                                   bool *autoneg)
 {
-       int32_t status = IXGBE_ERR_LINK_SETUP;
-       uint16_t speed_ability;
+       int32_t status = IXGBE_SUCCESS;
 
        DEBUGFUNC("ixgbe_get_copper_link_capabilities_generic");
 
-       *speed = 0;
        *autoneg = TRUE;
+       if (!hw->phy.speeds_supported)
+               status = ixgbe_get_copper_speeds_supported(hw);
 
-       status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
-                                     IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-                                     &speed_ability);
-
-       if (status == IXGBE_SUCCESS) {
-               if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
-                       *speed |= IXGBE_LINK_SPEED_10GB_FULL;
-               if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G)
-                       *speed |= IXGBE_LINK_SPEED_1GB_FULL;
-               if (speed_ability & IXGBE_MDIO_PHY_SPEED_100M)
-                       *speed |= IXGBE_LINK_SPEED_100_FULL;
-       }
-
+       *speed = hw->phy.speeds_supported;
        return status;
 }
 
 /**
  *  ixgbe_check_phy_link_tnx - Determine link and speed status
@@ -1444,11 +1432,11 @@ int32_t ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                                hw->phy.sfp_type = ixgbe_sfp_type_lr;
                        else if (comp_codes_10g & IXGBE_SFF_DA_BAD_HP_CABLE)
                                hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
                        else
                                hw->phy.sfp_type = ixgbe_sfp_type_unknown;
-               } else if (hw->mac.type == ixgbe_mac_82599EB) {
+               } else {
                        if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
                                if (hw->bus.lan_id == 0)
                                        hw->phy.sfp_type =
                                                     ixgbe_sfp_type_da_cu_core0;
                                else
diff --git sys/dev/pci/ixgbe_type.h sys/dev/pci/ixgbe_type.h
index 2fd32eb..3eedb78 100644
--- sys/dev/pci/ixgbe_type.h
+++ sys/dev/pci/ixgbe_type.h
@@ -3479,10 +3479,11 @@ struct ixgbe_phy_operations {
        void (*i2c_bus_clear)(struct ixgbe_hw *);
        int32_t (*read_i2c_combined)(struct ixgbe_hw *, uint8_t addr, uint16_t reg, uint16_t *val);
        int32_t (*write_i2c_combined)(struct ixgbe_hw *, uint8_t addr, uint16_t reg, uint16_t val);
        int32_t (*check_overtemp)(struct ixgbe_hw *);
        int32_t (*set_phy_power)(struct ixgbe_hw *, bool on);
+       int32_t (*handle_lasi)(struct ixgbe_hw *hw);
        int32_t (*read_i2c_combined_unlocked)(struct ixgbe_hw *, uint8_t addr, 
uint16_t reg,
                                              uint16_t *value);
        int32_t (*write_i2c_combined_unlocked)(struct ixgbe_hw *, uint8_t addr, 
uint16_t reg,
                                               uint16_t value);
        int32_t (*read_i2c_byte_unlocked)(struct ixgbe_hw *, uint8_t offset, uint8_t addr,
