Module Name: src
Committed By: msaitoh
Date: Thu May 23 10:40:40 UTC 2019
Modified Files:
src/sys/arch/arm/imx: if_enet.c
src/sys/arch/arm/sunxi: sunxi_emac.c
src/sys/arch/arm/xscale: ixp425_if_npe.c
src/sys/arch/mips/sibyte/dev: sbmac.c
src/sys/dev/ic: i82586.c mtd803.c seeq8005.c
src/sys/dev/marvell: if_gfe.c
src/sys/dev/pci: if_kse.c if_ti.c if_vge.c if_vioif.c if_vr.c
src/sys/dev/usb: if_smsc.c
Log Message:
No functional change:
 - Simplify access to struct ethercom near ETHER_FIRST_MULTI() by taking a
   local pointer (pattern sketched below).
 - Simplify MII structure initialization, likewise via a local struct
   mii_data pointer.
 - u_int*_t -> uint*_t.
 - KNF.
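For readers skimming the diffs below, the first two items boil down to the
following pattern. This is an illustrative sketch only, not part of the
commit: the driver name "xx", its softc layout, and its MII callbacks are
placeholders; the real per-file changes are in the diffs that follow.

#include <sys/param.h>
#include <sys/device.h>
#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <dev/mii/miivar.h>

/* Placeholder MII callbacks (declared only, as in a real driver). */
static int  xx_mii_readreg(device_t, int, int, uint16_t *);
static int  xx_mii_writereg(device_t, int, int, uint16_t);
static void xx_mii_statchg(struct ifnet *);

struct xx_softc {                       /* placeholder softc */
        device_t        sc_dev;
        struct ethercom sc_ethercom;
        struct mii_data sc_mii;
};

/* Take a local struct ethercom pointer once, use it throughout. */
static void
xx_setmulti(struct xx_softc *sc)
{
        struct ethercom *ec = &sc->sc_ethercom;
        struct ifnet *ifp = &ec->ec_if;
        struct ether_multi *enm;
        struct ether_multistep step;

        if ((ifp->if_flags & IFF_PROMISC) != 0)
                return;                 /* sketch: nothing to do here */

        ETHER_LOCK(ec);
        ETHER_FIRST_MULTI(step, ec, enm);   /* was: &sc->sc_ethercom */
        while (enm != NULL) {
                /* ... hash enm->enm_addrlo into the filter ... */
                ETHER_NEXT_MULTI(step, enm);
        }
        ETHER_UNLOCK(ec);
}

/* Same idea for MII setup: a local struct mii_data pointer. */
static void
xx_mii_setup(struct xx_softc *sc, struct ifnet *ifp)
{
        struct mii_data * const mii = &sc->sc_mii;

        sc->sc_ethercom.ec_mii = mii;
        mii->mii_ifp = ifp;
        mii->mii_readreg = xx_mii_readreg;
        mii->mii_writereg = xx_mii_writereg;
        mii->mii_statchg = xx_mii_statchg;
        ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
        mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
            MII_OFFSET_ANY, 0);
        if (LIST_FIRST(&mii->mii_phys) == NULL) {
                ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
                ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
        } else
                ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
}

The remaining items are mechanical: u_int*_t types become the C99 uint*_t
spellings, and KNF spacing is applied around binary operators (e.g.
"BUS_DMA_READ|BUS_DMA_NOWAIT" -> "BUS_DMA_READ | BUS_DMA_NOWAIT").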
To generate a diff of this commit:
cvs rdiff -u -r1.19 -r1.20 src/sys/arch/arm/imx/if_enet.c
cvs rdiff -u -r1.26 -r1.27 src/sys/arch/arm/sunxi/sunxi_emac.c
cvs rdiff -u -r1.39 -r1.40 src/sys/arch/arm/xscale/ixp425_if_npe.c
cvs rdiff -u -r1.57 -r1.58 src/sys/arch/mips/sibyte/dev/sbmac.c
cvs rdiff -u -r1.81 -r1.82 src/sys/dev/ic/i82586.c
cvs rdiff -u -r1.37 -r1.38 src/sys/dev/ic/mtd803.c
cvs rdiff -u -r1.61 -r1.62 src/sys/dev/ic/seeq8005.c
cvs rdiff -u -r1.51 -r1.52 src/sys/dev/marvell/if_gfe.c
cvs rdiff -u -r1.35 -r1.36 src/sys/dev/pci/if_kse.c
cvs rdiff -u -r1.108 -r1.109 src/sys/dev/pci/if_ti.c
cvs rdiff -u -r1.69 -r1.70 src/sys/dev/pci/if_vge.c
cvs rdiff -u -r1.47 -r1.48 src/sys/dev/pci/if_vioif.c
cvs rdiff -u -r1.127 -r1.128 src/sys/dev/pci/if_vr.c
cvs rdiff -u -r1.43 -r1.44 src/sys/dev/usb/if_smsc.c
Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
Modified files:
Index: src/sys/arch/arm/imx/if_enet.c
diff -u src/sys/arch/arm/imx/if_enet.c:1.19 src/sys/arch/arm/imx/if_enet.c:1.20
--- src/sys/arch/arm/imx/if_enet.c:1.19 Wed Apr 24 11:18:20 2019
+++ src/sys/arch/arm/imx/if_enet.c Thu May 23 10:40:39 2019
@@ -1,4 +1,4 @@
-/* $NetBSD: if_enet.c,v 1.19 2019/04/24 11:18:20 msaitoh Exp $ */
+/* $NetBSD: if_enet.c,v 1.20 2019/05/23 10:40:39 msaitoh Exp $ */
/*
* Copyright (c) 2014 Ryo Shimizu <[email protected]>
@@ -31,7 +31,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_enet.c,v 1.19 2019/04/24 11:18:20 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_enet.c,v 1.20 2019/05/23 10:40:39 msaitoh Exp $");
#include "vlan.h"
@@ -176,6 +176,7 @@ enet_attach_common(device_t self, bus_sp
{
struct enet_softc *sc;
struct ifnet *ifp;
+ struct mii_data * const mii = &sc->sc_mii;
sc = device_private(self);
sc->sc_dev = self;
@@ -283,23 +284,20 @@ enet_attach_common(device_t self, bus_sp
IFQ_SET_READY(&ifp->if_snd);
/* setup MII */
- sc->sc_ethercom.ec_mii = &sc->sc_mii;
- sc->sc_mii.mii_ifp = ifp;
- sc->sc_mii.mii_readreg = enet_miibus_readreg;
- sc->sc_mii.mii_writereg = enet_miibus_writereg;
- sc->sc_mii.mii_statchg = enet_miibus_statchg;
- ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
- enet_mediastatus);
+ sc->sc_ethercom.ec_mii = mii;
+ mii->mii_ifp = ifp;
+ mii->mii_readreg = enet_miibus_readreg;
+ mii->mii_writereg = enet_miibus_writereg;
+ mii->mii_statchg = enet_miibus_statchg;
+ ifmedia_init(&mii->mii_media, 0, ether_mediachange, enet_mediastatus);
/* try to attach PHY */
- mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
- MII_OFFSET_ANY, 0);
- if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
- ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
- 0, NULL);
- ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
+ mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);
+ if (LIST_FIRST(&mii->mii_phys) == NULL) {
+ ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
+ ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
} else {
- ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
+ ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
}
if_attach(ifp);
@@ -460,9 +458,9 @@ enet_intr(void *arg)
status = ENET_REG_READ(sc, ENET_EIR);
if (sc->sc_imxtype == 7) {
- if (status & (ENET_EIR_TXF|ENET_EIR_TXF1|ENET_EIR_TXF2))
+ if (status & (ENET_EIR_TXF | ENET_EIR_TXF1 | ENET_EIR_TXF2))
enet_tx_intr(arg);
- if (status & (ENET_EIR_RXF|ENET_EIR_RXF1|ENET_EIR_RXF2))
+ if (status & (ENET_EIR_RXF | ENET_EIR_RXF1 | ENET_EIR_RXF2))
enet_rx_intr(arg);
} else {
if (status & ENET_EIR_TXF)
@@ -761,17 +759,16 @@ enet_rx_csum(struct enet_softc *sc, stru
static void
enet_setmulti(struct enet_softc *sc)
{
- struct ifnet *ifp;
+ struct ethercom *ec = &sc->sc_ethercom;
+ struct ifnet *ifp = &ec->ec_if;
struct ether_multi *enm;
struct ether_multistep step;
int promisc;
uint32_t crc;
uint32_t gaddr[2];
- ifp = &sc->sc_ethercom.ec_if;
-
promisc = 0;
- if ((ifp->if_flags & IFF_PROMISC) || sc->sc_ethercom.ec_multicnt > 0) {
+ if ((ifp->if_flags & IFF_PROMISC) || ec->ec_multicnt > 0) {
ifp->if_flags |= IFF_ALLMULTI;
if (ifp->if_flags & IFF_PROMISC)
promisc = 1;
@@ -779,7 +776,7 @@ enet_setmulti(struct enet_softc *sc)
} else {
gaddr[0] = gaddr[1] = 0;
- ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
+ ETHER_FIRST_MULTI(step, ec, enm);
while (enm != NULL) {
crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
gaddr[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
@@ -1888,9 +1885,10 @@ enet_init_regs(struct enet_softc *sc, in
sc->sc_rxdesc_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
/* enable interrupts */
- val = ENET_EIMR|ENET_EIR_TXF|ENET_EIR_RXF|ENET_EIR_EBERR;
+ val = ENET_EIMR | ENET_EIR_TXF | ENET_EIR_RXF | ENET_EIR_EBERR;
if (sc->sc_imxtype == 7)
- val |= ENET_EIR_TXF2|ENET_EIR_RXF2|ENET_EIR_TXF1|ENET_EIR_RXF1;
+ val |= ENET_EIR_TXF2 | ENET_EIR_RXF2 | ENET_EIR_TXF1 |
+ ENET_EIR_RXF1;
ENET_REG_WRITE(sc, ENET_EIMR, val);
/* enable ether */
Index: src/sys/arch/arm/sunxi/sunxi_emac.c
diff -u src/sys/arch/arm/sunxi/sunxi_emac.c:1.26 src/sys/arch/arm/sunxi/sunxi_emac.c:1.27
--- src/sys/arch/arm/sunxi/sunxi_emac.c:1.26 Thu May 9 01:46:37 2019
+++ src/sys/arch/arm/sunxi/sunxi_emac.c Thu May 23 10:40:39 2019
@@ -1,4 +1,4 @@
-/* $NetBSD: sunxi_emac.c,v 1.26 2019/05/09 01:46:37 ozaki-r Exp $ */
+/* $NetBSD: sunxi_emac.c,v 1.27 2019/05/23 10:40:39 msaitoh Exp $ */
/*-
* Copyright (c) 2016-2017 Jared McNeill <[email protected]>
@@ -33,7 +33,7 @@
#include "opt_net_mpsafe.h"
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sunxi_emac.c,v 1.26 2019/05/09 01:46:37 ozaki-r Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sunxi_emac.c,v 1.27 2019/05/23 10:40:39 msaitoh Exp $");
#include <sys/param.h>
#include <sys/bus.h>
@@ -285,7 +285,7 @@ sunxi_emac_update_link(struct sunxi_emac
WR4(sc, EMAC_RX_CTL_0, val);
val = RD4(sc, EMAC_TX_FLOW_CTL);
- val &= ~(PAUSE_TIME|TX_FLOW_CTL_EN);
+ val &= ~(PAUSE_TIME | TX_FLOW_CTL_EN);
if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
val |= TX_FLOW_CTL_EN;
if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
@@ -346,7 +346,7 @@ sunxi_emac_setup_txbuf(struct sunxi_emac
u_int csum_flags;
error = bus_dmamap_load_mbuf(sc->tx.buf_tag,
- sc->tx.buf_map[index].map, m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
+ sc->tx.buf_map[index].map, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
if (error == EFBIG) {
device_printf(sc->dev,
"TX packet needs too many DMA segments, dropping...\n");
@@ -409,7 +409,7 @@ sunxi_emac_setup_rxbuf(struct sunxi_emac
m_adj(m, ETHER_ALIGN);
error = bus_dmamap_load_mbuf(sc->rx.buf_tag,
- sc->rx.buf_map[index].map, m, BUS_DMA_READ|BUS_DMA_NOWAIT);
+ sc->rx.buf_map[index].map, m, BUS_DMA_READ | BUS_DMA_NOWAIT);
if (error != 0)
return error;
@@ -473,7 +473,7 @@ sunxi_emac_start_locked(struct sunxi_ema
if (cnt != 0) {
sunxi_emac_dma_sync(sc, sc->tx.desc_tag, sc->tx.desc_map,
start, sc->tx.cur, TX_DESC_COUNT,
- BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Start and run TX DMA */
val = RD4(sc, EMAC_TX_CTL_1);
@@ -525,7 +525,8 @@ bitrev32(uint32_t x)
static void
sunxi_emac_setup_rxfilter(struct sunxi_emac_softc *sc)
{
- struct ifnet *ifp = &sc->ec.ec_if;
+ struct ethercom *ec = &sc->ec;
+ struct ifnet *ifp = &ec->ec_if;
uint32_t val, crc, hashreg, hashbit, hash[2], machi, maclo;
struct ether_multi *enm;
struct ether_multistep step;
@@ -543,8 +544,8 @@ sunxi_emac_setup_rxfilter(struct sunxi_e
hash[0] = hash[1] = ~0;
} else {
val |= HASH_MULTICAST;
- ETHER_LOCK(&sc->ec);
- ETHER_FIRST_MULTI(step, &sc->ec, enm);
+ ETHER_LOCK(ec);
+ ETHER_FIRST_MULTI(step, ec, enm);
while (enm != NULL) {
crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
crc &= 0x7f;
@@ -554,7 +555,7 @@ sunxi_emac_setup_rxfilter(struct sunxi_e
hash[hashreg] |= (1 << hashbit);
ETHER_NEXT_MULTI(step, enm);
}
- ETHER_UNLOCK(&sc->ec);
+ ETHER_UNLOCK(ec);
}
/* Write our unicast address */
@@ -783,8 +784,8 @@ sunxi_emac_rxintr(struct sunxi_emac_soft
for (index = sc->rx.cur; ; index = RX_NEXT(index)) {
sunxi_emac_dma_sync(sc, sc->rx.desc_tag, sc->rx.desc_map,
- index, index + 1,
- RX_DESC_COUNT, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+ index, index + 1, RX_DESC_COUNT,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
status = le32toh(sc->rx.desc_ring[index].status);
if ((status & RX_DESC_CTL) != 0)
@@ -831,7 +832,7 @@ sunxi_emac_rxintr(struct sunxi_emac_soft
sunxi_emac_dma_sync(sc, sc->rx.desc_tag, sc->rx.desc_map,
index, index + 1,
- RX_DESC_COUNT, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
+ RX_DESC_COUNT, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
}
sc->rx.cur = index;
@@ -854,7 +855,7 @@ sunxi_emac_txintr(struct sunxi_emac_soft
KASSERT(sc->tx.queued > 0 && sc->tx.queued <= TX_DESC_COUNT);
sunxi_emac_dma_sync(sc, sc->tx.desc_tag, sc->tx.desc_map,
i, i + 1, TX_DESC_COUNT,
- BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
desc = &sc->tx.desc_ring[i];
status = le32toh(desc->status);
if ((status & TX_DESC_CTL) != 0)
@@ -872,7 +873,7 @@ sunxi_emac_txintr(struct sunxi_emac_soft
sunxi_emac_setup_txdesc(sc, i, 0, 0, 0);
sunxi_emac_dma_sync(sc, sc->tx.desc_tag, sc->tx.desc_map,
i, i + 1, TX_DESC_COUNT,
- BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
ifp->if_flags &= ~IFF_OACTIVE;
ifp->if_opackets++;
@@ -896,7 +897,7 @@ sunxi_emac_intr(void *arg)
if (val & RX_INT)
sunxi_emac_rxintr(sc);
- if (val & (TX_INT|TX_BUF_UA_INT)) {
+ if (val & (TX_INT | TX_BUF_UA_INT)) {
sunxi_emac_txintr(sc);
if_schedule_deferred_start(ifp);
}
@@ -1113,7 +1114,7 @@ sunxi_emac_setup_resources(struct sunxi_
return 0;
}
-static void
+static void
sunxi_emac_get_eaddr(struct sunxi_emac_softc *sc, uint8_t *eaddr)
{
uint32_t maclo, machi;
@@ -1206,7 +1207,7 @@ sunxi_emac_setup_dma(struct sunxi_emac_s
memset(sc->tx.desc_ring, 0, TX_DESC_SIZE);
bus_dmamap_sync(sc->dmat, sc->tx.desc_map, 0, TX_DESC_SIZE,
- BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
for (i = 0; i < TX_DESC_COUNT; i++)
sc->tx.desc_ring[i].next =
@@ -1267,7 +1268,7 @@ sunxi_emac_setup_dma(struct sunxi_emac_s
}
bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
0, sc->rx.desc_map->dm_mapsize,
- BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
return 0;
}
@@ -1449,7 +1450,7 @@ sunxi_emac_attach(device_t parent, devic
aprint_error_dev(self, "no PHY found!\n");
return;
}
- ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
+ ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
/* Attach interface */
if_attach(ifp);
Index: src/sys/arch/arm/xscale/ixp425_if_npe.c
diff -u src/sys/arch/arm/xscale/ixp425_if_npe.c:1.39 src/sys/arch/arm/xscale/ixp425_if_npe.c:1.40
--- src/sys/arch/arm/xscale/ixp425_if_npe.c:1.39 Fri Apr 26 06:33:33 2019
+++ src/sys/arch/arm/xscale/ixp425_if_npe.c Thu May 23 10:40:39 2019
@@ -1,4 +1,4 @@
-/* $NetBSD: ixp425_if_npe.c,v 1.39 2019/04/26 06:33:33 msaitoh Exp $ */
+/* $NetBSD: ixp425_if_npe.c,v 1.40 2019/05/23 10:40:39 msaitoh Exp $ */
/*-
* Copyright (c) 2006 Sam Leffler. All rights reserved.
@@ -28,7 +28,7 @@
#if 0
__FBSDID("$FreeBSD: src/sys/arm/xscale/ixp425/if_npe.c,v 1.1 2006/11/19 23:55:23 sam Exp $");
#endif
-__KERNEL_RCSID(0, "$NetBSD: ixp425_if_npe.c,v 1.39 2019/04/26 06:33:33 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ixp425_if_npe.c,v 1.40 2019/05/23 10:40:39 msaitoh Exp $");
/*
* Intel XScale NPE Ethernet driver.
@@ -192,13 +192,13 @@ static int npe_activate(struct npe_softc
#if 0
static void npe_deactivate(struct npe_softc *);
#endif
-static void npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr);
-static void npe_setmac(struct npe_softc *sc, const u_char *eaddr);
-static void npe_getmac(struct npe_softc *sc);
-static void npe_txdone(int qid, void *arg);
+static void npe_ifmedia_status(struct ifnet *, struct ifmediareq *);
+static void npe_setmac(struct npe_softc *, const u_char *);
+static void npe_getmac(struct npe_softc *);
+static void npe_txdone(int, void *);
static int npe_rxbuf_init(struct npe_softc *, struct npebuf *,
struct mbuf *);
-static void npe_rxdone(int qid, void *arg);
+static void npe_rxdone(int, void *);
static void npeinit_macreg(struct npe_softc *);
static int npeinit(struct ifnet *);
static void npeinit_resetcb(void *);
@@ -206,15 +206,14 @@ static void npeinit_locked(void *);
static void npestart(struct ifnet *);
static void npestop(struct ifnet *, int);
static void npewatchdog(struct ifnet *);
-static int npeioctl(struct ifnet * ifp, u_long, void *);
+static int npeioctl(struct ifnet *, u_long, void *);
-static int npe_setrxqosentry(struct npe_softc *, int classix,
- int trafclass, int qid);
+static int npe_setrxqosentry(struct npe_softc *, int, int, int);
static int npe_updatestats(struct npe_softc *);
#if 0
static int npe_getstats(struct npe_softc *);
static uint32_t npe_getimageid(struct npe_softc *);
-static int npe_setloopback(struct npe_softc *, int ena);
+static int npe_setloopback(struct npe_softc *, int);
#endif
static int npe_miibus_readreg(device_t, int, int, uint16_t *);
@@ -265,6 +264,7 @@ npe_attach(device_t parent, device_t sel
struct ixpnpe_softc *isc = device_private(parent);
struct ixpnpe_attach_args *na = arg;
struct ifnet *ifp;
+ struct mii_data * const mii = &sc->sc_mii;
aprint_naive("\n");
aprint_normal(": Ethernet co-processor\n");
@@ -277,7 +277,7 @@ npe_attach(device_t parent, device_t sel
sc->sc_phy = na->na_phy;
memset(&sc->sc_ethercom, 0, sizeof(sc->sc_ethercom));
- memset(&sc->sc_mii, 0, sizeof(sc->sc_mii));
+ memset(mii, 0, sizeof(*mii));
callout_init(&sc->sc_tick_ch, 0);
@@ -294,22 +294,22 @@ npe_attach(device_t parent, device_t sel
ether_sprintf(sc->sc_enaddr));
ifp = &sc->sc_ethercom.ec_if;
- sc->sc_mii.mii_ifp = ifp;
- sc->sc_mii.mii_readreg = npe_miibus_readreg;
- sc->sc_mii.mii_writereg = npe_miibus_writereg;
- sc->sc_mii.mii_statchg = npe_miibus_statchg;
- sc->sc_ethercom.ec_mii = &sc->sc_mii;
+ mii->mii_ifp = ifp;
+ mii->mii_readreg = npe_miibus_readreg;
+ mii->mii_writereg = npe_miibus_writereg;
+ mii->mii_statchg = npe_miibus_statchg;
+ sc->sc_ethercom.ec_mii = mii;
- ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, ether_mediachange,
+ ifmedia_init(&mii->mii_media, IFM_IMASK, ether_mediachange,
npe_ifmedia_status);
- mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
+ mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
MII_OFFSET_ANY, MIIF_DOPAUSE);
- if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
- ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
- ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
+ if (LIST_FIRST(&mii->mii_phys) == NULL) {
+ ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
+ ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
} else
- ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
+ ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
ifp->if_softc = sc;
strcpy(ifp->if_xname, device_xname(sc->sc_dev));
@@ -341,7 +341,8 @@ npe_attach(device_t parent, device_t sel
static void
npe_setmcast(struct npe_softc *sc)
{
- struct ifnet *ifp = &sc->sc_ethercom.ec_if;
+ struct ethercom *ec = &sc->sc_ethercom;
+ struct ifnet *ifp = &ec->ec_if;
uint8_t mask[ETHER_ADDR_LEN], addr[ETHER_ADDR_LEN];
uint32_t reg;
uint32_t msg[2];
@@ -368,7 +369,7 @@ npe_setmcast(struct npe_softc *sc)
memset(clr, 0, ETHER_ADDR_LEN);
memset(set, 0xff, ETHER_ADDR_LEN);
- ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
+ ETHER_FIRST_MULTI(step, ec, enm);
while (enm != NULL) {
if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
ETHER_ADDR_LEN)) {
@@ -888,7 +889,7 @@ npe_rxbuf_init(struct npe_softc *sc, str
m->m_data = m->m_ext.ext_buf + (m->m_ext.ext_size
- (NPE_FRAME_SIZE_DEFAULT + ETHER_ALIGN));
error = bus_dmamap_load_mbuf(sc->sc_dt, npe->ix_map, m,
- BUS_DMA_READ|BUS_DMA_NOWAIT);
+ BUS_DMA_READ | BUS_DMA_NOWAIT);
if (error != 0) {
m_freem(m);
return error;
@@ -1258,7 +1259,7 @@ npestart(struct ifnet *ifp)
int nseg, len, error, i;
uint32_t next;
- if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
+ if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
return;
while (sc->tx_free != NULL) {
@@ -1267,7 +1268,7 @@ npestart(struct ifnet *ifp)
break;
npe = sc->tx_free;
error = bus_dmamap_load_mbuf(sc->sc_dt, npe->ix_map, m,
- BUS_DMA_WRITE|BUS_DMA_NOWAIT);
+ BUS_DMA_WRITE | BUS_DMA_NOWAIT);
if (error == EFBIG) {
n = npe_defrag(m);
if (n == NULL) {
@@ -1278,7 +1279,7 @@ npestart(struct ifnet *ifp)
}
m = n;
error = bus_dmamap_load_mbuf(sc->sc_dt, npe->ix_map,
- m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
+ m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
}
if (error != 0) {
printf("%s: %s: error %u\n",
@@ -1438,13 +1439,13 @@ npeioctl(struct ifnet *ifp, u_long cmd,
error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
break;
case SIOCSIFFLAGS:
- if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) == IFF_RUNNING) {
+ if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_RUNNING) {
/*
* If interface is marked down and it is running,
* then stop and disable it.
*/
(*ifp->if_stop)(ifp, 1);
- } else if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) == IFF_UP) {
+ } else if ((ifp->if_flags & (IFF_UP |IFF_RUNNING)) == IFF_UP) {
/*
* If interface is marked up and it is stopped, then
* start it.
@@ -1456,8 +1457,8 @@ npeioctl(struct ifnet *ifp, u_long cmd,
/* Up (AND RUNNING). */
diff = (ifp->if_flags ^ sc->sc_if_flags)
- & (IFF_PROMISC|IFF_ALLMULTI);
- if ((diff & (IFF_PROMISC|IFF_ALLMULTI)) != 0) {
+ & (IFF_PROMISC | IFF_ALLMULTI);
+ if ((diff & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
/*
* If the difference bettween last flag and
* new flag only IFF_PROMISC or IFF_ALLMULTI,
Index: src/sys/arch/mips/sibyte/dev/sbmac.c
diff -u src/sys/arch/mips/sibyte/dev/sbmac.c:1.57 src/sys/arch/mips/sibyte/dev/sbmac.c:1.58
--- src/sys/arch/mips/sibyte/dev/sbmac.c:1.57 Mon Apr 22 08:39:10 2019
+++ src/sys/arch/mips/sibyte/dev/sbmac.c Thu May 23 10:40:39 2019
@@ -1,4 +1,4 @@
-/* $NetBSD: sbmac.c,v 1.57 2019/04/22 08:39:10 msaitoh Exp $ */
+/* $NetBSD: sbmac.c,v 1.58 2019/05/23 10:40:39 msaitoh Exp $ */
/*
* Copyright 2000, 2001, 2004
@@ -33,7 +33,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sbmac.c,v 1.57 2019/04/22 08:39:10 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sbmac.c,v 1.58 2019/05/23 10:40:39 msaitoh Exp $");
#include "opt_inet.h"
#include "opt_ns.h"
@@ -294,7 +294,7 @@ sbmac_mii_bitbang_write(device_t self, u
reg = PKSEG1(sc->sbm_base + R_MAC_MDIO);
SBMAC_WRITECSR(reg, (val &
- (M_MAC_MDC|M_MAC_MDIO_DIR|M_MAC_MDIO_OUT|M_MAC_MDIO_IN)));
+ (M_MAC_MDC | M_MAC_MDIO_DIR | M_MAC_MDIO_OUT | M_MAC_MDIO_IN)));
}
/*
@@ -606,7 +606,7 @@ sbdma_add_txbuffer(sbmacdma_t *d, struct
* Loop thru this mbuf record.
* The head mbuf will have SOP set.
*/
- d->sbdma_dscrtable[dsc].dscr_a = KVTOPHYS(mtod(m,void *)) |
+ d->sbdma_dscrtable[dsc].dscr_a = KVTOPHYS(mtod(m, void *)) |
M_DMA_ETHTX_SOP;
/*
@@ -616,7 +616,7 @@ sbdma_add_txbuffer(sbmacdma_t *d, struct
d->sbdma_dscrtable[dsc].dscr_b =
V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
V_DMA_DSCRB_A_SIZE((m->m_len +
- (mtod(m,uintptr_t) & 0x0000001F))) |
+ (mtod(m, uintptr_t) & 0x0000001F))) |
V_DMA_DSCRB_PKT_SIZE_MSB((m->m_pkthdr.len & 0xc000) >> 14) |
V_DMA_DSCRB_PKT_SIZE(m->m_pkthdr.len & 0x3fff);
@@ -712,7 +712,7 @@ again:
* going.
*/
- MGETHDR(m_new,M_DONTWAIT,MT_DATA);
+ MGETHDR(m_new, M_DONTWAIT, MT_DATA);
if (m_new == NULL) {
aprint_error_dev(d->sbdma_eth->sc_dev,
"mbuf allocation failed\n");
@@ -720,7 +720,7 @@ again:
return ENOBUFS;
}
- MCLGET(m_new,M_DONTWAIT);
+ MCLGET(m_new, M_DONTWAIT);
if (!(m_new->m_flags & M_EXT)) {
aprint_error_dev(d->sbdma_eth->sc_dev,
"mbuf cluster allocation failed\n");
@@ -730,7 +730,7 @@ again:
}
m_new->m_len = m_new->m_pkthdr.len= MCLBYTES;
- /*m_adj(m_new,ETHER_ALIGN);*/
+ /*m_adj(m_new, ETHER_ALIGN);*/
/*
* XXX Don't forget to include the offset portion in the
@@ -741,7 +741,7 @@ again:
* Copy data
*/
- m_copydata(m,0,m->m_pkthdr.len,mtod(m_new,void *));
+ m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, void *));
m_new->m_len = m_new->m_pkthdr.len = m->m_pkthdr.len;
/* Free old mbuf 'm', actual mbuf is now 'm_new' */
@@ -1089,7 +1089,7 @@ sbmac_initctx(struct sbmac_softc *sc)
sc->sbm_duplex = sbmac_duplex_half;
sc->sbm_fc = sbmac_fc_disabled;
- /*
+ /*
* Determine SOC type. 112x has Pass3 SOC features.
*/
sysrev = SBMAC_READCSR( PKSEG1(A_SCD_SYSTEM_REVISION) );
@@ -1286,18 +1286,18 @@ sbmac_channel_start(struct sbmac_softc *
* On chips which support unaligned DMA features, set the descriptor
* ring for transmit channels to use the unaligned buffer format.
*/
- txdma = &(sc->sbm_txdma);
+ txdma = &(sc->sbm_txdma);
if (sc->sbm_pass3_dma) {
dma_cfg0 = SBMAC_READCSR(txdma->sbdma_config0);
dma_cfg0 |= V_DMA_DESC_TYPE(K_DMA_DESC_TYPE_RING_UAL_RMW) |
M_DMA_TBX_EN | M_DMA_TDX_EN;
- SBMAC_WRITECSR(txdma->sbdma_config0,dma_cfg0);
+ SBMAC_WRITECSR(txdma->sbdma_config0, dma_cfg0);
fifo_cfg = SBMAC_READCSR(sc->sbm_fifocfg);
fifo_cfg |= V_MAC_TX_WR_THRSH(8) |
V_MAC_TX_RD_THRSH(8) | V_MAC_TX_RL_THRSH(8);
- SBMAC_WRITECSR(sc->sbm_fifocfg,fifo_cfg);
+ SBMAC_WRITECSR(sc->sbm_fifocfg, fifo_cfg);
}
/*
@@ -1779,7 +1779,7 @@ sbmac_start(struct ifnet *ifp)
struct mbuf *m_head = NULL;
int rv;
- if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
+ if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
return;
sc = ifp->if_softc;
@@ -1837,15 +1837,14 @@ sbmac_start(struct ifnet *ifp)
static void
sbmac_setmulti(struct sbmac_softc *sc)
{
- struct ifnet *ifp;
+ struct ethercom *ec = &sc->sc_ethercom;
+ struct ifnet *ifp = &ec->ec_if;
uint64_t reg;
sbmac_port_t port;
int idx;
struct ether_multi *enm;
struct ether_multistep step;
- ifp = &sc->sc_ethercom.ec_if;
-
/*
* Clear out entire multicast table. We do this by nuking
* the entire hash table and all the direct matches except
@@ -1895,7 +1894,7 @@ sbmac_setmulti(struct sbmac_softc *sc)
*/
idx = 1; /* skip station address */
- ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
+ ETHER_FIRST_MULTI(step, ec, enm);
while ((enm != NULL) && (idx < MAC_ADDR_COUNT)) {
reg = sbmac_addr2reg(enm->enm_addrlo);
port = PKSEG1(sc->sbm_base +
@@ -1960,7 +1959,7 @@ sbmac_ether_ioctl(struct ifnet *ifp, u_l
return ENOTTY;
}
- return (0);
+ return 0;
}
/*
@@ -2037,7 +2036,7 @@ sbmac_ioctl(struct ifnet *ifp, u_long cm
(void)splx(s);
- return(error);
+ return error;
}
/*
@@ -2234,6 +2233,7 @@ sbmac_attach(device_t parent, device_t s
{
struct sbmac_softc * const sc = device_private(self);
struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
+ struct mii_data * const mii = &sc->sc_mii;
struct sbobio_attach_args * const sa = aux;
u_char *eaddr;
static int unit = 0; /* XXX */
@@ -2317,23 +2317,20 @@ sbmac_attach(device_t parent, device_t s
/*
* Initialize MII/media info.
*/
- sc->sc_mii.mii_ifp = ifp;
- sc->sc_mii.mii_readreg = sbmac_mii_readreg;
- sc->sc_mii.mii_writereg = sbmac_mii_writereg;
- sc->sc_mii.mii_statchg = sbmac_mii_statchg;
- sc->sc_ethercom.ec_mii = &sc->sc_mii;
- ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
- ether_mediastatus);
- mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
+ mii->mii_ifp = ifp;
+ mii->mii_readreg = sbmac_mii_readreg;
+ mii->mii_writereg = sbmac_mii_writereg;
+ mii->mii_statchg = sbmac_mii_statchg;
+ sc->sc_ethercom.ec_mii = mii;
+ ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
+ mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
MII_OFFSET_ANY, 0);
- if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
- ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
- ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
- } else {
- ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
- }
-
+ if (LIST_FIRST(&mii->mii_phys) == NULL) {
+ ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
+ ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
+ } else
+ ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
/*
* map/route interrupt
Index: src/sys/dev/ic/i82586.c
diff -u src/sys/dev/ic/i82586.c:1.81 src/sys/dev/ic/i82586.c:1.82
--- src/sys/dev/ic/i82586.c:1.81 Fri Apr 26 06:33:33 2019
+++ src/sys/dev/ic/i82586.c Thu May 23 10:40:39 2019
@@ -1,4 +1,4 @@
-/* $NetBSD: i82586.c,v 1.81 2019/04/26 06:33:33 msaitoh Exp $ */
+/* $NetBSD: i82586.c,v 1.82 2019/05/23 10:40:39 msaitoh Exp $ */
/*-
* Copyright (c) 1998 The NetBSD Foundation, Inc.
@@ -137,7 +137,7 @@ Mode of operation:
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i82586.c,v 1.81 2019/04/26 06:33:33 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i82586.c,v 1.82 2019/05/23 10:40:39 msaitoh Exp $");
#include <sys/param.h>
@@ -963,7 +963,7 @@ ieget(struct ie_softc *sc, int head, int
len = uimin(thisrblen, thismblen);
(sc->memcopyin)(sc, mtod(m, char *) + thismboff,
- IE_RBUF_ADDR(sc,head) + thisrboff,
+ IE_RBUF_ADDR(sc, head) + thisrboff,
(u_int)len);
resid -= len;
@@ -1102,7 +1102,7 @@ iexmit(struct ie_softc *sc)
i82586_start_transceiver(sc);
}
} else {
- sc->ie_bus_write16(sc, IE_CMD_XMIT_LINK(sc->xmit_cmds,cur),
+ sc->ie_bus_write16(sc, IE_CMD_XMIT_LINK(sc->xmit_cmds, cur),
0xffff);
sc->ie_bus_write16(sc, IE_CMD_XMIT_CMD(sc->xmit_cmds, cur),
@@ -1415,18 +1415,18 @@ i82586_setup_bufs(struct ie_softc *sc)
int m = (n == sc->nframes - 1) ? 0 : n + 1;
/* Clear status */
- sc->ie_bus_write16(sc, IE_RFRAME_STATUS(sc->rframes,n), 0);
+ sc->ie_bus_write16(sc, IE_RFRAME_STATUS(sc->rframes, n), 0);
/* RBD link = NULL */
- sc->ie_bus_write16(sc, IE_RFRAME_BUFDESC(sc->rframes,n),
+ sc->ie_bus_write16(sc, IE_RFRAME_BUFDESC(sc->rframes, n),
0xffff);
/* Make a circular list */
- sc->ie_bus_write16(sc, IE_RFRAME_NEXT(sc->rframes,n),
- IE_RFRAME_ADDR(sc->rframes,m));
+ sc->ie_bus_write16(sc, IE_RFRAME_NEXT(sc->rframes, n),
+ IE_RFRAME_ADDR(sc->rframes, m));
/* Mark last as EOL */
- sc->ie_bus_write16(sc, IE_RFRAME_LAST(sc->rframes,n),
+ sc->ie_bus_write16(sc, IE_RFRAME_LAST(sc->rframes, n),
((m==0)? (IE_FD_EOL | IE_FD_SUSP) : 0));
}
@@ -1437,16 +1437,16 @@ i82586_setup_bufs(struct ie_softc *sc)
int m = (n == sc->nrxbuf - 1) ? 0 : n + 1;
/* Clear status */
- sc->ie_bus_write16(sc, IE_RBD_STATUS(sc->rbds,n), 0);
+ sc->ie_bus_write16(sc, IE_RBD_STATUS(sc->rbds, n), 0);
/* Make a circular list */
- sc->ie_bus_write16(sc, IE_RBD_NEXT(sc->rbds,n),
- IE_RBD_ADDR(sc->rbds,m));
+ sc->ie_bus_write16(sc, IE_RBD_NEXT(sc->rbds, n),
+ IE_RBD_ADDR(sc->rbds, m));
/* Link to data buffers */
sc->ie_bus_write24(sc, IE_RBD_BUFADDR(sc->rbds, n),
IE_RBUF_ADDR(sc, n));
- sc->ie_bus_write16(sc, IE_RBD_BUFLEN(sc->rbds,n),
+ sc->ie_bus_write16(sc, IE_RBD_BUFLEN(sc->rbds, n),
IE_RBUF_SIZE | ((m==0)?IE_RBD_EOL:0));
}
@@ -1681,11 +1681,11 @@ i82586_start_transceiver(struct ie_softc
/*
* Start RU at current position in frame & RBD lists.
*/
- sc->ie_bus_write16(sc, IE_RFRAME_BUFDESC(sc->rframes,sc->rfhead),
+ sc->ie_bus_write16(sc, IE_RFRAME_BUFDESC(sc->rframes, sc->rfhead),
IE_RBD_ADDR(sc->rbds, sc->rbhead));
sc->ie_bus_write16(sc, IE_SCB_RCVLST(sc->scb),
- IE_RFRAME_ADDR(sc->rframes,sc->rfhead));
+ IE_RFRAME_ADDR(sc->rframes, sc->rfhead));
if (sc->do_xmitnopchain) {
/* Stop transmit command chain */
@@ -1729,7 +1729,7 @@ i82586_ioctl(struct ifnet *ifp, unsigned
int s, error = 0;
s = splnet();
- switch(cmd) {
+ switch (cmd) {
case SIOCGIFMEDIA:
case SIOCSIFMEDIA:
error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
@@ -1758,6 +1758,7 @@ i82586_ioctl(struct ifnet *ifp, unsigned
static void
ie_mc_reset(struct ie_softc *sc)
{
+ struct ethercom *ec = &sc->sc_ethercom;
struct ether_multi *enm;
struct ether_multistep step;
int size;
@@ -1768,14 +1769,13 @@ ie_mc_reset(struct ie_softc *sc)
again:
size = 0;
sc->mcast_count = 0;
- ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
+ ETHER_FIRST_MULTI(step, ec, enm);
while (enm) {
size += 6;
if (sc->mcast_count >= IE_MAXMCAST ||
memcmp(enm->enm_addrlo, enm->enm_addrhi, 6) != 0) {
- sc->sc_ethercom.ec_if.if_flags |= IFF_ALLMULTI;
- i82586_ioctl(&sc->sc_ethercom.ec_if,
- SIOCSIFFLAGS, NULL);
+ ec->ec_if.if_flags |= IFF_ALLMULTI;
+ i82586_ioctl(&ec->ec_if, SIOCSIFFLAGS, NULL);
return;
}
ETHER_NEXT_MULTI(step, enm);
@@ -1836,10 +1836,10 @@ print_rbd(struct ie_softc *sc, int n)
{
printf("RBD at %08x:\n status %04x, next %04x, buffer %lx\n"
- "length/EOL %04x\n", IE_RBD_ADDR(sc->rbds,n),
- sc->ie_bus_read16(sc, IE_RBD_STATUS(sc->rbds,n)),
- sc->ie_bus_read16(sc, IE_RBD_NEXT(sc->rbds,n)),
+ "length/EOL %04x\n", IE_RBD_ADDR(sc->rbds, n),
+ sc->ie_bus_read16(sc, IE_RBD_STATUS(sc->rbds, n)),
+ sc->ie_bus_read16(sc, IE_RBD_NEXT(sc->rbds, n)),
(u_long)0,/*bus_space_read_4(sc->bt, sc->bh, IE_RBD_BUFADDR(sc->rbds,n)),-* XXX */
- sc->ie_bus_read16(sc, IE_RBD_BUFLEN(sc->rbds,n)));
+ sc->ie_bus_read16(sc, IE_RBD_BUFLEN(sc->rbds, n)));
}
#endif
Index: src/sys/dev/ic/mtd803.c
diff -u src/sys/dev/ic/mtd803.c:1.37 src/sys/dev/ic/mtd803.c:1.38
--- src/sys/dev/ic/mtd803.c:1.37 Tue Jan 22 03:42:26 2019
+++ src/sys/dev/ic/mtd803.c Thu May 23 10:40:39 2019
@@ -1,4 +1,4 @@
-/* $NetBSD: mtd803.c,v 1.37 2019/01/22 03:42:26 msaitoh Exp $ */
+/* $NetBSD: mtd803.c,v 1.38 2019/05/23 10:40:39 msaitoh Exp $ */
/*-
*
@@ -44,7 +44,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: mtd803.c,v 1.37 2019/01/22 03:42:26 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: mtd803.c,v 1.38 2019/05/23 10:40:39 msaitoh Exp $");
#include <sys/param.h>
@@ -162,9 +162,8 @@ mtd_config(struct mtd_softc *sc)
if (LIST_FIRST(&sc->mii.mii_phys) == NULL) {
aprint_error_dev(sc->dev, "Unable to configure MII\n");
return 1;
- } else {
+ } else
ifmedia_set(&sc->mii.mii_media, IFM_ETHER | IFM_AUTO);
- }
if (mtd_init_desc(sc))
return 1;
@@ -252,14 +251,16 @@ mtd_init_desc(struct mtd_softc *sc)
/* Allocate DMA-safe memory */
if ((err = bus_dmamem_alloc(sc->dma_tag, size, MTD_DMA_ALIGN,
0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
- aprint_error_dev(sc->dev, "unable to allocate DMA buffer, error = %d\n", err);
+ aprint_error_dev(sc->dev,
+ "unable to allocate DMA buffer, error = %d\n", err);
return 1;
}
/* Map memory to kernel addressable space */
if ((err = bus_dmamem_map(sc->dma_tag, &seg, 1, size,
(void **)&sc->desc, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
- aprint_error_dev(sc->dev, "unable to map DMA buffer, error = %d\n", err);
+ aprint_error_dev(sc->dev,
+ "unable to map DMA buffer, error = %d\n", err);
bus_dmamem_free(sc->dma_tag, &seg, rseg);
return 1;
}
@@ -267,7 +268,8 @@ mtd_init_desc(struct mtd_softc *sc)
/* Create a DMA map */
if ((err = bus_dmamap_create(sc->dma_tag, size, 1,
size, 0, BUS_DMA_NOWAIT, &sc->desc_dma_map)) != 0) {
- aprint_error_dev(sc->dev, "unable to create DMA map, error = %d\n", err);
+ aprint_error_dev(sc->dev,
+ "unable to create DMA map, error = %d\n", err);
bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, size);
bus_dmamem_free(sc->dma_tag, &seg, rseg);
return 1;
@@ -276,8 +278,8 @@ mtd_init_desc(struct mtd_softc *sc)
/* Load the DMA map */
if ((err = bus_dmamap_load(sc->dma_tag, sc->desc_dma_map, sc->desc,
size, NULL, BUS_DMA_NOWAIT)) != 0) {
- aprint_error_dev(sc->dev, "unable to load DMA map, error = %d\n",
- err);
+ aprint_error_dev(sc->dev,
+ "unable to load DMA map, error = %d\n", err);
bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, size);
bus_dmamem_free(sc->dma_tag, &seg, rseg);
@@ -290,8 +292,8 @@ mtd_init_desc(struct mtd_softc *sc)
/* Allocate DMA-safe memory */
if ((err = bus_dmamem_alloc(sc->dma_tag, size, MTD_DMA_ALIGN,
0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
- aprint_error_dev(sc->dev, "unable to allocate DMA buffer, error = %d\n",
- err);
+ aprint_error_dev(sc->dev,
+ "unable to allocate DMA buffer, error = %d\n", err);
/* Undo DMA map for descriptors */
bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
@@ -304,8 +306,8 @@ mtd_init_desc(struct mtd_softc *sc)
/* Map memory to kernel addressable space */
if ((err = bus_dmamem_map(sc->dma_tag, &seg, 1, size,
&sc->buf, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
- aprint_error_dev(sc->dev, "unable to map DMA buffer, error = %d\n",
- err);
+ aprint_error_dev(sc->dev,
+ "unable to map DMA buffer, error = %d\n", err);
bus_dmamem_free(sc->dma_tag, &seg, rseg);
/* Undo DMA map for descriptors */
@@ -319,8 +321,8 @@ mtd_init_desc(struct mtd_softc *sc)
/* Create a DMA map */
if ((err = bus_dmamap_create(sc->dma_tag, size, 1,
size, 0, BUS_DMA_NOWAIT, &sc->buf_dma_map)) != 0) {
- aprint_error_dev(sc->dev, "unable to create DMA map, error = %d\n",
- err);
+ aprint_error_dev(sc->dev,
+ "unable to create DMA map, error = %d\n", err);
bus_dmamem_unmap(sc->dma_tag, sc->buf, size);
bus_dmamem_free(sc->dma_tag, &seg, rseg);
@@ -335,8 +337,8 @@ mtd_init_desc(struct mtd_softc *sc)
/* Load the DMA map */
if ((err = bus_dmamap_load(sc->dma_tag, sc->buf_dma_map, sc->buf,
size, NULL, BUS_DMA_NOWAIT)) != 0) {
- aprint_error_dev(sc->dev, "unable to load DMA map, error = %d\n",
- err);
+ aprint_error_dev(sc->dev,
+ "unable to load DMA map, error = %d\n", err);
bus_dmamap_destroy(sc->dma_tag, sc->buf_dma_map);
bus_dmamem_unmap(sc->dma_tag, sc->buf, size);
bus_dmamem_free(sc->dma_tag, &seg, rseg);
@@ -372,16 +374,16 @@ mtd_init_desc(struct mtd_softc *sc)
/* Fill in tx descriptors */
for (/* i = MTD_NUM_RXD */; i < (MTD_NUM_TXD + MTD_NUM_RXD); ++i) {
sc->desc[i].stat = 0; /* At least, NOT MTD_TXD_OWNER! */
- if (i == (MTD_NUM_RXD + MTD_NUM_TXD - 1)) { /* Last descr */
+ if (i == (MTD_NUM_RXD + MTD_NUM_TXD - 1)) { /* Last descr */
/* Link back to first tx descriptor */
sc->desc[i].next =
htole32(sc->desc_dma_map->dm_segs[0].ds_addr
- +MTD_NUM_RXD * sizeof(struct mtd_desc));
+ +MTD_NUM_RXD * sizeof(struct mtd_desc));
} else {
/* Link forward to next tx descriptor */
sc->desc[i].next =
htole32(sc->desc_dma_map->dm_segs[0].ds_addr
- + (i + 1) * sizeof(struct mtd_desc));
+ + (i + 1) * sizeof(struct mtd_desc));
}
/* sc->desc[i].conf = MTD_TXBUF_SIZE & MTD_TXD_CONF_BUFS; */
/* Set buffer's address */
@@ -438,8 +440,8 @@ mtd_put(struct mtd_softc *sc, int index,
continue;
} else if (tlen > MTD_TXBUF_SIZE) {
/* XXX FIXME: No idea what to do here. */
- aprint_error_dev(sc->dev, "packet too large! Size = %i\n",
- tlen);
+ aprint_error_dev(sc->dev,
+ "packet too large! Size = %i\n", tlen);
n = m_free(m);
continue;
}
@@ -488,11 +490,12 @@ mtd_start(struct ifnet *ifp)
/* Mark first & last descriptor */
sc->desc[MTD_NUM_RXD + first_tx].conf |= MTD_TXD_CONF_FSD;
- if (sc->cur_tx == 0) {
- sc->desc[MTD_NUM_RXD + MTD_NUM_TXD - 1].conf |=MTD_TXD_CONF_LSD;
- } else {
- sc->desc[MTD_NUM_RXD + sc->cur_tx - 1].conf |= MTD_TXD_CONF_LSD;
- }
+ if (sc->cur_tx == 0)
+ sc->desc[MTD_NUM_RXD + MTD_NUM_TXD - 1].conf
+ |= MTD_TXD_CONF_LSD;
+ else
+ sc->desc[MTD_NUM_RXD + sc->cur_tx - 1].conf
+ |= MTD_TXD_CONF_LSD;
/* Give first descriptor to chip to complete transaction */
sc->desc[MTD_NUM_RXD + first_tx].stat = MTD_TXD_OWNER;
@@ -636,7 +639,8 @@ mtd_rxirq(struct mtd_softc *sc)
for (; !(sc->desc[sc->cur_rx].stat & MTD_RXD_OWNER);) {
/* Error summary set? */
if (sc->desc[sc->cur_rx].stat & MTD_RXD_ERRSUM) {
- aprint_error_dev(sc->dev, "received packet with errors\n");
+ aprint_error_dev(sc->dev,
+ "received packet with errors\n");
/* Give up packet, since an error occurred */
sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;
sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE &
@@ -653,8 +657,8 @@ mtd_rxirq(struct mtd_softc *sc)
/* Check packet size */
if (len <= sizeof(struct ether_header)) {
- aprint_error_dev(sc->dev, "invalid packet size %d; dropping\n",
- len);
+ aprint_error_dev(sc->dev,
+ "invalid packet size %d; dropping\n", len);
sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;
sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE &
MTD_RXD_CONF_BUFS;
@@ -674,7 +678,8 @@ mtd_rxirq(struct mtd_softc *sc)
sc->cur_rx = 0;
if (m == NULL) {
- aprint_error_dev(sc->dev, "error pulling packet off interface\n");
+ aprint_error_dev(sc->dev,
+ "error pulling packet off interface\n");
++ifp->if_ierrors;
continue;
}
@@ -723,7 +728,7 @@ mtd_irq_h(void *args)
{
struct mtd_softc *sc = args;
struct ifnet *ifp = &sc->ethercom.ec_if;
- u_int32_t status;
+ uint32_t status;
int r = 0;
if (!(ifp->if_flags & IFF_RUNNING) || !device_is_active(sc->dev))
@@ -732,7 +737,7 @@ mtd_irq_h(void *args)
/* Disable interrupts */
MTD_WRITE_4(sc, MTD_IMR, 0x00000000);
- for(;;) {
+ for (;;) {
status = MTD_READ_4(sc, MTD_ISR);
/* Add random seed before masking out bits */
@@ -748,7 +753,8 @@ mtd_irq_h(void *args)
/* NOTE: Perhaps we should reset with some of these errors? */
if (status & MTD_ISR_RXBUN) {
- aprint_error_dev(sc->dev, "receive buffer unavailable\n");
+ aprint_error_dev(sc->dev,
+ "receive buffer unavailable\n");
++ifp->if_ierrors;
}
@@ -758,12 +764,14 @@ mtd_irq_h(void *args)
}
if (status & MTD_ISR_TXBUN) {
- aprint_error_dev(sc->dev, "transmit buffer unavailable\n");
+ aprint_error_dev(sc->dev,
+ "transmit buffer unavailable\n");
++ifp->if_ierrors;
}
if ((status & MTD_ISR_PDF)) {
- aprint_error_dev(sc->dev, "parallel detection fault\n");
+ aprint_error_dev(sc->dev,
+ "parallel detection fault\n");
++ifp->if_ierrors;
}
@@ -811,10 +819,11 @@ mtd_irq_h(void *args)
void
mtd_setmulti(struct mtd_softc *sc)
{
- struct ifnet *ifp = &sc->ethercom.ec_if;
- u_int32_t rxtx_stat;
- u_int32_t hash[2] = {0, 0};
- u_int32_t crc;
+ struct ethercom *ec = &sc->ethercom;
+ struct ifnet *ifp = &ec->ec_if;
+ uint32_t rxtx_stat;
+ uint32_t hash[2] = {0, 0};
+ uint32_t crc;
struct ether_multi *enm;
struct ether_multistep step;
int mcnt = 0;
@@ -830,7 +839,7 @@ mtd_setmulti(struct mtd_softc *sc)
return;
}
- ETHER_FIRST_MULTI(step, &sc->ethercom, enm);
+ ETHER_FIRST_MULTI(step, ec, enm);
while (enm != NULL) {
/* We need the 6 most significant bits of the CRC */
crc = ETHER_CRC32(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
@@ -872,9 +881,8 @@ mtd_reset(struct mtd_softc *sc)
break;
}
- if (i == MTD_TIMEOUT) {
+ if (i == MTD_TIMEOUT)
aprint_error_dev(sc->dev, "reset timed out\n");
- }
/* Wait a little so chip can stabilize */
DELAY(1000);
Index: src/sys/dev/ic/seeq8005.c
diff -u src/sys/dev/ic/seeq8005.c:1.61 src/sys/dev/ic/seeq8005.c:1.62
--- src/sys/dev/ic/seeq8005.c:1.61 Tue Feb 5 06:17:02 2019
+++ src/sys/dev/ic/seeq8005.c Thu May 23 10:40:39 2019
@@ -1,4 +1,4 @@
-/* $NetBSD: seeq8005.c,v 1.61 2019/02/05 06:17:02 msaitoh Exp $ */
+/* $NetBSD: seeq8005.c,v 1.62 2019/05/23 10:40:39 msaitoh Exp $ */
/*
* Copyright (c) 2000, 2001 Ben Harris
@@ -61,7 +61,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: seeq8005.c,v 1.61 2019/02/05 06:17:02 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: seeq8005.c,v 1.62 2019/05/23 10:40:39 msaitoh Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -143,7 +143,7 @@ static void ea_await_fifo_full(struct se
static void ea_writebuf(struct seeq8005_softc *, u_char *, int, size_t);
static void ea_readbuf(struct seeq8005_softc *, u_char *, int, size_t);
static void ea_select_buffer(struct seeq8005_softc *, int);
-static void ea_set_address(struct seeq8005_softc *, int, const u_int8_t *);
+static void ea_set_address(struct seeq8005_softc *, int, const uint8_t *);
static void ea_read(struct seeq8005_softc *, int, int);
static struct mbuf *ea_get(struct seeq8005_softc *, int, int, struct ifnet *);
static void ea_txint(struct seeq8005_softc *);
@@ -164,9 +164,10 @@ static u_char* padbuf = NULL;
*/
void
-seeq8005_attach(struct seeq8005_softc *sc, const u_int8_t *myaddr, int *media,
+seeq8005_attach(struct seeq8005_softc *sc, const uint8_t *myaddr, int *media,
int nmedia, int defmedia)
{
+ device_t dev = sc->sc_dev;
struct ifnet *ifp = &sc->sc_ethercom.ec_if;
bus_space_tag_t iot = sc->sc_iot;
bus_space_handle_t ioh = sc->sc_ioh;
@@ -187,7 +188,8 @@ seeq8005_attach(struct seeq8005_softc *s
SEEQ_WRITE16(sc, iot, ioh, SEEQ_RX_PTR, 0x1234);
if (SEEQ_READ16(sc, iot, ioh, SEEQ_RX_PTR) != 0x1234) {
aprint_normal("\n");
- aprint_error_dev(sc->sc_dev, "Cannot determine data bus width\n");
+ aprint_error_dev(dev,
+ "Cannot determine data bus width\n");
return;
}
}
@@ -240,21 +242,21 @@ seeq8005_attach(struct seeq8005_softc *s
ea_ramtest(sc);
printf("%s: %dKB packet memory, txbuf=%dKB (%d buffers), rxbuf=%dKB",
- device_xname(sc->sc_dev), sc->sc_buffersize >> 10,
+ device_xname(dev), sc->sc_buffersize >> 10,
sc->sc_tx_bufsize >> 10, sc->sc_tx_bufs, sc->sc_rx_bufsize >> 10);
if (padbuf == NULL) {
padbuf = malloc(ETHER_MIN_LEN - ETHER_CRC_LEN, M_DEVBUF,
M_ZERO | M_NOWAIT);
if (padbuf == NULL) {
- aprint_error_dev(sc->sc_dev, "can't allocate pad buffer\n");
+ aprint_error_dev(dev, "can't allocate pad buffer\n");
return;
}
}
/* Initialise ifnet structure. */
- strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
+ strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
ifp->if_softc = sc;
ifp->if_start = ea_start;
ifp->if_ioctl = ea_ioctl;
@@ -275,8 +277,8 @@ seeq8005_attach(struct seeq8005_softc *s
ifmedia_add(&sc->sc_media, media[i], 0, NULL);
ifmedia_set(&sc->sc_media, defmedia);
} else {
- ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
- ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
+ ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
+ ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_MANUAL);
}
/* We can support 802.1Q VLAN-sized frames. */
@@ -290,7 +292,7 @@ seeq8005_attach(struct seeq8005_softc *s
printf("\n");
/* After \n because it can print a line of its own. */
- rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
+ rnd_attach_source(&sc->rnd_source, device_xname(dev),
RND_TYPE_NET, RND_FLAG_DEFAULT);
}
@@ -303,8 +305,8 @@ ea_mediachange(struct ifnet *ifp)
struct seeq8005_softc *sc = ifp->if_softc;
if (sc->sc_mediachange)
- return ((*sc->sc_mediachange)(sc));
- return (EINVAL);
+ return (*sc->sc_mediachange)(sc);
+ return EINVAL;
}
/*
@@ -349,7 +351,7 @@ ea_ramtest(struct seeq8005_softc *sc)
SEEQ_WRITE16(sc, iot, ioh, SEEQ_TX_PTR, 0x0000);
SEEQ_WRITE16(sc, iot, ioh, SEEQ_RX_PTR, SEEQ_MAX_BUFFER_SIZE - 2);
-#define SEEQ_RAMTEST_LOOP(value) \
+#define SEEQ_RAMTEST_LOOP(value) \
do { \
/* Set the write start address and write a pattern */ \
ea_writebuf(sc, NULL, 0x0000, 0); \
@@ -359,7 +361,7 @@ do { \
/* Set the read start address and verify the pattern */ \
ea_readbuf(sc, NULL, 0x0000, 0); \
for (loop = 0; loop < SEEQ_MAX_BUFFER_SIZE; loop += 2) \
- if (SEEQ_READ16(sc, iot, ioh, SEEQ_BUFWIN) != (value)) \
+ if (SEEQ_READ16(sc, iot, ioh, SEEQ_BUFWIN) != (value)) \
++sum; \
} while (/*CONSTCOND*/0)
@@ -371,7 +373,8 @@ do { \
/* Report */
if (sum > 0)
- aprint_error_dev(sc->sc_dev, "buffer RAM failed self test, %d faults\n", sum);
+ aprint_error_dev(sc->sc_dev,
+ "buffer RAM failed self test, %d faults\n", sum);
}
@@ -399,7 +402,7 @@ ea_stoptx(struct seeq8005_softc *sc)
/* Stop any tx and wait for confirmation */
SEEQ_WRITE16(sc, iot, ioh, SEEQ_COMMAND,
- sc->sc_command | SEEQ_CMD_TX_OFF);
+ sc->sc_command | SEEQ_CMD_TX_OFF);
timeout = 20000;
do {
@@ -483,9 +486,8 @@ ea_stop(struct ifnet *ifp, int disable)
/* Clear any pending interrupts */
SEEQ_WRITE16(sc, iot, ioh, SEEQ_COMMAND,
- sc->sc_command | SEEQ_CMD_RX_INTACK |
- SEEQ_CMD_TX_INTACK | SEEQ_CMD_DMA_INTACK |
- SEEQ_CMD_BW_INTACK);
+ sc->sc_command | SEEQ_CMD_RX_INTACK | SEEQ_CMD_TX_INTACK |
+ SEEQ_CMD_DMA_INTACK | SEEQ_CMD_BW_INTACK);
if (sc->sc_variant == SEEQ_8004) {
/* Put the chip to sleep */
@@ -581,7 +583,7 @@ ea_await_fifo_full(struct seeq8005_softc
* The buffer address is set to ADDR.
* If len != 0 then data is copied from the address starting at buf
* to the interface buffer.
- * BUF must be usable as a u_int16_t *.
+ * BUF must be usable as a uint16_t *.
* If LEN is odd, it must be safe to overwrite one extra byte.
*/
@@ -595,7 +597,7 @@ ea_writebuf(struct seeq8005_softc *sc, u
SEEQ_READ16(sc, iot, ioh, SEEQ_STATUS)));
#ifdef DIAGNOSTIC
- if (__predict_false(!ALIGNED_POINTER(buf, u_int16_t)))
+ if (__predict_false(!ALIGNED_POINTER(buf, uint16_t)))
panic("%s: unaligned writebuf", device_xname(sc->sc_dev));
if (__predict_false(addr >= SEEQ_MAX_BUFFER_SIZE))
panic("%s: writebuf out of range", device_xname(sc->sc_dev));
@@ -616,11 +618,11 @@ ea_writebuf(struct seeq8005_softc *sc, u
if (len > 0) {
if (sc->sc_flags & SF_8BIT)
bus_space_write_multi_1(iot, ioh, SEEQ_BUFWIN,
- (u_int8_t *)buf, len);
+ (uint8_t *)buf, len);
else
bus_space_write_multi_2(iot, ioh, SEEQ_BUFWIN,
/* LINTED: alignment checked above */
- (u_int16_t *)buf, len / 2);
+ (uint16_t *)buf, len / 2);
}
if (!(sc->sc_flags & SF_8BIT) && len % 2) {
/* Write the last byte */
@@ -636,7 +638,7 @@ ea_writebuf(struct seeq8005_softc *sc, u
* The buffer address is set to ADDR.
* If len != 0 then data is copied from the interface buffer to the
* address starting at buf.
- * BUF must be usable as a u_int16_t *.
+ * BUF must be usable as a uint16_t *.
* If LEN is odd, it must be safe to overwrite one extra byte.
*/
@@ -651,7 +653,7 @@ ea_readbuf(struct seeq8005_softc *sc, u_
SEEQ_READ16(sc, iot, ioh, SEEQ_STATUS), addr, len));
#ifdef DIAGNOSTIC
- if (__predict_false(!ALIGNED_POINTER(buf, u_int16_t)))
+ if (__predict_false(!ALIGNED_POINTER(buf, uint16_t)))
panic("%s: unaligned readbuf", device_xname(sc->sc_dev));
if (__predict_false(addr >= SEEQ_MAX_BUFFER_SIZE))
panic("%s: readbuf out of range", device_xname(sc->sc_dev));
@@ -698,11 +700,11 @@ ea_readbuf(struct seeq8005_softc *sc, u_
if (len > 0) {
if (sc->sc_flags & SF_8BIT)
bus_space_read_multi_1(iot, ioh, SEEQ_BUFWIN,
- (u_int8_t *)buf, len);
+ (uint8_t *)buf, len);
else
bus_space_read_multi_2(iot, ioh, SEEQ_BUFWIN,
/* LINTED: pointer alignment checked above */
- (u_int16_t *)buf, len / 2);
+ (uint16_t *)buf, len / 2);
}
if (!(sc->sc_flags & SF_8BIT) && len % 2) {
/* Read the last byte */
@@ -720,14 +722,13 @@ ea_select_buffer(struct seeq8005_softc *
/* Must be called at splnet */
static void
-ea_set_address(struct seeq8005_softc *sc, int which, const u_int8_t *ea)
+ea_set_address(struct seeq8005_softc *sc, int which, const uint8_t *ea)
{
int i;
ea_select_buffer(sc, SEEQ_BUFCODE_STATION_ADDR0 + which);
for (i = 0; i < ETHER_ADDR_LEN; ++i)
- SEEQ_WRITE16(sc, sc->sc_iot, sc->sc_ioh, SEEQ_BUFWIN,
- ea[i]);
+ SEEQ_WRITE16(sc, sc->sc_iot, sc->sc_ioh, SEEQ_BUFWIN, ea[i]);
}
/*
@@ -773,7 +774,7 @@ ea_init(struct ifnet *ifp)
}
/* Write the station address - the receiver must be off */
- ea_set_address(sc, 0, (const u_int8_t *)CLLADDR(ifp->if_sadl));
+ ea_set_address(sc, 0, (const uint8_t *)CLLADDR(ifp->if_sadl));
/* Split board memory into Rx and Tx. */
ea_select_buffer(sc, SEEQ_BUFCODE_TX_EAP);
@@ -953,7 +954,7 @@ ea_writembuf(struct seeq8005_softc *sc,
{
struct mbuf *m;
int len, nextpacket;
- u_int8_t hdr[4];
+ uint8_t hdr[4];
/*
* Copy the datagram to the packet buffer.
@@ -1045,7 +1046,7 @@ ea_txint(struct seeq8005_softc *sc)
struct ifnet *ifp = &sc->sc_ethercom.ec_if;
bus_space_tag_t iot = sc->sc_iot;
bus_space_handle_t ioh = sc->sc_ioh;
- u_int8_t txhdr[4];
+ uint8_t txhdr[4];
u_int txstatus;
ea_readbuf(sc, txhdr, 0x0000, 4);
@@ -1127,7 +1128,7 @@ ea_rxint(struct seeq8005_softc *sc)
int ctrl;
int ptr;
int status;
- u_int8_t rxhdr[4];
+ uint8_t rxhdr[4];
struct ifnet *ifp;
ifp = &sc->sc_ethercom.ec_if;
@@ -1301,7 +1302,7 @@ ea_get(struct seeq8005_softc *sc, int ad
if (top == NULL) {
/* Make sure the payload is aligned */
char *newdata = (char *)
- ALIGN((char*)m->m_data +
+ ALIGN((char*)m->m_data +
sizeof(struct ether_header)) -
sizeof(struct ether_header);
len -= newdata - m->m_data;
@@ -1374,10 +1375,10 @@ ea_mc_reset_8004(struct seeq8005_softc *
struct ethercom *ec = &sc->sc_ethercom;
struct ifnet *ifp = &ec->ec_if;
struct ether_multi *enm;
- u_int32_t crc;
+ uint32_t crc;
int i;
struct ether_multistep step;
- u_int8_t af[8];
+ uint8_t af[8];
/*
* Set up multicast address filter by passing all multicast addresses
@@ -1434,19 +1435,20 @@ ea_mc_reset_8004(struct seeq8005_softc *
static void
ea_mc_reset_8005(struct seeq8005_softc *sc)
{
+ struct ethercom *ec = &sc->sc_ethercom;
struct ether_multi *enm;
struct ether_multistep step;
int naddr, maxaddrs;
naddr = 0;
maxaddrs = 5;
- ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
+ ETHER_FIRST_MULTI(step, ec, enm);
while (enm != NULL) {
/* Have we got space? */
if (naddr >= maxaddrs ||
memcmp(enm->enm_addrlo, enm->enm_addrhi, 6) != 0) {
- sc->sc_ethercom.ec_if.if_flags |= IFF_ALLMULTI;
- ea_ioctl(&sc->sc_ethercom.ec_if, SIOCSIFFLAGS, NULL);
+ ec->ec_if.if_flags |= IFF_ALLMULTI;
+ ea_ioctl(&ec->ec_if, SIOCSIFFLAGS, NULL);
return;
}
ea_set_address(sc, 1 + naddr, enm->enm_addrlo);
Index: src/sys/dev/marvell/if_gfe.c
diff -u src/sys/dev/marvell/if_gfe.c:1.51 src/sys/dev/marvell/if_gfe.c:1.52
--- src/sys/dev/marvell/if_gfe.c:1.51 Mon Apr 22 08:36:03 2019
+++ src/sys/dev/marvell/if_gfe.c Thu May 23 10:40:39 2019
@@ -1,4 +1,4 @@
-/* $NetBSD: if_gfe.c,v 1.51 2019/04/22 08:36:03 msaitoh Exp $ */
+/* $NetBSD: if_gfe.c,v 1.52 2019/05/23 10:40:39 msaitoh Exp $ */
/*
* Copyright (c) 2002 Allegro Networks, Inc., Wasabi Systems, Inc.
@@ -42,7 +42,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_gfe.c,v 1.51 2019/04/22 08:36:03 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_gfe.c,v 1.52 2019/05/23 10:40:39 msaitoh Exp $");
#include "opt_inet.h"
@@ -130,18 +130,18 @@ enum gfe_hash_op {
(n) * sizeof((rxq)->rxq_descs[0]), sizeof((rxq)->rxq_descs[0]), \
(ops))
#define GE_RXDPRESYNC(sc, rxq, n) \
- GE_RXDSYNC(sc, rxq, n, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
+ GE_RXDSYNC(sc, rxq, n, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)
#define GE_RXDPOSTSYNC(sc, rxq, n) \
- GE_RXDSYNC(sc, rxq, n, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)
+ GE_RXDSYNC(sc, rxq, n, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)
#define GE_TXDSYNC(sc, txq, n, ops) \
bus_dmamap_sync((sc)->sc_dmat, (txq)->txq_desc_mem.gdm_map, \
(n) * sizeof((txq)->txq_descs[0]), sizeof((txq)->txq_descs[0]), \
(ops))
#define GE_TXDPRESYNC(sc, txq, n) \
- GE_TXDSYNC(sc, txq, n, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
+ GE_TXDSYNC(sc, txq, n, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)
#define GE_TXDPOSTSYNC(sc, txq, n) \
- GE_TXDSYNC(sc, txq, n, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)
+ GE_TXDSYNC(sc, txq, n, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)
#define STATIC
@@ -406,6 +406,7 @@ gfe_attach(device_t parent, device_t sel
struct marvell_attach_args *mva = aux;
struct gfe_softc * const sc = device_private(self);
struct ifnet * const ifp = &sc->sc_ec.ec_if;
+ struct mii_data * const mii = &sc->sc_mii;
uint32_t sdcr;
int phyaddr, error;
prop_data_t ea;
@@ -494,23 +495,21 @@ gfe_attach(device_t parent, device_t sel
sdcr |= ETH_ESDCR_RIFB;
GE_WRITE(sc, ETH_ESDCR, sdcr);
- sc->sc_mii.mii_ifp = ifp;
- sc->sc_mii.mii_readreg = gfec_mii_read;
- sc->sc_mii.mii_writereg = gfec_mii_write;
- sc->sc_mii.mii_statchg = gfec_mii_statchg;
-
- sc->sc_ec.ec_mii = &sc->sc_mii;
- ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
- ether_mediastatus);
+ mii->mii_ifp = ifp;
+ mii->mii_readreg = gfec_mii_read;
+ mii->mii_writereg = gfec_mii_write;
+ mii->mii_statchg = gfec_mii_statchg;
- mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, phyaddr,
+ sc->sc_ec.ec_mii = mii;
+ ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
+
+ mii_attach(sc->sc_dev, mii, 0xffffffff, phyaddr,
MII_OFFSET_ANY, MIIF_NOISOLATE);
- if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
- ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
- ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
- } else {
- ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
- }
+ if (LIST_FIRST(&mii->mii_phys) == NULL) {
+ ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
+ ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
+ } else
+ ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
ifp->if_softc = sc;
@@ -570,7 +569,7 @@ gfe_dmamem_alloc(struct gfe_softc *sc, s
goto fail;
error = bus_dmamap_create(sc->sc_dmat, gdm->gdm_size, gdm->gdm_nsegs,
- gdm->gdm_size, 0, BUS_DMA_ALLOCNOW|BUS_DMA_NOWAIT, &gdm->gdm_map);
+ gdm->gdm_size, 0, BUS_DMA_ALLOCNOW |BUS_DMA_NOWAIT, &gdm->gdm_map);
if (error)
goto fail;
@@ -641,8 +640,8 @@ gfe_ifioctl(struct ifnet *ifp, u_long cm
if ((error = ifioctl_common(ifp, cmd, data)) != 0)
break;
/* XXX re-use ether_ioctl() */
- switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
- case IFF_UP|IFF_RUNNING:/* active->active, update */
+ switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
+ case IFF_UP | IFF_RUNNING:/* active->active, update */
error = gfe_whack(sc, GE_WHACK_CHANGE);
break;
case IFF_RUNNING: /* not up, so we stop */
@@ -833,30 +832,30 @@ gfe_rx_rxqinit(struct gfe_softc *sc, enu
}
bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map, 0,
rxq->rxq_desc_mem.gdm_map->dm_mapsize,
- BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(sc->sc_dmat, rxq->rxq_buf_mem.gdm_map, 0,
rxq->rxq_buf_mem.gdm_map->dm_mapsize,
BUS_DMASYNC_PREREAD);
- rxq->rxq_intrbits = ETH_IR_RxBuffer|ETH_IR_RxError;
+ rxq->rxq_intrbits = ETH_IR_RxBuffer | ETH_IR_RxError;
switch (rxprio) {
case GE_RXPRIO_HI:
- rxq->rxq_intrbits |= ETH_IR_RxBuffer_3|ETH_IR_RxError_3;
+ rxq->rxq_intrbits |= ETH_IR_RxBuffer_3 | ETH_IR_RxError_3;
rxq->rxq_efrdp = ETH_EFRDP3;
rxq->rxq_ecrdp = ETH_ECRDP3;
break;
case GE_RXPRIO_MEDHI:
- rxq->rxq_intrbits |= ETH_IR_RxBuffer_2|ETH_IR_RxError_2;
+ rxq->rxq_intrbits |= ETH_IR_RxBuffer_2 | ETH_IR_RxError_2;
rxq->rxq_efrdp = ETH_EFRDP2;
rxq->rxq_ecrdp = ETH_ECRDP2;
break;
case GE_RXPRIO_MEDLO:
- rxq->rxq_intrbits |= ETH_IR_RxBuffer_1|ETH_IR_RxError_1;
+ rxq->rxq_intrbits |= ETH_IR_RxBuffer_1 | ETH_IR_RxError_1;
rxq->rxq_efrdp = ETH_EFRDP1;
rxq->rxq_ecrdp = ETH_ECRDP1;
break;
case GE_RXPRIO_LO:
- rxq->rxq_intrbits |= ETH_IR_RxBuffer_0|ETH_IR_RxError_0;
+ rxq->rxq_intrbits |= ETH_IR_RxBuffer_0 | ETH_IR_RxError_0;
rxq->rxq_efrdp = ETH_EFRDP0;
rxq->rxq_ecrdp = ETH_ECRDP0;
break;
@@ -902,9 +901,9 @@ gfe_rx_get(struct gfe_softc *sc, enum gf
* or for some reason it's bigger than our frame size,
* ignore it and go to the next packet.
*/
- if ((cmdsts & (RX_CMD_F|RX_CMD_L|RX_STS_ES)) !=
- (RX_CMD_F|RX_CMD_L) ||
- buflen > sc->sc_max_frame_length) {
+ if ((cmdsts & (RX_CMD_F | RX_CMD_L | RX_STS_ES)) !=
+ (RX_CMD_F | RX_CMD_L) ||
+ (buflen > sc->sc_max_frame_length)) {
GE_DPRINTF(sc, ("!"));
--rxq->rxq_active;
ifp->if_ipackets++;
@@ -1015,7 +1014,7 @@ gfe_rx_process(struct gfe_softc *sc, uin
memset(masks, 0, sizeof(masks));
bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map,
0, rxq->rxq_desc_mem.gdm_size,
- BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
for (idx = 0; idx < GE_RXDESC_MAX; idx++) {
volatile struct gt_eth_desc *rxd = &rxq->rxq_descs[idx];
@@ -1024,7 +1023,7 @@ gfe_rx_process(struct gfe_softc *sc, uin
}
bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map,
0, rxq->rxq_desc_mem.gdm_size,
- BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#if defined(DEBUG)
printf("%s: rx queue %d filled at %u=%#x(%#x/%#x)\n",
device_xname(sc->sc_dev), rxprio, rxq->rxq_fi,
@@ -1032,7 +1031,7 @@ gfe_rx_process(struct gfe_softc *sc, uin
#endif
}
if ((intrmask & ETH_IR_RxBits) == 0)
- intrmask &= ~(ETH_IR_RxBuffer|ETH_IR_RxError);
+ intrmask &= ~(ETH_IR_RxBuffer | ETH_IR_RxError);
GE_FUNC_EXIT(sc, "");
return intrmask;
@@ -1115,8 +1114,8 @@ gfe_rx_stop(struct gfe_softc *sc, enum g
{
GE_FUNC_ENTER(sc, "gfe_rx_stop");
sc->sc_flags &= ~GE_RXACTIVE;
- sc->sc_idlemask &= ~(ETH_IR_RxBits|ETH_IR_RxBuffer|ETH_IR_RxError);
- sc->sc_intrmask &= ~(ETH_IR_RxBits|ETH_IR_RxBuffer|ETH_IR_RxError);
+ sc->sc_idlemask &= ~(ETH_IR_RxBits | ETH_IR_RxBuffer | ETH_IR_RxError);
+ sc->sc_intrmask &= ~(ETH_IR_RxBits | ETH_IR_RxBuffer | ETH_IR_RxError);
GE_WRITE(sc, ETH_EIMR, sc->sc_intrmask);
GE_WRITE(sc, ETH_ESDCMR, ETH_ESDCMR_AR);
do {
@@ -1148,25 +1147,25 @@ gfe_tick(void *arg)
gfe_ifstart(&sc->sc_ec.ec_if);
if (tickflags & GE_TICK_RX_RESTART) {
intrmask |= sc->sc_idlemask;
- if (sc->sc_idlemask & (ETH_IR_RxBuffer_3|ETH_IR_RxError_3)) {
+ if (sc->sc_idlemask & (ETH_IR_RxBuffer_3 | ETH_IR_RxError_3)) {
struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_HI];
rxq->rxq_fi = 0;
GE_WRITE(sc, ETH_EFRDP3, rxq->rxq_desc_busaddr);
GE_WRITE(sc, ETH_ECRDP3, rxq->rxq_desc_busaddr);
}
- if (sc->sc_idlemask & (ETH_IR_RxBuffer_2|ETH_IR_RxError_2)) {
+ if (sc->sc_idlemask & (ETH_IR_RxBuffer_2 | ETH_IR_RxError_2)) {
struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_MEDHI];
rxq->rxq_fi = 0;
GE_WRITE(sc, ETH_EFRDP2, rxq->rxq_desc_busaddr);
GE_WRITE(sc, ETH_ECRDP2, rxq->rxq_desc_busaddr);
}
- if (sc->sc_idlemask & (ETH_IR_RxBuffer_1|ETH_IR_RxError_1)) {
+ if (sc->sc_idlemask & (ETH_IR_RxBuffer_1 | ETH_IR_RxError_1)) {
struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_MEDLO];
rxq->rxq_fi = 0;
GE_WRITE(sc, ETH_EFRDP1, rxq->rxq_desc_busaddr);
GE_WRITE(sc, ETH_ECRDP1, rxq->rxq_desc_busaddr);
}
- if (sc->sc_idlemask & (ETH_IR_RxBuffer_0|ETH_IR_RxError_0)) {
+ if (sc->sc_idlemask & (ETH_IR_RxBuffer_0 | ETH_IR_RxError_0)) {
struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_LO];
rxq->rxq_fi = 0;
GE_WRITE(sc, ETH_EFRDP0, rxq->rxq_desc_busaddr);
@@ -1274,7 +1273,7 @@ gfe_tx_enqueue(struct gfe_softc *sc, enu
if (txq->txq_nactive > 0 && txq->txq_outptr <= txq->txq_inptr &&
txq->txq_outptr + buflen > txq->txq_inptr) {
intrmask |= txq->txq_intrbits &
- (ETH_IR_TxBufferHigh|ETH_IR_TxBufferLow);
+ (ETH_IR_TxBufferHigh | ETH_IR_TxBufferLow);
if (sc->sc_intrmask != intrmask) {
sc->sc_intrmask = intrmask;
GE_WRITE(sc, ETH_EIMR, sc->sc_intrmask);
@@ -1305,10 +1304,10 @@ gfe_tx_enqueue(struct gfe_softc *sc, enu
*/
txq->txq_ei_gapcount += buflen;
if (txq->txq_ei_gapcount > 2 * GE_TXBUF_SIZE / 3) {
- txd->ed_cmdsts = htogt32(TX_CMD_FIRST|TX_CMD_LAST|TX_CMD_EI);
+ 			txd->ed_cmdsts = htogt32(TX_CMD_FIRST | TX_CMD_LAST | TX_CMD_EI);
txq->txq_ei_gapcount = 0;
} else {
- txd->ed_cmdsts = htogt32(TX_CMD_FIRST|TX_CMD_LAST);
+ txd->ed_cmdsts = htogt32(TX_CMD_FIRST | TX_CMD_LAST);
}
#if 0
GE_DPRINTF(sc, ("([%d]->%08lx.%08lx.%08lx.%08lx)", txq->txq_lo,
@@ -1322,7 +1321,7 @@ gfe_tx_enqueue(struct gfe_softc *sc, enu
* Tell the SDMA engine to "Fetch!"
*/
GE_WRITE(sc, ETH_ESDCMR,
- txq->txq_esdcmrbits & (ETH_ESDCMR_TXDH|ETH_ESDCMR_TXDL));
+ txq->txq_esdcmrbits & (ETH_ESDCMR_TXDH | ETH_ESDCMR_TXDL));
GE_DPRINTF(sc, ("(%d)", txq->txq_lo));
@@ -1346,7 +1345,7 @@ gfe_tx_enqueue(struct gfe_softc *sc, enu
* an interrupt when the transmit queue finishes processing the
* list. But only update the mask if needs changing.
*/
- intrmask |= txq->txq_intrbits & (ETH_IR_TxEndHigh|ETH_IR_TxEndLow);
+ intrmask |= txq->txq_intrbits & (ETH_IR_TxEndHigh | ETH_IR_TxEndLow);
if (sc->sc_intrmask != intrmask) {
sc->sc_intrmask = intrmask;
GE_WRITE(sc, ETH_EIMR, sc->sc_intrmask);
@@ -1438,8 +1437,10 @@ gfe_tx_done(struct gfe_softc *sc, enum g
panic("%s: transmit fifo%d empty but active count (%d) > 0!",
device_xname(sc->sc_dev), txprio, txq->txq_nactive);
ifp->if_timer = 0;
- intrmask &= ~(txq->txq_intrbits & (ETH_IR_TxEndHigh|ETH_IR_TxEndLow));
- intrmask &= ~(txq->txq_intrbits & (ETH_IR_TxBufferHigh|ETH_IR_TxBufferLow));
+ intrmask &=
+ ~(txq->txq_intrbits & (ETH_IR_TxEndHigh | ETH_IR_TxEndLow));
+ intrmask &=
+ ~(txq->txq_intrbits & (ETH_IR_TxBufferHigh | ETH_IR_TxBufferLow));
GE_FUNC_EXIT(sc, "");
return intrmask;
}
@@ -1519,11 +1520,11 @@ gfe_tx_start(struct gfe_softc *sc, enum
txq->txq_descs[GE_TXDESC_MAX-1].ed_nxtptr =
htogt32(txq->txq_desc_busaddr);
bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_mem.gdm_map, 0,
- GE_TXDESC_MEMSIZE, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ GE_TXDESC_MEMSIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
switch (txprio) {
case GE_TXPRIO_HI:
- txq->txq_intrbits = ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh;
+ txq->txq_intrbits = ETH_IR_TxEndHigh | ETH_IR_TxBufferHigh;
txq->txq_esdcmrbits = ETH_ESDCMR_TXDH;
txq->txq_epsrbits = ETH_EPSR_TxHigh;
txq->txq_ectdp = ETH_ECTDP1;
@@ -1531,7 +1532,7 @@ gfe_tx_start(struct gfe_softc *sc, enum
break;
case GE_TXPRIO_LO:
- txq->txq_intrbits = ETH_IR_TxEndLow|ETH_IR_TxBufferLow;
+ txq->txq_intrbits = ETH_IR_TxEndLow | ETH_IR_TxBufferLow;
txq->txq_esdcmrbits = ETH_ESDCMR_TXDL;
txq->txq_epsrbits = ETH_EPSR_TxLow;
txq->txq_ectdp = ETH_ECTDP0;
@@ -1588,7 +1589,7 @@ gfe_tx_stop(struct gfe_softc *sc, enum g
{
GE_FUNC_ENTER(sc, "gfe_tx_stop");
- GE_WRITE(sc, ETH_ESDCMR, ETH_ESDCMR_STDH|ETH_ESDCMR_STDL);
+ GE_WRITE(sc, ETH_ESDCMR, ETH_ESDCMR_STDH | ETH_ESDCMR_STDL);
sc->sc_intrmask = gfe_tx_done(sc, GE_TXPRIO_HI, sc->sc_intrmask);
sc->sc_intrmask = gfe_tx_done(sc, GE_TXPRIO_LO, sc->sc_intrmask);
@@ -1631,14 +1632,14 @@ gfe_intr(void *arg)
GE_WRITE(sc, ETH_EICR, ~cause);
#ifndef GE_NORX
- if (cause & (ETH_IR_RxBuffer|ETH_IR_RxError))
+ if (cause & (ETH_IR_RxBuffer | ETH_IR_RxError))
intrmask = gfe_rx_process(sc, cause, intrmask);
#endif
#ifndef GE_NOTX
- if (cause & (ETH_IR_TxBufferHigh|ETH_IR_TxEndHigh))
+ if (cause & (ETH_IR_TxBufferHigh | ETH_IR_TxEndHigh))
intrmask = gfe_tx_done(sc, GE_TXPRIO_HI, intrmask);
- if (cause & (ETH_IR_TxBufferLow|ETH_IR_TxEndLow))
+ if (cause & (ETH_IR_TxBufferLow | ETH_IR_TxEndLow))
intrmask = gfe_tx_done(sc, GE_TXPRIO_LO, intrmask);
#endif
if (cause & ETH_IR_MIIPhySTC) {
@@ -2006,6 +2007,7 @@ gfe_hash_multichg(struct ethercom *ec, c
int
gfe_hash_fill(struct gfe_softc *sc)
{
+ struct ethercom *ec = &sc->sc_ec;
struct ether_multistep step;
struct ether_multi *enm;
int error;
@@ -2013,16 +2015,16 @@ gfe_hash_fill(struct gfe_softc *sc)
GE_FUNC_ENTER(sc, "gfe_hash_fill");
error = gfe_hash_entry_op(sc, GE_HASH_ADD, GE_RXPRIO_HI,
- CLLADDR(sc->sc_ec.ec_if.if_sadl));
+ CLLADDR(ec->ec_if.if_sadl));
if (error) {
GE_FUNC_EXIT(sc, "!");
return error;
}
sc->sc_flags &= ~GE_ALLMULTI;
- if ((sc->sc_ec.ec_if.if_flags & IFF_PROMISC) == 0)
+ if ((ec->ec_if.if_flags & IFF_PROMISC) == 0)
sc->sc_pcr &= ~ETH_EPCR_PM;
- ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
+ ETHER_FIRST_MULTI(step, ec, enm);
while (enm != NULL) {
if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
sc->sc_flags |= GE_ALLMULTI;
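
The gfe hunk above is the template for the ETHER_FIRST_MULTI() change in the remaining drivers: the repeated &sc->sc_ec (or &sc->sc_ethercom) expression is hoisted into one local struct ethercom pointer, which then serves both the ec_if accesses and the multicast walk. A minimal sketch of the resulting idiom follows; "example_softc", example_set_allmulti() and example_add_mcast() are hypothetical stand-ins for the driver-specific parts, while the ether/if calls are the ones used in the hunks above.

	static void
	example_setmulti(struct example_softc *sc)
	{
		struct ethercom *ec = &sc->sc_ethercom;	/* one local pointer */
		struct ifnet *ifp = &ec->ec_if;		/* reused for ec_if too */
		struct ether_multistep step;
		struct ether_multi *enm;

		if (ifp->if_flags & IFF_PROMISC)
			return;

		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			/* A range of addresses cannot be filtered exactly. */
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0) {
				example_set_allmulti(sc);	/* hypothetical helper */
				return;
			}
			example_add_mcast(sc, enm->enm_addrlo);	/* hypothetical helper */
			ETHER_NEXT_MULTI(step, enm);
		}
	}
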
Index: src/sys/dev/pci/if_kse.c
diff -u src/sys/dev/pci/if_kse.c:1.35 src/sys/dev/pci/if_kse.c:1.36
--- src/sys/dev/pci/if_kse.c:1.35 Fri Apr 26 06:33:34 2019
+++ src/sys/dev/pci/if_kse.c Thu May 23 10:40:39 2019
@@ -1,4 +1,4 @@
-/* $NetBSD: if_kse.c,v 1.35 2019/04/26 06:33:34 msaitoh Exp $ */
+/* $NetBSD: if_kse.c,v 1.36 2019/05/23 10:40:39 msaitoh Exp $ */
/*-
* Copyright (c) 2006 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_kse.c,v 1.35 2019/04/26 06:33:34 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_kse.c,v 1.36 2019/05/23 10:40:39 msaitoh Exp $");
#include <sys/param.h>
@@ -1023,7 +1023,8 @@ kse_set_filter(struct kse_softc *sc)
{
struct ether_multistep step;
struct ether_multi *enm;
- struct ifnet *ifp = &sc->sc_ethercom.ec_if;
+ struct ethercom *ec = &sc->sc_ethercom;
+ struct ifnet *ifp = &ec->ec_if;
uint32_t h, hashes[2];
sc->sc_rxc &= ~(RXC_MHTE | RXC_RM);
@@ -1031,7 +1032,7 @@ kse_set_filter(struct kse_softc *sc)
if (ifp->if_flags & IFF_PROMISC)
return;
- ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
+ ETHER_FIRST_MULTI(step, ec, enm);
if (enm == NULL)
return;
hashes[0] = hashes[1] = 0;
Index: src/sys/dev/pci/if_ti.c
diff -u src/sys/dev/pci/if_ti.c:1.108 src/sys/dev/pci/if_ti.c:1.109
--- src/sys/dev/pci/if_ti.c:1.108 Fri Apr 26 06:33:34 2019
+++ src/sys/dev/pci/if_ti.c Thu May 23 10:40:39 2019
@@ -1,4 +1,4 @@
-/* $NetBSD: if_ti.c,v 1.108 2019/04/26 06:33:34 msaitoh Exp $ */
+/* $NetBSD: if_ti.c,v 1.109 2019/05/23 10:40:39 msaitoh Exp $ */
/*
* Copyright (c) 1997, 1998, 1999
@@ -81,7 +81,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_ti.c,v 1.108 2019/04/26 06:33:34 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_ti.c,v 1.109 2019/05/23 10:40:39 msaitoh Exp $");
#include "opt_inet.h"
@@ -1145,15 +1145,14 @@ ti_del_mcast(struct ti_softc *sc, struct
static void
ti_setmulti(struct ti_softc *sc)
{
- struct ifnet *ifp;
+ struct ethercom *ec = &sc->ethercom;
+ struct ifnet *ifp = &ec->ec_if;
struct ti_cmd_desc cmd;
struct ti_mc_entry *mc;
uint32_t intrs;
struct ether_multi *enm;
struct ether_multistep step;
- ifp = &sc->ethercom.ec_if;
-
/* Disable interrupts. */
intrs = CSR_READ_4(sc, TI_MB_HOSTINTR);
CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);
@@ -1169,7 +1168,7 @@ ti_setmulti(struct ti_softc *sc)
* Remember all multicast addresses so that we can delete them
* later. Punt if there is a range of addresses or memory shortage.
*/
- ETHER_FIRST_MULTI(step, &sc->ethercom, enm);
+ ETHER_FIRST_MULTI(step, ec, enm);
while (enm != NULL) {
if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
ETHER_ADDR_LEN) != 0)
@@ -1728,7 +1727,7 @@ ti_attach(device_t parent, device_t self
/*
* A Tigon chip was detected. Inform the world.
*/
- aprint_normal_dev(self, "Ethernet address %s\n",ether_sprintf(eaddr));
+ aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(eaddr));
sc->sc_dmat = pa->pa_dmat;
@@ -2723,7 +2722,8 @@ ti_ioctl(struct ifnet *ifp, u_long comma
case SIOCSIFMTU:
if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU_JUMBO)
error = EINVAL;
- else if ((error = ifioctl_common(ifp, command, data)) == ENETRESET){
+ else if ((error = ifioctl_common(ifp, command, data))
+ == ENETRESET) {
ti_init(sc);
error = 0;
}
Index: src/sys/dev/pci/if_vge.c
diff -u src/sys/dev/pci/if_vge.c:1.69 src/sys/dev/pci/if_vge.c:1.70
--- src/sys/dev/pci/if_vge.c:1.69 Thu Apr 11 08:50:59 2019
+++ src/sys/dev/pci/if_vge.c Thu May 23 10:40:39 2019
@@ -1,4 +1,4 @@
-/* $NetBSD: if_vge.c,v 1.69 2019/04/11 08:50:59 msaitoh Exp $ */
+/* $NetBSD: if_vge.c,v 1.70 2019/05/23 10:40:39 msaitoh Exp $ */
/*-
* Copyright (c) 2004
@@ -35,7 +35,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_vge.c,v 1.69 2019/04/11 08:50:59 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_vge.c,v 1.70 2019/05/23 10:40:39 msaitoh Exp $");
/*
* VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
@@ -371,7 +371,7 @@ vge_read_eeprom(struct vge_softc *sc, in
* EELOAD bit in the CHIPCFG2 register.
*/
CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
- CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
+ CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*| VGE_EECSR_ECS*/);
/* Select the address of the word we want to read */
CSR_WRITE_1(sc, VGE_EEADDR, addr);
@@ -394,7 +394,7 @@ vge_read_eeprom(struct vge_softc *sc, in
word = CSR_READ_2(sc, VGE_EERDDAT);
/* Turn off EEPROM access mode. */
- CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
+ CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*| VGE_EECSR_ECS*/);
CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
return word;
@@ -557,7 +557,7 @@ vge_cam_clear(struct vge_softc *sc)
/* Clear the VLAN filter too. */
- CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
+ CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE | VGE_CAMADDR_AVSEL);
for (i = 0; i < 8; i++)
CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
@@ -633,14 +633,14 @@ vge_cam_set(struct vge_softc *sc, uint8_
static void
vge_setmulti(struct vge_softc *sc)
{
- struct ifnet *ifp;
+ struct ethercom *ec = &sc->sc_ethercom;
+ struct ifnet *ifp = &ec->ec_if;
int error;
uint32_t h, hashes[2] = { 0, 0 };
struct ether_multi *enm;
struct ether_multistep step;
error = 0;
- ifp = &sc->sc_ethercom.ec_if;
/* First, zot all the multicast entries. */
vge_cam_clear(sc);
@@ -661,7 +661,7 @@ vge_setmulti(struct vge_softc *sc)
}
/* Now program new ones */
- ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
+ ETHER_FIRST_MULTI(step, ec, enm);
while (enm != NULL) {
/*
* If multicast range, fall back to ALLMULTI.
@@ -681,7 +681,7 @@ vge_setmulti(struct vge_softc *sc)
if (error) {
vge_cam_clear(sc);
- ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
+ ETHER_FIRST_MULTI(step, ec, enm);
while (enm != NULL) {
/*
* If multicast range, fall back to ALLMULTI.
@@ -880,6 +880,7 @@ vge_attach(device_t parent, device_t sel
uint8_t *eaddr;
struct vge_softc *sc = device_private(self);
struct ifnet *ifp;
+ struct mii_data * const mii = &sc->sc_mii;
struct pci_attach_args *pa = aux;
pci_chipset_tag_t pc = pa->pa_pc;
const char *intrstr;
@@ -992,21 +993,20 @@ vge_attach(device_t parent, device_t sel
/*
* Initialize our media structures and probe the MII.
*/
- sc->sc_mii.mii_ifp = ifp;
- sc->sc_mii.mii_readreg = vge_miibus_readreg;
- sc->sc_mii.mii_writereg = vge_miibus_writereg;
- sc->sc_mii.mii_statchg = vge_miibus_statchg;
-
- sc->sc_ethercom.ec_mii = &sc->sc_mii;
- ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
- ether_mediastatus);
- mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
+ mii->mii_ifp = ifp;
+ mii->mii_readreg = vge_miibus_readreg;
+ mii->mii_writereg = vge_miibus_writereg;
+ mii->mii_statchg = vge_miibus_statchg;
+
+ sc->sc_ethercom.ec_mii = mii;
+ ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
+ mii_attach(self, mii, 0xffffffff, MII_PHY_ANY,
MII_OFFSET_ANY, MIIF_DOPAUSE);
- if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
- ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
- ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
+ if (LIST_FIRST(&mii->mii_phys) == NULL) {
+ ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
+ ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
} else
- ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
+ ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
/*
* Attach the interface.
@@ -1081,7 +1081,7 @@ vge_newbuf(struct vge_softc *sc, int idx
#ifdef DIAGNOSTIC
/* If this descriptor is still owned by the chip, bail. */
- VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+ VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
rd_sts = le32toh(rxd->rd_sts);
VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
if (rd_sts & VGE_RDSTS_OWN) {
@@ -1099,7 +1099,7 @@ vge_newbuf(struct vge_softc *sc, int idx
vge_set_rxaddr(rxd, map->dm_segs[0].ds_addr);
rxd->rd_sts = 0;
rxd->rd_ctl = 0;
- VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/*
* Note: the manual fails to document the fact that for
@@ -1117,7 +1117,7 @@ vge_newbuf(struct vge_softc *sc, int idx
KASSERT(i >= 0);
sc->sc_rxdescs[i].rd_sts |= htole32(VGE_RDSTS_OWN);
VGE_RXDESCSYNC(sc, i,
- BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
sc->sc_rx_consumed = 0;
}
@@ -1169,7 +1169,7 @@ vge_rxeof(struct vge_softc *sc)
cur_rxd = &sc->sc_rxdescs[idx];
VGE_RXDESCSYNC(sc, idx,
- BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
rxstat = le32toh(cur_rxd->rd_sts);
if ((rxstat & VGE_RDSTS_OWN) != 0) {
VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
@@ -1337,7 +1337,7 @@ vge_txeof(struct vge_softc *sc)
sc->sc_tx_free < VGE_NTXDESC;
idx = VGE_NEXT_TXDESC(idx), sc->sc_tx_free++) {
VGE_TXDESCSYNC(sc, idx,
- BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
txstat = le32toh(sc->sc_txdescs[idx].td_sts);
VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
if (txstat & VGE_TDSTS_OWN) {
@@ -1350,7 +1350,7 @@ vge_txeof(struct vge_softc *sc)
bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 0,
txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
- if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
+ if (txstat & (VGE_TDSTS_EXCESSCOLL | VGE_TDSTS_COLL))
ifp->if_collisions++;
if (txstat & VGE_TDSTS_TXERR)
ifp->if_oerrors++;
@@ -1446,19 +1446,19 @@ vge_intr(void *arg)
if ((status & VGE_INTRS) == 0)
break;
- if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
+ if (status & (VGE_ISR_RXOK | VGE_ISR_RXOK_HIPRIO))
vge_rxeof(sc);
- if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
+ if (status & (VGE_ISR_RXOFLOW | VGE_ISR_RXNODESC)) {
vge_rxeof(sc);
CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
}
- if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
+ if (status & (VGE_ISR_TXOK0 | VGE_ISR_TIMER0))
vge_txeof(sc);
- if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL))
+ if (status & (VGE_ISR_TXDMA_STALL | VGE_ISR_RXDMA_STALL))
vge_init(ifp);
if (status & VGE_ISR_LINKSTS)
@@ -1493,7 +1493,7 @@ vge_encap(struct vge_softc *sc, struct m
#ifdef DIAGNOSTIC
/* If this descriptor is still owned by the chip, bail. */
VGE_TXDESCSYNC(sc, idx,
- BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
td_sts = le32toh(txd->td_sts);
VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
if (td_sts & VGE_TDSTS_OWN) {
@@ -1583,10 +1583,10 @@ vge_encap(struct vge_softc *sc, struct m
}
txd->td_ctl = htole32(td_ctl);
txd->td_sts = htole32(td_sts);
- VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
txd->td_sts = htole32(VGE_TDSTS_OWN | td_sts);
- VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
sc->sc_tx_free--;
@@ -1608,7 +1608,7 @@ vge_start(struct ifnet *ifp)
sc = ifp->if_softc;
if (!sc->sc_link ||
- (ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) {
+ (ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) {
return;
}
@@ -1666,7 +1666,7 @@ vge_start(struct ifnet *ifp)
sc->sc_txdescs[pidx].td_frag[0].tf_buflen |=
htole16(VGE_TXDESC_Q);
VGE_TXFRAGSYNC(sc, pidx, 1,
- BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
if (txs->txs_mbuf != m_head) {
m_freem(m_head);
@@ -1740,7 +1740,7 @@ vge_init(struct ifnet *ifp)
memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
bus_dmamap_sync(sc->sc_dmat, sc->sc_cddmamap,
VGE_CDTXOFF(0), sizeof(sc->sc_txdescs),
- BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
for (i = 0; i < VGE_NTXDESC; i++)
sc->sc_txsoft[i].txs_mbuf = NULL;
@@ -1756,18 +1756,18 @@ vge_init(struct ifnet *ifp)
* Set receive FIFO threshold. Also allow transmission and
* reception of VLAN tagged frames.
*/
- CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
- CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);
+ CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR | VGE_RXCFG_VTAGOPT);
+ CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES | VGE_VTAG_OPT2);
/* Set DMA burst length */
CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);
- CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);
+ CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO | VGE_TXCFG_NONBLK);
/* Set collision backoff algorithm */
- CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
- VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
+ CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM |
+ VGE_CHIPCFG1_CAP | VGE_CHIPCFG1_MBA | VGE_CHIPCFG1_BAKOPT);
CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);
/* Disable LPSEL field in priority resolution */
@@ -1793,7 +1793,7 @@ vge_init(struct ifnet *ifp)
CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);
/* Set up the receive filter -- allow large frames for VLANs. */
- CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);
+ CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST | VGE_RXCTL_RX_GIANT);
/* If we want promiscuous mode, set the allframes bit. */
if (ifp->if_flags & IFF_PROMISC) {
@@ -1826,7 +1826,7 @@ vge_init(struct ifnet *ifp)
CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
CSR_WRITE_1(sc, VGE_CRS0,
- VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);
+ VGE_CR0_TX_ENABLE | VGE_CR0_RX_ENABLE | VGE_CR0_START);
/*
* Configure one-shot timer for microsecond
@@ -1948,7 +1948,7 @@ vge_ifflags_cb(struct ethercom *ec)
struct vge_softc *sc = ifp->if_softc;
int change = ifp->if_flags ^ sc->sc_if_flags;
- if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
+ if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
return ENETRESET;
else if ((change & IFF_PROMISC) == 0)
return 0;
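
The vge attach hunk shows the second pattern from the log message: the MII structure is initialized through one local "struct mii_data * const mii" instead of spelling out sc->sc_mii for every member and call, and the same pointer covers the fallback medium installed when no PHY is found. A condensed sketch of that attach-time sequence; example_softc and the example_miibus_* callbacks are hypothetical, the mii/ifmedia calls are the ones above.

	static void
	example_mii_setup(device_t self, struct example_softc *sc, struct ifnet *ifp)
	{
		struct mii_data * const mii = &sc->sc_mii;

		mii->mii_ifp = ifp;
		mii->mii_readreg = example_miibus_readreg;
		mii->mii_writereg = example_miibus_writereg;
		mii->mii_statchg = example_miibus_statchg;

		sc->sc_ethercom.ec_mii = mii;
		ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
		mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);
		if (LIST_FIRST(&mii->mii_phys) == NULL) {
			/* No PHY answered: install a fixed placeholder medium. */
			ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
			ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
		} else
			ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
	}

Besides saving typing, the short pointer lets most of the rewrapped calls fit in 80 columns without continuation lines, which is what the KNF part of this commit is after.
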
Index: src/sys/dev/pci/if_vioif.c
diff -u src/sys/dev/pci/if_vioif.c:1.47 src/sys/dev/pci/if_vioif.c:1.48
--- src/sys/dev/pci/if_vioif.c:1.47 Mon Feb 4 02:49:28 2019
+++ src/sys/dev/pci/if_vioif.c Thu May 23 10:40:39 2019
@@ -1,4 +1,4 @@
-/* $NetBSD: if_vioif.c,v 1.47 2019/02/04 02:49:28 yamaguchi Exp $ */
+/* $NetBSD: if_vioif.c,v 1.48 2019/05/23 10:40:39 msaitoh Exp $ */
/*
* Copyright (c) 2010 Minoura Makoto.
@@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.47 2019/02/04 02:49:28 yamaguchi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.48 2019/05/23 10:40:39 msaitoh Exp $");
#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
@@ -511,7 +511,8 @@ vioif_alloc_mems(struct vioif_softc *sc)
P(p, ctrlq->ctrlq_cmd, sizeof(*ctrlq->ctrlq_cmd));
P(p, ctrlq->ctrlq_status, sizeof(*ctrlq->ctrlq_status));
P(p, ctrlq->ctrlq_rx, sizeof(*ctrlq->ctrlq_rx));
- P(p, ctrlq->ctrlq_mac_tbl_uc, sizeof(*ctrlq->ctrlq_mac_tbl_uc) + 0);
+ P(p, ctrlq->ctrlq_mac_tbl_uc,
+ sizeof(*ctrlq->ctrlq_mac_tbl_uc) + 0);
P(p, ctrlq->ctrlq_mac_tbl_mc,
(sizeof(*ctrlq->ctrlq_mac_tbl_mc)
+ ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES));
@@ -546,8 +547,10 @@ vioif_alloc_mems(struct vioif_softc *sc)
rxqsize = rxq->rxq_vq->vq_num;
txqsize = txq->txq_vq->vq_num;
- P(p, rxq->rxq_hdr_dmamaps, sizeof(rxq->rxq_hdr_dmamaps[0]) * rxqsize);
- P(p, txq->txq_hdr_dmamaps, sizeof(txq->txq_hdr_dmamaps[0]) * txqsize);
+ P(p, rxq->rxq_hdr_dmamaps,
+ sizeof(rxq->rxq_hdr_dmamaps[0]) * rxqsize);
+ P(p, txq->txq_hdr_dmamaps,
+ sizeof(txq->txq_hdr_dmamaps[0]) * txqsize);
P(p, rxq->rxq_dmamaps, sizeof(rxq->rxq_dmamaps[0]) * rxqsize);
P(p, txq->txq_dmamaps, sizeof(txq->txq_dmamaps[0]) * txqsize);
P(p, rxq->rxq_mbufs, sizeof(rxq->rxq_mbufs[0]) * rxqsize);
@@ -555,17 +558,17 @@ vioif_alloc_mems(struct vioif_softc *sc)
}
#undef P
-#define C(map, size, nsegs, usage) \
- do { \
- r = bus_dmamap_create(virtio_dmat(vsc), size, nsegs, size, 0, \
- BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, \
- &map); \
- if (r != 0) { \
- aprint_error_dev(sc->sc_dev, \
- "%s dmamap creation failed, " \
- "error code %d\n", usage, r); \
- goto err_reqs; \
- } \
+#define C(map, size, nsegs, usage) \
+ do { \
+ r = bus_dmamap_create(virtio_dmat(vsc), size, nsegs, size, 0, \
+ BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, \
+ &map); \
+ if (r != 0) { \
+ aprint_error_dev(sc->sc_dev, \
+ "%s dmamap creation failed, " \
+ "error code %d\n", usage, r); \
+ goto err_reqs; \
+ } \
} while (0)
#define C_L(map, buf, size, nsegs, rw, usage) \
C(map, size, nsegs, usage); \
@@ -835,7 +838,8 @@ vioif_attach(device_t parent, device_t s
}
snprintf(qname, sizeof(qname), "tx%d", i);
r = virtio_alloc_vq(vsc, txq->txq_vq, nvqs,
- (sizeof(struct virtio_net_hdr) + (ETHER_MAX_LEN - ETHER_HDR_LEN)),
+ (sizeof(struct virtio_net_hdr)
+ + (ETHER_MAX_LEN - ETHER_HDR_LEN)),
VIRTIO_NET_TX_MAXNSEGS + 1, qname);
if (r != 0)
goto err;
@@ -869,7 +873,8 @@ vioif_attach(device_t parent, device_t s
}
}
- sc->sc_ctl_softint = softint_establish(softint_flags, vioif_ctl_softint, sc);
+ sc->sc_ctl_softint = softint_establish(softint_flags,
+ vioif_ctl_softint, sc);
if (sc->sc_ctl_softint == NULL) {
aprint_error_dev(self, "cannot establish ctl softint\n");
goto err;
@@ -1110,7 +1115,8 @@ vioif_stop(struct ifnet *ifp, int disabl
}
static void
-vioif_send_common_locked(struct ifnet *ifp, struct vioif_txqueue *txq, bool is_transmit)
+vioif_send_common_locked(struct ifnet *ifp, struct vioif_txqueue *txq,
+ bool is_transmit)
{
struct vioif_softc *sc = ifp->if_softc;
struct virtio_softc *vsc = sc->sc_virtio;
@@ -1151,7 +1157,7 @@ vioif_send_common_locked(struct ifnet *i
r = bus_dmamap_load_mbuf(virtio_dmat(vsc),
txq->txq_dmamaps[slot],
- m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
+ m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
if (r != 0) {
/* maybe just too fragmented */
struct mbuf *newm;
@@ -1166,7 +1172,7 @@ vioif_send_common_locked(struct ifnet *i
m = newm;
r = bus_dmamap_load_mbuf(virtio_dmat(vsc),
txq->txq_dmamaps[slot],
- m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
+ m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
if (r != 0) {
aprint_error_dev(sc->sc_dev,
"tx dmamap load failed, error code %d\n",
@@ -1357,7 +1363,7 @@ vioif_add_rx_mbuf(struct vioif_rxqueue *
m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
r = bus_dmamap_load_mbuf(virtio_dmat(vsc),
rxq->rxq_dmamaps[i],
- m, BUS_DMA_READ|BUS_DMA_NOWAIT);
+ m, BUS_DMA_READ | BUS_DMA_NOWAIT);
if (r) {
m_freem(m);
rxq->rxq_mbufs[i] = NULL;
@@ -1678,7 +1684,7 @@ vioif_ctrl_load_cmdspec(struct vioif_sof
for (i = 0; i < nspecs; i++) {
r = bus_dmamap_load(virtio_dmat(vsc),
specs[i].dmamap, specs[i].buf, specs[i].bufsize,
- NULL, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
+ NULL, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
if (r) {
printf("%s: control command dmamap load failed, "
"error code %d\n", device_xname(sc->sc_dev), r);
@@ -1927,7 +1933,8 @@ vioif_ctrl_vq_done(struct virtqueue *vq)
static int
vioif_rx_filter(struct vioif_softc *sc)
{
- struct ifnet *ifp = &sc->sc_ethercom.ec_if;
+ struct ethercom *ec = &sc->sc_ethercom;
+ struct ifnet *ifp = &ec->ec_if;
struct ether_multi *enm;
struct ether_multistep step;
struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
@@ -1946,8 +1953,8 @@ vioif_rx_filter(struct vioif_softc *sc)
}
nentries = -1;
- ETHER_LOCK(&sc->sc_ethercom);
- ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
+ ETHER_LOCK(ec);
+ ETHER_FIRST_MULTI(step, ec, enm);
while (nentries++, enm != NULL) {
if (nentries >= VIRTIO_NET_CTRL_MAC_MAXENTRIES) {
allmulti = 1;
@@ -1965,7 +1972,7 @@ vioif_rx_filter(struct vioif_softc *sc)
rxfilter = 1;
set_unlock:
- ETHER_UNLOCK(&sc->sc_ethercom);
+ ETHER_UNLOCK(ec);
set:
if (rxfilter) {
@@ -2087,21 +2094,21 @@ vioif_ctl_softint(void *arg)
}
MODULE(MODULE_CLASS_DRIVER, if_vioif, "virtio");
-
+
#ifdef _MODULE
#include "ioconf.c"
#endif
-
-static int
+
+static int
if_vioif_modcmd(modcmd_t cmd, void *opaque)
{
int error = 0;
-
+
#ifdef _MODULE
switch (cmd) {
case MODULE_CMD_INIT:
- error = config_init_component(cfdriver_ioconf_if_vioif,
- cfattach_ioconf_if_vioif, cfdata_ioconf_if_vioif);
+ error = config_init_component(cfdriver_ioconf_if_vioif,
+ cfattach_ioconf_if_vioif, cfdata_ioconf_if_vioif);
break;
case MODULE_CMD_FINI:
error = config_fini_component(cfdriver_ioconf_if_vioif,
@@ -2109,9 +2116,9 @@ if_vioif_modcmd(modcmd_t cmd, void *opaq
break;
default:
error = ENOTTY;
- break;
+ break;
}
#endif
-
+
return error;
}
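
vioif_rx_filter() above and smsc_setmulti() below use the locked variant of the same walk: ETHER_LOCK() must be held around ETHER_FIRST_MULTI()/ETHER_NEXT_MULTI(), and it has to be dropped on every exit path, including the jump to the all-multicast fallback when a range of addresses is seen. A sketch of that shape; example_softc, example_program_hash() and example_set_allmulti() are hypothetical stand-ins.

	static void
	example_setmulti_locked(struct example_softc *sc)
	{
		struct ethercom *ec = &sc->sc_ethercom;
		struct ether_multistep step;
		struct ether_multi *enm;
		bool allmulti = false;

		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0) {
				allmulti = true;	/* range: accept all multicast */
				break;
			}
			example_program_hash(sc, enm->enm_addrlo);	/* hypothetical */
			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);	/* dropped before touching hardware */

		if (allmulti)
			example_set_allmulti(sc);	/* hypothetical */
	}
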
Index: src/sys/dev/pci/if_vr.c
diff -u src/sys/dev/pci/if_vr.c:1.127 src/sys/dev/pci/if_vr.c:1.128
--- src/sys/dev/pci/if_vr.c:1.127 Tue Jan 22 03:42:27 2019
+++ src/sys/dev/pci/if_vr.c Thu May 23 10:40:40 2019
@@ -1,4 +1,4 @@
-/* $NetBSD: if_vr.c,v 1.127 2019/01/22 03:42:27 msaitoh Exp $ */
+/* $NetBSD: if_vr.c,v 1.128 2019/05/23 10:40:40 msaitoh Exp $ */
/*-
* Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
@@ -97,7 +97,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_vr.c,v 1.127 2019/01/22 03:42:27 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_vr.c,v 1.128 2019/05/23 10:40:40 msaitoh Exp $");
@@ -270,7 +270,7 @@ do { \
((MCLBYTES - 1) & VR_RXCTL_BUFLEN)); \
__d->vr_status = htole32(VR_RXSTAT_FIRSTFRAG | \
VR_RXSTAT_LASTFRAG | VR_RXSTAT_OWN); \
- VR_CDRXSYNC((sc), (i), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
+ VR_CDRXSYNC((sc), (i), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \
} while (/* CONSTCOND */ 0)
/*
@@ -415,23 +415,23 @@ vr_mii_statchg(struct ifnet *ifp)
IFM_SUBTYPE(sc->vr_mii.mii_media_active) != IFM_NONE) {
sc->vr_link = true;
- if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON))
+ if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON | VR_CMD_RX_ON))
VR_CLRBIT16(sc, VR_COMMAND,
- (VR_CMD_TX_ON|VR_CMD_RX_ON));
+ (VR_CMD_TX_ON | VR_CMD_RX_ON));
if (sc->vr_mii.mii_media_active & IFM_FDX)
VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
else
VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
- VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
+ VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON | VR_CMD_RX_ON);
} else {
sc->vr_link = false;
- VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
+ VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_TX_ON | VR_CMD_RX_ON);
for (i = VR_TIMEOUT; i > 0; i--) {
delay(10);
if (!(CSR_READ_2(sc, VR_COMMAND) &
- (VR_CMD_TX_ON|VR_CMD_RX_ON)))
+ (VR_CMD_TX_ON | VR_CMD_RX_ON)))
break;
}
if (i == 0) {
@@ -452,7 +452,8 @@ vr_mii_statchg(struct ifnet *ifp)
static void
vr_setmulti(struct vr_softc *sc)
{
- struct ifnet *ifp;
+ struct ethercom *ec = &sc->vr_ec;
+ struct ifnet *ifp = &ec->ec_if;
int h = 0;
uint32_t hashes[2] = { 0, 0 };
struct ether_multistep step;
@@ -460,8 +461,6 @@ vr_setmulti(struct vr_softc *sc)
int mcnt = 0;
uint8_t rxfilt;
- ifp = &sc->vr_ec.ec_if;
-
rxfilt = CSR_READ_1(sc, VR_RXCFG);
if (ifp->if_flags & IFF_PROMISC) {
@@ -479,7 +478,7 @@ allmulti:
CSR_WRITE_4(sc, VR_MAR1, 0);
/* now program new ones */
- ETHER_FIRST_MULTI(step, &sc->vr_ec, enm);
+ ETHER_FIRST_MULTI(step, ec, enm);
while (enm != NULL) {
if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
ETHER_ADDR_LEN) != 0)
@@ -566,7 +565,7 @@ vr_add_rxbuf(struct vr_softc *sc, int i)
error = bus_dmamap_load(sc->vr_dmat, ds->ds_dmamap,
m_new->m_ext.ext_buf, m_new->m_ext.ext_size, NULL,
- BUS_DMA_READ|BUS_DMA_NOWAIT);
+ BUS_DMA_READ | BUS_DMA_NOWAIT);
if (error) {
aprint_error_dev(sc->vr_dev,
"unable to load rx DMA map %d, error = %d\n", i, error);
@@ -601,7 +600,8 @@ vr_rxeof(struct vr_softc *sc)
d = VR_CDRX(sc, i);
ds = VR_DSRX(sc, i);
- VR_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+ VR_CDRXSYNC(sc, i,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
rxstat = le32toh(d->vr_status);
@@ -838,7 +838,8 @@ vr_txeof(struct vr_softc *sc)
d = VR_CDTX(sc, i);
ds = VR_DSTX(sc, i);
- VR_CDTXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+ VR_CDTXSYNC(sc, i,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
txstat = le32toh(d->vr_status);
@@ -996,7 +997,7 @@ vr_start(struct ifnet *ifp)
struct vr_descsoft *ds;
int error, firsttx, nexttx, opending;
- if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
+ if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
return;
if (sc->vr_link == false)
return;
@@ -1039,7 +1040,7 @@ vr_start(struct ifnet *ifp)
if ((mtod(m0, uintptr_t) & 3) != 0 ||
m0->m_pkthdr.len < VR_MIN_FRAMELEN ||
bus_dmamap_load_mbuf(sc->vr_dmat, ds->ds_dmamap, m0,
- BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
+ BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
MGETHDR(m, M_DONTWAIT, MT_DATA);
if (m == NULL) {
aprint_error_dev(sc->vr_dev,
@@ -1067,7 +1068,7 @@ vr_start(struct ifnet *ifp)
m->m_pkthdr.len = m->m_len = VR_MIN_FRAMELEN;
}
error = bus_dmamap_load_mbuf(sc->vr_dmat,
- ds->ds_dmamap, m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
+ ds->ds_dmamap, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
if (error) {
m_freem(m);
aprint_error_dev(sc->vr_dev, "unable to load "
@@ -1115,7 +1116,7 @@ vr_start(struct ifnet *ifp)
d->vr_status = htole32(VR_TXSTAT_OWN);
VR_CDTXSYNC(sc, nexttx,
- BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Advance the tx pointer. */
sc->vr_txpending++;
@@ -1141,7 +1142,7 @@ vr_start(struct ifnet *ifp)
*/
VR_CDTX(sc, sc->vr_txlast)->vr_ctl |= htole32(VR_TXCTL_FINT);
VR_CDTXSYNC(sc, sc->vr_txlast,
- BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/*
* The entire packet chain is set up. Give the
@@ -1149,7 +1150,7 @@ vr_start(struct ifnet *ifp)
*/
VR_CDTX(sc, firsttx)->vr_status = htole32(VR_TXSTAT_OWN);
VR_CDTXSYNC(sc, firsttx,
- BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Start the transmitter. */
VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
@@ -1202,7 +1203,7 @@ vr_init(struct ifnet *ifp)
d = VR_CDTX(sc, i);
memset(d, 0, sizeof(struct vr_desc));
d->vr_next = htole32(VR_CDTXADDR(sc, VR_NEXTTX(i)));
- VR_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ VR_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
sc->vr_txpending = 0;
sc->vr_txdirty = 0;
@@ -1255,8 +1256,8 @@ vr_init(struct ifnet *ifp)
goto out;
/* Enable receiver and transmitter. */
- CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
- VR_CMD_TX_ON|VR_CMD_RX_ON|
+ CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL | VR_CMD_START |
+ VR_CMD_TX_ON | VR_CMD_RX_ON |
VR_CMD_RX_GO);
/* Enable interrupts. */
@@ -1391,7 +1392,7 @@ vr_stop(struct ifnet *ifp, int disable)
ifp->if_timer = 0;
VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
- VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
+ VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON | VR_CMD_TX_ON));
CSR_WRITE_2(sc, VR_IMR, 0x0000);
CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
@@ -1477,6 +1478,7 @@ vr_attach(device_t parent, device_t self
bus_dma_segment_t seg;
uint32_t reg;
struct ifnet *ifp;
+ struct mii_data * const mii = &sc->vr_mii;
uint8_t eaddr[ETHER_ADDR_LEN], mac;
int i, rseg, error;
char intrbuf[PCI_INTRSTR_LEN];
@@ -1584,7 +1586,7 @@ vr_attach(device_t parent, device_t self
* (Note some VT86C100A chip returns a product ID of VT3043)
*/
if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_VIATECH_VT3043)
- VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));
+ VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0 | VR_STICKHW_DS1));
/* Reset the adapter. */
vr_reset(sc);
@@ -1712,21 +1714,21 @@ vr_attach(device_t parent, device_t self
/*
* Initialize MII/media info.
*/
- sc->vr_mii.mii_ifp = ifp;
- sc->vr_mii.mii_readreg = vr_mii_readreg;
- sc->vr_mii.mii_writereg = vr_mii_writereg;
- sc->vr_mii.mii_statchg = vr_mii_statchg;
+ mii->mii_ifp = ifp;
+ mii->mii_readreg = vr_mii_readreg;
+ mii->mii_writereg = vr_mii_writereg;
+ mii->mii_statchg = vr_mii_statchg;
- sc->vr_ec.ec_mii = &sc->vr_mii;
- ifmedia_init(&sc->vr_mii.mii_media, IFM_IMASK, ether_mediachange,
+ sc->vr_ec.ec_mii = mii;
+ ifmedia_init(&mii->mii_media, IFM_IMASK, ether_mediachange,
ether_mediastatus);
- mii_attach(self, &sc->vr_mii, 0xffffffff, MII_PHY_ANY,
+ mii_attach(self, mii, 0xffffffff, MII_PHY_ANY,
MII_OFFSET_ANY, MIIF_FORCEANEG);
if (LIST_FIRST(&sc->vr_mii.mii_phys) == NULL) {
- ifmedia_add(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
- ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE);
+ ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
+ ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
} else
- ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_AUTO);
+ ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
sc->vr_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
@@ -1796,7 +1798,7 @@ vr_resume(device_t self, const pmf_qual_
struct vr_softc *sc = device_private(self);
if (PCI_PRODUCT(sc->vr_id) != PCI_PRODUCT_VIATECH_VT3043)
- VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));
+ VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0 | VR_STICKHW_DS1));
return true;
}
Index: src/sys/dev/usb/if_smsc.c
diff -u src/sys/dev/usb/if_smsc.c:1.43 src/sys/dev/usb/if_smsc.c:1.44
--- src/sys/dev/usb/if_smsc.c:1.43 Tue Mar 5 08:25:03 2019
+++ src/sys/dev/usb/if_smsc.c Thu May 23 10:40:40 2019
@@ -1,4 +1,4 @@
-/* $NetBSD: if_smsc.c,v 1.43 2019/03/05 08:25:03 msaitoh Exp $ */
+/* $NetBSD: if_smsc.c,v 1.44 2019/05/23 10:40:40 msaitoh Exp $ */
/* $OpenBSD: if_smsc.c,v 1.4 2012/09/27 12:38:11 jsg Exp $ */
/* $FreeBSD: src/sys/dev/usb/net/if_smsc.c,v 1.1 2012/08/15 04:03:55 gonzo Exp $ */
@@ -61,7 +61,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_smsc.c,v 1.43 2019/03/05 08:25:03 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_smsc.c,v 1.44 2019/05/23 10:40:40 msaitoh Exp $");
#ifdef _KERNEL_OPT
#include "opt_usb.h"
@@ -460,7 +460,8 @@ smsc_hash(uint8_t addr[ETHER_ADDR_LEN])
void
smsc_setmulti(struct smsc_softc *sc)
{
- struct ifnet * const ifp = &sc->sc_ec.ec_if;
+ struct ethercom *ec = &sc->sc_ec;
+ struct ifnet * const ifp = &ec->ec_if;
struct ether_multi *enm;
struct ether_multistep step;
uint32_t hashtbl[2] = { 0, 0 };
@@ -483,11 +484,11 @@ allmulti:
sc->sc_mac_csr &= ~(SMSC_MAC_CSR_PRMS | SMSC_MAC_CSR_MCPAS);
}
- ETHER_LOCK(&sc->sc_ec);
- ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
+ ETHER_LOCK(ec);
+ ETHER_FIRST_MULTI(step, ec, enm);
while (enm != NULL) {
if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
- ETHER_UNLOCK(&sc->sc_ec);
+ ETHER_UNLOCK(ec);
goto allmulti;
}
@@ -495,7 +496,7 @@ allmulti:
hashtbl[hash >> 5] |= 1 << (hash & 0x1F);
ETHER_NEXT_MULTI(step, enm);
}
- ETHER_UNLOCK(&sc->sc_ec);
+ ETHER_UNLOCK(ec);
/* Debug */
if (sc->sc_mac_csr & SMSC_MAC_CSR_HPFILT) {
@@ -526,13 +527,13 @@ smsc_sethwcsum(struct smsc_softc *sc)
}
/* Enable/disable the Rx checksum */
- if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx))
+ if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
val |= (SMSC_COE_CTRL_RX_EN | SMSC_COE_CTRL_RX_MODE);
else
val &= ~(SMSC_COE_CTRL_RX_EN | SMSC_COE_CTRL_RX_MODE);
/* Enable/disable the Tx checksum (currently not supported) */
- if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Tx|IFCAP_CSUM_UDPv4_Tx))
+ if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx))
val |= SMSC_COE_CTRL_TX_EN;
else
val &= ~SMSC_COE_CTRL_TX_EN;
@@ -711,7 +712,7 @@ smsc_start_locked(struct ifnet *ifp)
return;
}
- if ((ifp->if_flags & (IFF_OACTIVE|IFF_RUNNING)) != IFF_RUNNING) {
+ if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING) {
smsc_dbg_printf(sc, "%s: not running\n", __func__);
return;
}
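
The smsc filter update also shows the usual encoding of a 64-entry multicast hash filter in two 32-bit words, hashtbl[hash >> 5] |= 1 << (hash & 0x1F): assuming the hash value is in the range 0..63, hash / 32 selects the word and hash % 32 selects the bit within it. As a tiny self-contained sketch (the function name is hypothetical):

	/* Set the filter bit for one hash value, assumed to be < 64. */
	static void
	example_hash_set(uint32_t hashtbl[2], uint32_t hash)
	{

		hashtbl[hash >> 5] |= 1U << (hash & 0x1F);
	}
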