Re: [PATCH V4 XRT Alveo 09/20] fpga: xrt: management physical function driver (root)

2021-04-14 Thread Tom Rix



On 4/9/21 11:50 AM, Max Zhen wrote:

Hi Tom,


On 3/31/21 6:03 AM, Tom Rix wrote:

On 3/23/21 10:29 PM, Lizhi Hou wrote:

The PCIE device driver which attaches to management function on Alveo
devices. It instantiates one or more group drivers which, in turn,
instantiate platform drivers. The instantiation of group and platform
drivers is completely dtb driven.

Signed-off-by: Sonal Santan
Signed-off-by: Max Zhen
Signed-off-by: Lizhi Hou
---
  drivers/fpga/xrt/mgmt/root.c | 333 
+++

  1 file changed, 333 insertions(+)
  create mode 100644 drivers/fpga/xrt/mgmt/root.c

diff --git a/drivers/fpga/xrt/mgmt/root.c 
b/drivers/fpga/xrt/mgmt/root.c

new file mode 100644
index ..f97f92807c01
--- /dev/null
+++ b/drivers/fpga/xrt/mgmt/root.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Alveo Management Function Driver
+ *
+ * Copyright (C) 2020-2021 Xilinx, Inc.
+ *
+ * Authors:
+ *   Cheng Zhen
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "xroot.h"
+#include "xmgnt.h"
+#include "metadata.h"
+
+#define XMGMT_MODULE_NAME    "xrt-mgmt"

ok

+#define XMGMT_DRIVER_VERSION "4.0.0"
+
+#define XMGMT_PDEV(xm)   ((xm)->pdev)
+#define XMGMT_DEV(xm) (&(XMGMT_PDEV(xm)->dev))
+#define xmgmt_err(xm, fmt, args...)  \
+ dev_err(XMGMT_DEV(xm), "%s: " fmt, __func__, ##args)
+#define xmgmt_warn(xm, fmt, args...) \
+ dev_warn(XMGMT_DEV(xm), "%s: " fmt, __func__, ##args)
+#define xmgmt_info(xm, fmt, args...) \
+ dev_info(XMGMT_DEV(xm), "%s: " fmt, __func__, ##args)
+#define xmgmt_dbg(xm, fmt, args...)  \
+ dev_dbg(XMGMT_DEV(xm), "%s: " fmt, __func__, ##args)
+#define XMGMT_DEV_ID(_pcidev)    \
+ ({ typeof(_pcidev) (pcidev) = (_pcidev);    \
+ ((pci_domain_nr((pcidev)->bus) << 16) | \
+ PCI_DEVID((pcidev)->bus->number, 0)); })
+
+static struct class *xmgmt_class;
+
+/* PCI Device IDs */

add a comment on what a golden image is here something like

/*

* Golden image is preloaded on the device when it is shipped to 
customer.


* Then, customer can load other shells (from Xilinx or some other 
vendor).


* If something goes wrong with the shell, customer can always go back to

* golden and start over again.

*/



Will do.



+#define PCI_DEVICE_ID_U50_GOLDEN 0xD020
+#define PCI_DEVICE_ID_U50    0x5020
+static const struct pci_device_id xmgmt_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_U50_GOLDEN), 
}, /* Alveo U50 (golden) */
+ { PCI_DEVICE(PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_U50), }, /* 
Alveo U50 */

+ { 0, }
+};
+
+struct xmgmt {
+ struct pci_dev *pdev;
+ void *root;
+
+ bool ready;
+};
+
+static int xmgmt_config_pci(struct xmgmt *xm)
+{
+ struct pci_dev *pdev = XMGMT_PDEV(xm);
+ int rc;
+
+ rc = pcim_enable_device(pdev);
+ if (rc < 0) {
+ xmgmt_err(xm, "failed to enable device: %d", rc);
+ return rc;
+ }
+
+ rc = pci_enable_pcie_error_reporting(pdev);
+ if (rc)

ok

+ xmgmt_warn(xm, "failed to enable AER: %d", rc);
+
+ pci_set_master(pdev);
+
+ rc = pcie_get_readrq(pdev);
+ if (rc > 512)

512 is magic number, change this to a #define



Will do.



+ pcie_set_readrq(pdev, 512);
+ return 0;
+}
+
+static int xmgmt_match_slot_and_save(struct device *dev, void *data)
+{
+ struct xmgmt *xm = data;
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (XMGMT_DEV_ID(pdev) == XMGMT_DEV_ID(xm->pdev)) {
+ pci_cfg_access_lock(pdev);
+ pci_save_state(pdev);
+ }
+
+ return 0;
+}
+
+static void xmgmt_pci_save_config_all(struct xmgmt *xm)
+{
+ bus_for_each_dev(_bus_type, NULL, xm, 
xmgmt_match_slot_and_save);

refactor expected in v5 when pseudo bus change happens.



There might be some misunderstanding here...

No matter how we reorganize our code (using platform_device bus type 
or defining our own bus type), it's a driver that drives a PCIE device 
after all. So, this mgmt/root.c must be a PCIE driver, which may 
interact with a whole bunch of IP drivers through a pseudo bus we are 
about to create.


What this code is doing here is completely of PCIE business (PCIE 
config space access). So, I think it is appropriate code in a PCIE 
driver.


The PCIE device we are driving is a multi-function device. The mgmt pf 
is of function 0, which, according to PCIE spec, can manage other 
functions on the same device. So, I think it's appropriate for mgmt pf 
driver (this root driver) to find its peer function (through PCIE bus 
type) on the same device and do something about it in certain special 
cases.


Please let me know why you expect this code to be refactored and how 
you want it to be refactored. I might have missed something here...



ok, i get it.

thanks for the explanation.

Tom




+}
+
+static int xmgmt_match_slot_and_restore(struct device *dev, void 
*data)

+{
+ struct xmgmt *xm = 

Re: [PATCH V4 XRT Alveo 09/20] fpga: xrt: management physical function driver (root)

2021-04-09 Thread Max Zhen

Hi Tom,


On 3/31/21 6:03 AM, Tom Rix wrote:

On 3/23/21 10:29 PM, Lizhi Hou wrote:

The PCIE device driver which attaches to management function on Alveo
devices. It instantiates one or more group drivers which, in turn,
instantiate platform drivers. The instantiation of group and platform
drivers is completely dtb driven.

Signed-off-by: Sonal Santan
Signed-off-by: Max Zhen
Signed-off-by: Lizhi Hou
---
  drivers/fpga/xrt/mgmt/root.c | 333 +++
  1 file changed, 333 insertions(+)
  create mode 100644 drivers/fpga/xrt/mgmt/root.c

diff --git a/drivers/fpga/xrt/mgmt/root.c b/drivers/fpga/xrt/mgmt/root.c
new file mode 100644
index ..f97f92807c01
--- /dev/null
+++ b/drivers/fpga/xrt/mgmt/root.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Alveo Management Function Driver
+ *
+ * Copyright (C) 2020-2021 Xilinx, Inc.
+ *
+ * Authors:
+ *   Cheng Zhen
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "xroot.h"
+#include "xmgnt.h"
+#include "metadata.h"
+
+#define XMGMT_MODULE_NAME"xrt-mgmt"

ok

+#define XMGMT_DRIVER_VERSION "4.0.0"
+
+#define XMGMT_PDEV(xm)   ((xm)->pdev)
+#define XMGMT_DEV(xm)(&(XMGMT_PDEV(xm)->dev))
+#define xmgmt_err(xm, fmt, args...)  \
+ dev_err(XMGMT_DEV(xm), "%s: " fmt, __func__, ##args)
+#define xmgmt_warn(xm, fmt, args...) \
+ dev_warn(XMGMT_DEV(xm), "%s: " fmt, __func__, ##args)
+#define xmgmt_info(xm, fmt, args...) \
+ dev_info(XMGMT_DEV(xm), "%s: " fmt, __func__, ##args)
+#define xmgmt_dbg(xm, fmt, args...)  \
+ dev_dbg(XMGMT_DEV(xm), "%s: " fmt, __func__, ##args)
+#define XMGMT_DEV_ID(_pcidev)\
+ ({ typeof(_pcidev) (pcidev) = (_pcidev);\
+ ((pci_domain_nr((pcidev)->bus) << 16) | \
+ PCI_DEVID((pcidev)->bus->number, 0)); })
+
+static struct class *xmgmt_class;
+
+/* PCI Device IDs */

add a comment on what a golden image is here something like

/*

* Golden image is preloaded on the device when it is shipped to customer.

* Then, customer can load other shells (from Xilinx or some other vendor).

* If something goes wrong with the shell, customer can always go back to

* golden and start over again.

*/



Will do.



+#define PCI_DEVICE_ID_U50_GOLDEN 0xD020
+#define PCI_DEVICE_ID_U500x5020
+static const struct pci_device_id xmgmt_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_U50_GOLDEN), }, /* Alveo 
U50 (golden) */
+ { PCI_DEVICE(PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_U50), }, /* Alveo U50 */
+ { 0, }
+};
+
+struct xmgmt {
+ struct pci_dev *pdev;
+ void *root;
+
+ bool ready;
+};
+
+static int xmgmt_config_pci(struct xmgmt *xm)
+{
+ struct pci_dev *pdev = XMGMT_PDEV(xm);
+ int rc;
+
+ rc = pcim_enable_device(pdev);
+ if (rc < 0) {
+ xmgmt_err(xm, "failed to enable device: %d", rc);
+ return rc;
+ }
+
+ rc = pci_enable_pcie_error_reporting(pdev);
+ if (rc)

ok

+ xmgmt_warn(xm, "failed to enable AER: %d", rc);
+
+ pci_set_master(pdev);
+
+ rc = pcie_get_readrq(pdev);
+ if (rc > 512)

512 is magic number, change this to a #define



Will do.



+ pcie_set_readrq(pdev, 512);
+ return 0;
+}
+
+static int xmgmt_match_slot_and_save(struct device *dev, void *data)
+{
+ struct xmgmt *xm = data;
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (XMGMT_DEV_ID(pdev) == XMGMT_DEV_ID(xm->pdev)) {
+ pci_cfg_access_lock(pdev);
+ pci_save_state(pdev);
+ }
+
+ return 0;
+}
+
+static void xmgmt_pci_save_config_all(struct xmgmt *xm)
+{
+ bus_for_each_dev(_bus_type, NULL, xm, xmgmt_match_slot_and_save);

refactor expected in v5 when pseudo bus change happens.



There might be some misunderstanding here...

No matter how we reorganize our code (using platform_device bus type or 
defining our own bus type), it's a driver that drives a PCIE device 
after all. So, this mgmt/root.c must be a PCIE driver, which may 
interact with a whole bunch of IP drivers through a pseudo bus we are 
about to create.


What this code is doing here is completely of PCIE business (PCIE config 
space access). So, I think it is appropriate code in a PCIE driver.


The PCIE device we are driving is a multi-function device. The mgmt pf 
is of function 0, which, according to PCIE spec, can manage other 
functions on the same device. So, I think it's appropriate for mgmt pf 
driver (this root driver) to find its peer function (through PCIE bus 
type) on the same device and do something about it in certain special cases.


Please let me know why you expect this code to be refactored and how you 
want it to be refactored. I might have missed something here...




+}
+
+static int xmgmt_match_slot_and_restore(struct device *dev, void *data)
+{
+ struct xmgmt *xm = data;
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (XMGMT_DEV_ID(pdev) == 

Re: [PATCH V4 XRT Alveo 09/20] fpga: xrt: management physical function driver (root)

2021-03-31 Thread Tom Rix


On 3/23/21 10:29 PM, Lizhi Hou wrote:
> The PCIE device driver which attaches to management function on Alveo
> devices. It instantiates one or more group drivers which, in turn,
> instantiate platform drivers. The instantiation of group and platform
> drivers is completely dtb driven.
>
> Signed-off-by: Sonal Santan 
> Signed-off-by: Max Zhen 
> Signed-off-by: Lizhi Hou 
> ---
>  drivers/fpga/xrt/mgmt/root.c | 333 +++
>  1 file changed, 333 insertions(+)
>  create mode 100644 drivers/fpga/xrt/mgmt/root.c
>
> diff --git a/drivers/fpga/xrt/mgmt/root.c b/drivers/fpga/xrt/mgmt/root.c
> new file mode 100644
> index ..f97f92807c01
> --- /dev/null
> +++ b/drivers/fpga/xrt/mgmt/root.c
> @@ -0,0 +1,333 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Xilinx Alveo Management Function Driver
> + *
> + * Copyright (C) 2020-2021 Xilinx, Inc.
> + *
> + * Authors:
> + *   Cheng Zhen 
> + */
> +
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +
> +#include "xroot.h"
> +#include "xmgnt.h"
> +#include "metadata.h"
> +
> +#define XMGMT_MODULE_NAME"xrt-mgmt"
ok
> +#define XMGMT_DRIVER_VERSION "4.0.0"
> +
> +#define XMGMT_PDEV(xm)   ((xm)->pdev)
> +#define XMGMT_DEV(xm)(&(XMGMT_PDEV(xm)->dev))
> +#define xmgmt_err(xm, fmt, args...)  \
> + dev_err(XMGMT_DEV(xm), "%s: " fmt, __func__, ##args)
> +#define xmgmt_warn(xm, fmt, args...) \
> + dev_warn(XMGMT_DEV(xm), "%s: " fmt, __func__, ##args)
> +#define xmgmt_info(xm, fmt, args...) \
> + dev_info(XMGMT_DEV(xm), "%s: " fmt, __func__, ##args)
> +#define xmgmt_dbg(xm, fmt, args...)  \
> + dev_dbg(XMGMT_DEV(xm), "%s: " fmt, __func__, ##args)
> +#define XMGMT_DEV_ID(_pcidev)\
> + ({ typeof(_pcidev) (pcidev) = (_pcidev);\
> + ((pci_domain_nr((pcidev)->bus) << 16) | \
> + PCI_DEVID((pcidev)->bus->number, 0)); })
> +
> +static struct class *xmgmt_class;
> +
> +/* PCI Device IDs */

add a comment on what a golden image is here something like

/*

* Golden image is preloaded on the device when it is shipped to customer.

* Then, customer can load other shells (from Xilinx or some other vendor).

* If something goes wrong with the shell, customer can always go back to

* golden and start over again.

*/


> +#define PCI_DEVICE_ID_U50_GOLDEN 0xD020
> +#define PCI_DEVICE_ID_U500x5020
> +static const struct pci_device_id xmgmt_pci_ids[] = {
> + { PCI_DEVICE(PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_U50_GOLDEN), }, /* 
> Alveo U50 (golden) */
> + { PCI_DEVICE(PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_U50), }, /* Alveo U50 
> */
> + { 0, }
> +};
> +
> +struct xmgmt {
> + struct pci_dev *pdev;
> + void *root;
> +
> + bool ready;
> +};
> +
> +static int xmgmt_config_pci(struct xmgmt *xm)
> +{
> + struct pci_dev *pdev = XMGMT_PDEV(xm);
> + int rc;
> +
> + rc = pcim_enable_device(pdev);
> + if (rc < 0) {
> + xmgmt_err(xm, "failed to enable device: %d", rc);
> + return rc;
> + }
> +
> + rc = pci_enable_pcie_error_reporting(pdev);
> + if (rc)
ok
> + xmgmt_warn(xm, "failed to enable AER: %d", rc);
> +
> + pci_set_master(pdev);
> +
> + rc = pcie_get_readrq(pdev);
> + if (rc > 512)
512 is magic number, change this to a #define
> + pcie_set_readrq(pdev, 512);
> + return 0;
> +}
> +
> +static int xmgmt_match_slot_and_save(struct device *dev, void *data)
> +{
> + struct xmgmt *xm = data;
> + struct pci_dev *pdev = to_pci_dev(dev);
> +
> + if (XMGMT_DEV_ID(pdev) == XMGMT_DEV_ID(xm->pdev)) {
> + pci_cfg_access_lock(pdev);
> + pci_save_state(pdev);
> + }
> +
> + return 0;
> +}
> +
> +static void xmgmt_pci_save_config_all(struct xmgmt *xm)
> +{
> + bus_for_each_dev(_bus_type, NULL, xm, xmgmt_match_slot_and_save);
refactor expected in v5 when pseudo bus change happens.
> +}
> +
> +static int xmgmt_match_slot_and_restore(struct device *dev, void *data)
> +{
> + struct xmgmt *xm = data;
> + struct pci_dev *pdev = to_pci_dev(dev);
> +
> + if (XMGMT_DEV_ID(pdev) == XMGMT_DEV_ID(xm->pdev)) {
> + pci_restore_state(pdev);
> + pci_cfg_access_unlock(pdev);
> + }
> +
> + return 0;
> +}
> +
> +static void xmgmt_pci_restore_config_all(struct xmgmt *xm)
> +{
> + bus_for_each_dev(_bus_type, NULL, xm, xmgmt_match_slot_and_restore);
> +}
> +
> +static void xmgmt_root_hot_reset(struct pci_dev *pdev)
> +{
> + struct xmgmt *xm = pci_get_drvdata(pdev);
> + struct pci_bus *bus;
> + u8 pci_bctl;
> + u16 pci_cmd, devctl;
> + int i, ret;
> +
> + xmgmt_info(xm, "hot reset start");
> +
> + xmgmt_pci_save_config_all(xm);
> +
> + pci_disable_device(pdev);
> +
> + bus = pdev->bus;
whitespace, all these nl's are not needed
> +
> + /*
> +  * When flipping the SBR bit, device can fall off the bus. This is
> +  * usually no 

[PATCH V4 XRT Alveo 09/20] fpga: xrt: management physical function driver (root)

2021-03-23 Thread Lizhi Hou
The PCIE device driver which attaches to management function on Alveo
devices. It instantiates one or more group drivers which, in turn,
instantiate platform drivers. The instantiation of group and platform
drivers is completely dtb driven.

Signed-off-by: Sonal Santan 
Signed-off-by: Max Zhen 
Signed-off-by: Lizhi Hou 
---
 drivers/fpga/xrt/mgmt/root.c | 333 +++
 1 file changed, 333 insertions(+)
 create mode 100644 drivers/fpga/xrt/mgmt/root.c

diff --git a/drivers/fpga/xrt/mgmt/root.c b/drivers/fpga/xrt/mgmt/root.c
new file mode 100644
index ..f97f92807c01
--- /dev/null
+++ b/drivers/fpga/xrt/mgmt/root.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Alveo Management Function Driver
+ *
+ * Copyright (C) 2020-2021 Xilinx, Inc.
+ *
+ * Authors:
+ * Cheng Zhen 
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "xroot.h"
+#include "xmgnt.h"
+#include "metadata.h"
+
+#define XMGMT_MODULE_NAME  "xrt-mgmt"
+#define XMGMT_DRIVER_VERSION   "4.0.0"
+
+#define XMGMT_PDEV(xm) ((xm)->pdev)
+#define XMGMT_DEV(xm)  (&(XMGMT_PDEV(xm)->dev))
+#define xmgmt_err(xm, fmt, args...)\
+   dev_err(XMGMT_DEV(xm), "%s: " fmt, __func__, ##args)
+#define xmgmt_warn(xm, fmt, args...)   \
+   dev_warn(XMGMT_DEV(xm), "%s: " fmt, __func__, ##args)
+#define xmgmt_info(xm, fmt, args...)   \
+   dev_info(XMGMT_DEV(xm), "%s: " fmt, __func__, ##args)
+#define xmgmt_dbg(xm, fmt, args...)\
+   dev_dbg(XMGMT_DEV(xm), "%s: " fmt, __func__, ##args)
+#define XMGMT_DEV_ID(_pcidev)  \
+   ({ typeof(_pcidev) (pcidev) = (_pcidev);\
+   ((pci_domain_nr((pcidev)->bus) << 16) | \
+   PCI_DEVID((pcidev)->bus->number, 0)); })
+
+static struct class *xmgmt_class;
+
+/* PCI Device IDs */
+#define PCI_DEVICE_ID_U50_GOLDEN   0xD020
+#define PCI_DEVICE_ID_U50  0x5020
+static const struct pci_device_id xmgmt_pci_ids[] = {
+   { PCI_DEVICE(PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_U50_GOLDEN), }, /* 
Alveo U50 (golden) */
+   { PCI_DEVICE(PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_U50), }, /* Alveo U50 
*/
+   { 0, }
+};
+
+struct xmgmt {
+   struct pci_dev *pdev;
+   void *root;
+
+   bool ready;
+};
+
+static int xmgmt_config_pci(struct xmgmt *xm)
+{
+   struct pci_dev *pdev = XMGMT_PDEV(xm);
+   int rc;
+
+   rc = pcim_enable_device(pdev);
+   if (rc < 0) {
+   xmgmt_err(xm, "failed to enable device: %d", rc);
+   return rc;
+   }
+
+   rc = pci_enable_pcie_error_reporting(pdev);
+   if (rc)
+   xmgmt_warn(xm, "failed to enable AER: %d", rc);
+
+   pci_set_master(pdev);
+
+   rc = pcie_get_readrq(pdev);
+   if (rc > 512)
+   pcie_set_readrq(pdev, 512);
+   return 0;
+}
+
+static int xmgmt_match_slot_and_save(struct device *dev, void *data)
+{
+   struct xmgmt *xm = data;
+   struct pci_dev *pdev = to_pci_dev(dev);
+
+   if (XMGMT_DEV_ID(pdev) == XMGMT_DEV_ID(xm->pdev)) {
+   pci_cfg_access_lock(pdev);
+   pci_save_state(pdev);
+   }
+
+   return 0;
+}
+
+static void xmgmt_pci_save_config_all(struct xmgmt *xm)
+{
+   bus_for_each_dev(&pci_bus_type, NULL, xm, xmgmt_match_slot_and_save);
+}
+
+static int xmgmt_match_slot_and_restore(struct device *dev, void *data)
+{
+   struct xmgmt *xm = data;
+   struct pci_dev *pdev = to_pci_dev(dev);
+
+   if (XMGMT_DEV_ID(pdev) == XMGMT_DEV_ID(xm->pdev)) {
+   pci_restore_state(pdev);
+   pci_cfg_access_unlock(pdev);
+   }
+
+   return 0;
+}
+
+static void xmgmt_pci_restore_config_all(struct xmgmt *xm)
+{
+   bus_for_each_dev(&pci_bus_type, NULL, xm, xmgmt_match_slot_and_restore);
+}
+
+static void xmgmt_root_hot_reset(struct pci_dev *pdev)
+{
+   struct xmgmt *xm = pci_get_drvdata(pdev);
+   struct pci_bus *bus;
+   u8 pci_bctl;
+   u16 pci_cmd, devctl;
+   int i, ret;
+
+   xmgmt_info(xm, "hot reset start");
+
+   xmgmt_pci_save_config_all(xm);
+
+   pci_disable_device(pdev);
+
+   bus = pdev->bus;
+
+   /*
+* When flipping the SBR bit, device can fall off the bus. This is
+* usually no problem at all so long as drivers are working properly
+* after SBR. However, some systems complain bitterly when the device
+* falls off the bus.
+* The quick solution is to temporarily disable the SERR reporting of
+* switch port during SBR.
+*/
+
+   pci_read_config_word(bus->self, PCI_COMMAND, &pci_cmd);
+   pci_write_config_word(bus->self, PCI_COMMAND, (pci_cmd & 
~PCI_COMMAND_SERR));
+   pcie_capability_read_word(bus->self, PCI_EXP_DEVCTL, &devctl);
+   pcie_capability_write_word(bus->self, PCI_EXP_DEVCTL, (devctl & 
~PCI_EXP_DEVCTL_FERE));
+   pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &pci_bctl);
+   pci_write_config_byte(bus->self,