Re: PATCH: VMD fixes for PCI Config Space and BAR Allocation [passthrough PCI support]

2020-08-19 Thread Dave Voutila


Jordan Hargrave writes:

> This is the first patch for adding PCI passthrough support to VMD.
> I am splitting up the necessary changes into smaller patches.
>
> This code fixes the pci device union for accessing PCI config space >= 0x40
> pcidump -xxx would return garbage data due to union overlap

I can confirm at least in an OpenBSD guest that pcidump is returning
something different for the virtio devices:

24,26c24,26
<   0x0040: 0001   
<   0x0050: 0010   
<   0x0060:  2000 ff4188fd 000b
---
>   0x0040:    
>   0x0050:    
>   0x0060:    
41,43c41,43
<   0x0040: 0001   
<   0x0050: 0010   
<   0x0060:  1000 ff418911 000b
---
>   0x0040:    
>   0x0050:    
>   0x0060:    
46c46
<   0x0090:   01d25048 000c
---
>   0x0090:    
58,60c58,60
<   0x0040: 0001   
<   0x0050: 0010   
<   0x0060:  d000 ff418909 000b
---
>   0x0040:    
>   0x0050:    
>   0x0060:    
63c63
<   0x0090:   019adaf5 000c
---
>   0x0090:    
75,77c75,77
<   0x0040: 0001   
<   0x0050: 0010   
<   0x0060:  2000 ff418924 000b
---
>   0x0040:    
>   0x0050:    
>   0x0060:    


I'll apply it to my other system where I keep a mix of OpenBSD and Linux
guests running.

>
> pci_add_bar now requires specifying the BAR area size to allocate
>
> Extra cookie argument added to pci_add_device
>

PCI isn't something I grok, so hopefully someone else can take a look at
this to see if it makes sense.

-Dave Voutila



PATCH: VMD fixes for PCI Config Space and BAR Allocation [passthrough PCI support]

2020-08-18 Thread Jordan Hargrave
This is the first patch for adding PCI passthrough support to VMD.  
I am splitting up the necessary changes into smaller patches.

This code fixes the pci device union for accessing PCI config space >= 0x40
pcidump -xxx would return garbage data due to union overlap

pci_add_bar now requires specifying the BAR area size to allocate

Extra cookie argument added to pci_add_device

---
 usr.sbin/vmd/pci.c| 50 +++
 usr.sbin/vmd/pci.h| 80 ++-
 usr.sbin/vmd/virtio.c | 25 +++---
 3 files changed, 89 insertions(+), 66 deletions(-)

diff --git a/usr.sbin/vmd/pci.c b/usr.sbin/vmd/pci.c
index 954235eb6..4ce393237 100644
--- a/usr.sbin/vmd/pci.c
+++ b/usr.sbin/vmd/pci.c
@@ -39,28 +39,47 @@ extern char *__progname;
 const uint8_t pci_pic_irqs[PCI_MAX_PIC_IRQS] = {3, 5, 6, 7, 9, 10, 11, 12,
 14, 15};
 
+/*
+ * pci_mkbar
+ *
+ * Calculates BAR address given a size and alignment.
+ * Returns allocated address and updates next address
+ * Returns zero if address is out of range
+ */
+static uint64_t
+pci_mkbar(uint64_t *base, uint32_t size, uint32_t align, uint64_t maxbase)
+{
+   uint64_t mask = size - 1;
+   uint64_t cbase;
+
+   if (*base >= maxbase)
+   return (0);
+   cbase = *base;
+   *base = (*base + size + mask) & ~mask;
+   return cbase;
+}
+
 /*
  * pci_add_bar
  *
  * Adds a BAR for the PCI device 'id'. On access, 'barfn' will be
  * called, and passed 'cookie' as an identifier.
  *
- * BARs are fixed size, meaning all I/O BARs requested have the
- * same size and all MMIO BARs have the same size.
- *
  * Parameters:
  *  id: PCI device to add the BAR to (local count, eg if id == 4,
  *  this BAR is to be added to the VM's 5th PCI device)
  *  type: type of the BAR to add (PCI_MAPREG_TYPE_xxx)
+ *  size: Size of BAR area
  *  barfn: callback function invoked on BAR access
  *  cookie: cookie passed to barfn on access
  *
  * Returns 0 if the BAR was added successfully, 1 otherwise.
  */
 int
-pci_add_bar(uint8_t id, uint32_t type, void *barfn, void *cookie)
+pci_add_bar(uint8_t id, uint32_t type, uint32_t size, void *barfn, void *cookie)
 {
uint8_t bar_reg_idx, bar_ct;
+   uint64_t base = 0;
 
/* Check id */
if (id >= pci.pci_dev_ct)
@@ -74,31 +93,31 @@ pci_add_bar(uint8_t id, uint32_t type, void *barfn, void *cookie)
/* Compute BAR address and add */
bar_reg_idx = (PCI_MAPREG_START + (bar_ct * 4)) / 4;
if (type == PCI_MAPREG_TYPE_MEM) {
-   if (pci.pci_next_mmio_bar >= VMM_PCI_MMIO_BAR_END)
+   base = pci_mkbar(&pci.pci_next_mmio_bar, size, 4096, VMM_PCI_MMIO_BAR_END);
+   if (base == 0)
return (1);
 
pci.pci_devices[id].pd_cfg_space[bar_reg_idx] =
-   PCI_MAPREG_MEM_ADDR(pci.pci_next_mmio_bar);
-   pci.pci_next_mmio_bar += VMM_PCI_MMIO_BAR_SIZE;
+   PCI_MAPREG_MEM_ADDR(base);
pci.pci_devices[id].pd_barfunc[bar_ct] = barfn;
pci.pci_devices[id].pd_bar_cookie[bar_ct] = cookie;
pci.pci_devices[id].pd_bartype[bar_ct] = PCI_BAR_TYPE_MMIO;
-   pci.pci_devices[id].pd_barsize[bar_ct] = VMM_PCI_MMIO_BAR_SIZE;
+   pci.pci_devices[id].pd_barsize[bar_ct] = size;
pci.pci_devices[id].pd_bar_ct++;
} else if (type == PCI_MAPREG_TYPE_IO) {
-   if (pci.pci_next_io_bar >= VMM_PCI_IO_BAR_END)
+   base = pci_mkbar(&pci.pci_next_io_bar, size, 4, VMM_PCI_IO_BAR_END);
+   if (base == 0)
return (1);
 
pci.pci_devices[id].pd_cfg_space[bar_reg_idx] =
-   PCI_MAPREG_IO_ADDR(pci.pci_next_io_bar) |
+   PCI_MAPREG_IO_ADDR(base) |
PCI_MAPREG_TYPE_IO;
-   pci.pci_next_io_bar += VMM_PCI_IO_BAR_SIZE;
pci.pci_devices[id].pd_barfunc[bar_ct] = barfn;
pci.pci_devices[id].pd_bar_cookie[bar_ct] = cookie;
DPRINTF("%s: adding pci bar cookie for dev %d bar %d = %p",
__progname, id, bar_ct, cookie);
pci.pci_devices[id].pd_bartype[bar_ct] = PCI_BAR_TYPE_IO;
-   pci.pci_devices[id].pd_barsize[bar_ct] = VMM_PCI_IO_BAR_SIZE;
+   pci.pci_devices[id].pd_barsize[bar_ct] = size;
pci.pci_devices[id].pd_bar_ct++;
}
 
@@ -165,7 +184,7 @@ pci_get_dev_irq(uint8_t id)
 int
 pci_add_device(uint8_t *id, uint16_t vid, uint16_t pid, uint8_t class,
 uint8_t subclass, uint16_t subsys_vid, uint16_t subsys_id,
-uint8_t irq_needed, pci_cs_fn_t csfunc)
+uint8_t irq_needed, pci_cs_fn_t csfunc, void *cookie)
 {
/* Exceeded max devices? */
if (pci.pci_dev_ct >= PCI_CONFIG_MAX_DEV)
@@ -186,6 +205,7 @@ pci_add_device(uint8_t *id, uint16_t vid, uint16_t pid, uint8_t class,
pci.pci_devices[*i