On 01/06/2011 12:19 PM, Sheng Yang wrote:
Then we can support the mask bit operation of assigned devices.



+int kvm_assigned_device_update_msix_mask_bit(struct kvm *kvm,
+                               int assigned_dev_id, int entry, bool mask)
+{
+       int r = -EFAULT;
+       struct kvm_assigned_dev_kernel *adev;
+       int i;
+
+       if (!irqchip_in_kernel(kvm))
+               return r;
+
+       mutex_lock(&kvm->lock);
+       adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
+                                     assigned_dev_id);
+       if (!adev)
+               goto out;
+
+       for (i = 0; i < adev->entries_nr; i++)
+               if (adev->host_msix_entries[i].entry == entry) {
+                       if (mask)
+                               disable_irq_nosync(
+                                       adev->host_msix_entries[i].vector);

Is it okay to call disable_irq_nosync() here? IIRC we don't check the mask bit on irq delivery, so we may forward an interrupt to the guest after the mask bit was set.
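
(To illustrate the concern, not the current code: a delivery-time check would have to re-read the guest-visible vector control word before injecting, roughly like the sketch below. guest_entry_masked() is a hypothetical helper, not part of this patch.)

static bool guest_entry_masked(struct kvm_msix_mmio *mmio, int entry)
{
        u32 ctrl;
        u32 __user *ctrl_pos = (u32 __user *)(mmio->table_base_va +
                        entry * PCI_MSIX_ENTRY_SIZE +
                        PCI_MSIX_ENTRY_VECTOR_CTRL);

        /* On a fault, err on the side of delivering the interrupt. */
        if (get_user(ctrl, ctrl_pos))
                return false;

        return ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT;
}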

What does the PCI spec say about the mask bit? When does it take effect?

Another question is whether disable_irq_nosync() actually programs the device's mask bit or not. If it does, it's slow, and it may be better to leave interrupts enabled but keep an internal pending bit. If it doesn't program the mask bit, it's fine.
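
(A rough sketch of that pending-bit alternative, just to make the suggestion concrete; struct msix_entry_state, guest_masked, pending and kvm_inject_msix() are made-up names, not anything in this patch:)

struct msix_entry_state {
        bool guest_masked;      /* guest has set the entry's mask bit */
        bool pending;           /* an interrupt arrived while masked */
        /* ... whatever routing info injection needs ... */
};

static void kvm_inject_msix(struct msix_entry_state *state);   /* hypothetical */

static irqreturn_t msix_host_handler(int irq, void *dev_id)
{
        struct msix_entry_state *state = dev_id;

        if (state->guest_masked) {
                /* Leave the irq line enabled; just remember the interrupt. */
                state->pending = true;
                return IRQ_HANDLED;
        }
        kvm_inject_msix(state);
        return IRQ_HANDLED;
}

/* On unmask, replay anything that arrived while the entry was masked. */
static void msix_guest_unmask(struct msix_entry_state *state)
{
        state->guest_masked = false;
        if (state->pending) {
                state->pending = false;
                kvm_inject_msix(state);
        }
}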

+                       else
+                               enable_irq(adev->host_msix_entries[i].vector);
+                       r = 0;
+                       break;
+               }
+out:
+       mutex_unlock(&kvm->lock);
+       return r;
+}

+
+static int msix_table_mmio_read(struct kvm_io_device *this, gpa_t addr,
+                               int len, void *val)
+{
+       struct kvm_msix_mmio_dev *mmio_dev =
+               container_of(this, struct kvm_msix_mmio_dev, table_dev);
+       struct kvm_msix_mmio *mmio;
+       int idx, ret = 0, entry, offset, r;
+
+       mutex_lock(&mmio_dev->lock);
+       idx = get_mmio_table_index(mmio_dev, addr, len);
+       if (idx < 0) {
+               ret = -EOPNOTSUPP;
+               goto out;
+       }
+       if ((addr & 0x3) || (len != 4 && len != 8))
+               goto out;
+
+       offset = addr & 0xf;
+       if (offset == PCI_MSIX_ENTRY_VECTOR_CTRL && len == 8)
+               goto out;
+
+       mmio = &mmio_dev->mmio[idx];
+       entry = (addr - mmio->table_base_addr) / PCI_MSIX_ENTRY_SIZE;
+       r = copy_from_user(val, (void __user *)(mmio->table_base_va +
+                       entry * PCI_MSIX_ENTRY_SIZE + offset), len);
+       if (r)
+               goto out;

and return ret == 0?
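
(The copy_from_user() failure falls through to out: and the caller sees success. One way to fix it, assuming -EFAULT is the errno wanted here:)

        if (r) {
                ret = -EFAULT;
                goto out;
        }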

+out:
+       mutex_unlock(&mmio_dev->lock);
+       return ret;
+}
+
+static int msix_table_mmio_write(struct kvm_io_device *this, gpa_t addr,
+                               int len, const void *val)
+{
+       struct kvm_msix_mmio_dev *mmio_dev =
+               container_of(this, struct kvm_msix_mmio_dev, table_dev);
+       struct kvm_msix_mmio *mmio;
+       int idx, entry, offset, ret = 0, r = 0;
+       gpa_t entry_base;
+       u32 old_ctrl, new_ctrl;
+       u32 *ctrl_pos;
+
+       mutex_lock(&mmio_dev->lock);
+       idx = get_mmio_table_index(mmio_dev, addr, len);
+       if (idx < 0) {
+               ret = -EOPNOTSUPP;
+               goto out;
+       }
+       if ((addr & 0x3) || (len != 4 && len != 8))
+               goto out;
+
+       offset = addr & 0xF;
+       if (offset == PCI_MSIX_ENTRY_VECTOR_CTRL && len == 8)
+               goto out;
+
+       mmio = &mmio_dev->mmio[idx];
+       entry = (addr - mmio->table_base_addr) / PCI_MSIX_ENTRY_SIZE;
+       entry_base = mmio->table_base_va + entry * PCI_MSIX_ENTRY_SIZE;
+       ctrl_pos = (u32 *)(entry_base + PCI_MSIX_ENTRY_VECTOR_CTRL);
+
+       if (get_user(old_ctrl, ctrl_pos))
+               goto out;
+
+       /* Don't allow writing to other fields when the entry is unmasked */
+       if (!(old_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) &&
+           offset != PCI_MSIX_ENTRY_VECTOR_CTRL)
+               goto out;
+
+       if (copy_to_user((void __user *)(entry_base + offset), val, len))
+               goto out;
+
+       if (get_user(new_ctrl, ctrl_pos))
+               goto out;

Same problem here: a get_user() failure falls through and returns ret == 0.

+
+       if ((offset < PCI_MSIX_ENTRY_VECTOR_CTRL && len == 4) ||
+           (offset < PCI_MSIX_ENTRY_DATA && len == 8))
+               ret = -ENOTSYNC;

goto out?
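
(If that is the intent, i.e. a write touching the address/data fields should not fall through to the mask-bit handling below, the hunk would look something like:)

        if ((offset < PCI_MSIX_ENTRY_VECTOR_CTRL && len == 4) ||
            (offset < PCI_MSIX_ENTRY_DATA && len == 8)) {
                ret = -ENOTSYNC;
                goto out;
        }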

+       if (old_ctrl == new_ctrl)
+               goto out;
+       if (!(old_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) &&
+                       (new_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT))
+               r = update_msix_mask_bit(mmio_dev->kvm, mmio, entry, 1);
+       else if ((old_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) &&
+                       !(new_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT))
+               r = update_msix_mask_bit(mmio_dev->kvm, mmio, entry, 0);
+       if (r || ret)
+               ret = -ENOTSYNC;
+out:
+       mutex_unlock(&mmio_dev->lock);
+       return ret;
+}
+

--
error compiling committee.c: too many arguments to function
