On 30/11/15 21:39, Konrad Rzeszutek Wilk wrote:
> On Fri, Nov 27, 2015 at 05:17:05PM +0000, David Vrabel wrote:
>> Add IOCTL_GNTDEV_GRANT_COPY to allow applications to copy between user
>> space buffers and grant references.
>>
>> This interface is similar to the GNTTABOP_copy hypercall ABI except
>> the local buffers are provided using a virtual address (instead of a
>> GFN and offset).  To avoid userspace from having to page align its
>> buffers the driver will use two or more ops if required.
>>
>> If the ioctl returns 0, the application must check the status of each
>> segment with the segment's status field.  If the ioctl returns a -ve
>> error code (EINVAL or EFAULT), the status of individual ops is
>> undefined.
> 
> Are there any test tools that could be used for this? To make sure that
> regression wise this does not get broken?

See attached.

>> +static int gntdev_copy(struct gntdev_copy_batch *batch)
>> +{
>> +    unsigned int i;
>> +
>> +    gnttab_batch_copy(batch->ops, batch->nr_ops);
>> +    gntdev_put_pages(batch);
>> +
>> +    /*
>> +     * For each completed op, update the status if the op failed
>> +     * and a previous failure for the segment hasn't been
>> +     * recorded.
> 
> How could a previous failure not be recorded? Could you mention that
> in this nice comment please?

All the negatives in this sentence are confusing so I'll reword.

If we haven't recorded a failure for the previous op in the segment it's
because it succeeded.  The aim here is to record the first op failure
for a segment.  From the ioctl documentation:

+ * If the driver had to split a segment into two or more ops, @status
+ * includes the status of the first failed op for that segment (or
+ * GNTST_okay if all ops were successful).

>> +static long gntdev_ioctl_grant_copy(struct gntdev_priv *priv, void __user 
>> *u)
>> +{
>> +    struct ioctl_gntdev_grant_copy copy;
>> +    struct gntdev_copy_batch batch = { .nr_ops = 0, .nr_pages = 0, };
>> +    unsigned int i;
>> +    int ret = 0;
>> +
>> +    if (copy_from_user(&copy, u, sizeof(copy)))
>> +            return -EFAULT;
>>
> +
> 
> No check on the .nr_pages' ? What if it is 0xfffffffffffffffffffffffff?
> 
> Ditto for .nr_ops?

batch.nr_ops and batch.nr_pages are internal, not supplied by the user
and are limited by the batch size.

I guess you're really asking about the value of copy.count?  This
doesn't matter because we process the segments one by one and have a
fixed batch size for the ops.

There's also a cond_resched() every segment so submitting a single ioctl
with a crazy number of segments is really no different from userspace
calling the ioctl a crazy number of times.

David

p.s. Please trim your replies when reviewing.
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>

#include <xenctrl.h>

/*
 * One copy segment for IOCTL_GNTDEV_GRANT_COPY.
 *
 * Userspace mirror of the proposed kernel ABI: each side of the copy
 * is either a local virtual address or a (grant ref, offset, domid)
 * triple, selected by @flags.  Layout must match the kernel header
 * exactly — do not reorder or resize members.
 */
struct gntdev_grant_copy_segment {
	union {
		void *virt;		/* local buffer, used when this side is not a gref */
		struct {
			grant_ref_t ref;	/* grant reference of the foreign page */
			uint16_t offset;	/* byte offset into the granted page */
			domid_t domid;		/* domain that granted the page */
		} foreign;
	} source, dest;
	uint16_t len;		/* bytes to copy for this segment */

	uint16_t flags;  /* GNTCOPY_* */
	int16_t status; /* GNTST_*; if the driver split the segment into
			 * several ops, this is the first failed op's status
			 * (or GNTST_okay if all succeeded). */
};

/*
 * Local redefinition of the new gntdev ioctl under test; kept here so
 * the test builds without patched kernel headers.  'G'/8 must match
 * the kernel's include/uapi/xen/gntdev.h definition.
 */
#define IOCTL_GNTDEV_GRANT_COPY \
	_IOC(_IOC_NONE, 'G', 8, sizeof(struct ioctl_gntdev_grant_copy))
struct ioctl_gntdev_grant_copy {
	unsigned int count;	/* number of entries in @segments */
	struct gntdev_grant_copy_segment *segments;	/* user array of segments */
};

/* Number of granted pages shared and copied by this test. */
#define NR_REFS 15

/*
 * Fill buf[0..len) with random bytes from /dev/urandom.
 *
 * Exits the process on any error.  Fixes over the original:
 * - the progress counter is size_t (the old 'int r' was compared
 *   against the size_t 'len', a signed/unsigned mismatch);
 * - read()'s return is held in ssize_t, not int;
 * - an unexpected zero-byte read (EOF) aborts instead of looping
 *   forever.
 */
static void random_fill(char *buf, size_t len)
{
    int fd;
    size_t r = 0;

    fd = open("/dev/urandom", O_RDONLY);
    if (fd < 0) {
        perror("open(/dev/urandom)");
        exit(1);
    }

    while (r < len) {
        ssize_t ret;

        ret = read(fd, buf + r, len - r);
        if (ret < 0) {
            perror("read");
            exit(1);
        }
        if (ret == 0) {
            /* /dev/urandom should never hit EOF; bail rather than spin. */
            fprintf(stderr, "read: unexpected EOF\n");
            exit(1);
        }
        r += (size_t)ret;
    }
    close(fd);
}

static void do_copy(struct ioctl_gntdev_grant_copy *copy, char *a, char *b)
{
    int fd;
    unsigned int i;
    int ret;

    random_fill(a, NR_REFS * 4096);

    fd = open("/dev/xen/gntdev", O_RDWR);
    if (fd < 0) {
        perror("open(/dev/xen/gntdev)");
        exit(1);
    }

    ret = ioctl(fd, IOCTL_GNTDEV_GRANT_COPY, copy);
    if (ret < 0) {
        perror("ioctl(IOCTL_GNTDEV_GRANT_COPY)");
        exit(1);
    }

    close(fd);

    for (i = 0; i < NR_REFS; i++) {
        if (copy->segments[i].status != GNTST_okay) {
            fprintf(stderr, "[%u]: bad copy status: %d\n", i, copy->segments[i].status);
            exit(1);
        }
    }

    if (memcmp(a, b, NR_REFS * 4096) != 0) {
        fprintf(stderr, "a != b\n");
        exit(1);
    }
}

/*
 * Test driver: share NR_REFS pages with ourselves (domid 0) and
 * exercise IOCTL_GNTDEV_GRANT_COPY in both directions:
 *   1. foreign gref -> local buffer
 *   2. local buffer -> foreign gref
 *
 * Fixes over the original: the shared pages are unmapped and both
 * libxenctrl handles are closed before exit (the original leaked
 * them, relying on process teardown).
 */
int main(void)
{
    xc_gntshr *gs;
    xc_gnttab *gt;
    uint32_t refs[NR_REFS];
    struct gntdev_grant_copy_segment seg[NR_REFS];
    struct ioctl_gntdev_grant_copy copy;
    char *shared;
    char local[4096 * NR_REFS];
    unsigned int i;

    gs = xc_gntshr_open(NULL, 0);
    if (!gs) {
        perror("xc_gntshr_open");
        exit(1);
    }

    /* Opened only to ensure the grant-table side of gntdev is usable;
       never used directly — TODO confirm it is still required. */
    gt = xc_gnttab_open(NULL, 0);
    if (!gt) {
        perror("xc_gnttab_open");
        exit(1);
    }

    /* Grant NR_REFS writable pages to ourselves (domid 0). */
    shared = xc_gntshr_share_pages(gs, 0, NR_REFS, refs, 1);
    if (shared == NULL) {
        perror("xc_gntshr_share_pages");
        exit(1);
    }

    /*
     * 1. ref -> local
     */
    printf("ref -> local\n");

    for (i = 0; i < NR_REFS; i++) {
        seg[i].source.foreign.ref = refs[i];
        seg[i].source.foreign.offset = 0;
        seg[i].source.foreign.domid = 0;

        seg[i].dest.virt = local + i * 4096;

        seg[i].len = 4096;
        seg[i].flags = GNTCOPY_source_gref;
    }

    copy.count = NR_REFS;
    copy.segments = seg;

    /* Source of truth is the shared (granted) pages. */
    do_copy(&copy, shared, local);

    printf("  ok\n");

    /*
     * 2. local -> ref
     */
    printf("local -> ref\n");

    for (i = 0; i < NR_REFS; i++) {
        seg[i].source.virt = local + i * 4096;

        seg[i].dest.foreign.ref = refs[i];
        seg[i].dest.foreign.offset = 0;
        seg[i].dest.foreign.domid = 0;

        seg[i].len = 4096;
        seg[i].flags = GNTCOPY_dest_gref;
    }

    copy.count = NR_REFS;
    copy.segments = seg;

    /* Source of truth is now the local buffer. */
    do_copy(&copy, local, shared);

    printf("  ok\n");

    /* Release the grants and libxenctrl handles. */
    xc_gntshr_munmap(gs, shared, NR_REFS);
    xc_gnttab_close(gt);
    xc_gntshr_close(gs);

    return 0;
}
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

Reply via email to