From: Jerome Glisse <jgli...@redhat.com>

GPU reset needs to be exclusive, with only one happening at a time. For this,
add an rw semaphore so that any path that triggers GPU activity has to take
the semaphore as a reader, which still allows those paths to run concurrently
with one another.

The GPU reset path takes the semaphore as a writer, ensuring that no GPU
activity and no other reset can run concurrently with it.
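
Below, for reference, is a minimal sketch of the locking pattern this patch
introduces, written as plain kernel-style C against <linux/rwsem.h>. It
assumes the semaphore is initialized with init_rwsem() during device init
(that hunk is not part of this diff), and the demo_* names are illustrative
only, not symbols from the radeon driver.

#include <linux/rwsem.h>

struct demo_device {
	struct rw_semaphore exclusive_lock;
};

static void demo_device_init(struct demo_device *ddev)
{
	/* Must happen once, before any down_read()/down_write(). */
	init_rwsem(&ddev->exclusive_lock);
}

/* Paths that trigger GPU activity (CS ioctl, GEM create/set_domain) take
 * the lock as readers, so they can still run concurrently with each other. */
static int demo_submit(struct demo_device *ddev)
{
	int r = 0;

	down_read(&ddev->exclusive_lock);
	/* ... submit commands / create or validate buffers ... */
	up_read(&ddev->exclusive_lock);
	return r;
}

/* GPU reset takes the lock as a writer: it waits for all readers to drop
 * the lock and blocks new ones, so no submission races with the reset and
 * no two resets run at the same time. */
static int demo_gpu_reset(struct demo_device *ddev)
{
	int r = 0;

	down_write(&ddev->exclusive_lock);
	/* ... save scratch regs, reset and reinit the ASIC ... */
	up_write(&ddev->exclusive_lock);
	return r;
}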

Signed-off-by: Jerome Glisse <jgli...@redhat.com>
---
 drivers/gpu/drm/radeon/radeon.h        |    1 +
 drivers/gpu/drm/radeon/radeon_cs.c     |    5 +++++
 drivers/gpu/drm/radeon/radeon_device.c |    2 ++
 drivers/gpu/drm/radeon/radeon_gem.c    |    7 +++++++
 4 files changed, 15 insertions(+)

diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 77b4519b..29d6986 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1446,6 +1446,7 @@ struct radeon_device {
        struct device                   *dev;
        struct drm_device               *ddev;
        struct pci_dev                  *pdev;
+       struct rw_semaphore             exclusive_lock;
        /* ASIC */
        union radeon_asic_config        config;
        enum radeon_family              family;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index f1b7527..7ee6491 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -499,7 +499,9 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
        struct radeon_cs_parser parser;
        int r;
 
+       down_read(&rdev->exclusive_lock);
        if (!rdev->accel_working) {
+               up_read(&rdev->exclusive_lock);
                return -EBUSY;
        }
        /* initialize parser */
@@ -512,6 +514,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
        if (r) {
                DRM_ERROR("Failed to initialize parser !\n");
                radeon_cs_parser_fini(&parser, r);
+               up_read(&rdev->exclusive_lock);
                r = radeon_cs_handle_lockup(rdev, r);
                return r;
        }
@@ -520,6 +523,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                if (r != -ERESTARTSYS)
                        DRM_ERROR("Failed to parse relocation %d!\n", r);
                radeon_cs_parser_fini(&parser, r);
+               up_read(&rdev->exclusive_lock);
                r = radeon_cs_handle_lockup(rdev, r);
                return r;
        }
@@ -533,6 +537,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
        }
 out:
        radeon_cs_parser_fini(&parser, r);
+       up_read(&rdev->exclusive_lock);
        r = radeon_cs_handle_lockup(rdev, r);
        return r;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index f654ba8..c8fdb40 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -988,6 +988,7 @@ int radeon_gpu_reset(struct radeon_device *rdev)
        int r;
        int resched;
 
+       down_write(&rdev->exclusive_lock);
        radeon_save_bios_scratch_regs(rdev);
        /* block TTM */
        resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
@@ -1007,6 +1008,7 @@ int radeon_gpu_reset(struct radeon_device *rdev)
                dev_info(rdev->dev, "GPU reset failed\n");
        }
 
+       up_write(&rdev->exclusive_lock);
        return r;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index d9b0809..f99db63 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -215,12 +215,14 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
        uint32_t handle;
        int r;
 
+       down_read(&rdev->exclusive_lock);
        /* create a gem object to contain this object in */
        args->size = roundup(args->size, PAGE_SIZE);
        r = radeon_gem_object_create(rdev, args->size, args->alignment,
                                        args->initial_domain, false,
                                        false, &gobj);
        if (r) {
+               up_read(&rdev->exclusive_lock);
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
@@ -228,10 +230,12 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
+               up_read(&rdev->exclusive_lock);
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        args->handle = handle;
+       up_read(&rdev->exclusive_lock);
        return 0;
 }
 
@@ -240,6 +244,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 {
        /* transition the BO to a domain -
         * just validate the BO into a certain domain */
+       struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_set_domain *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
@@ -255,9 +260,11 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
        }
        robj = gem_to_radeon_bo(gobj);
 
+       down_read(&rdev->exclusive_lock);
        r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
 
        drm_gem_object_unreference_unlocked(gobj);
+       up_read(&rdev->exclusive_lock);
        r = radeon_gem_handle_lockup(robj->rdev, r);
        return r;
 }
-- 
1.7.10.2

