---
 intel/intel_bufmgr_gem.c |    3 ++-
 intel/intel_decode.c     |    3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index 28f8887..8ca6d43 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
@@ -1338,13 +1338,14 @@ int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)

 static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
 {
-       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       drm_intel_bufmgr_gem *bufmgr_gem;
        int ret = 0;

        if (bo == NULL)
                return 0;

+       bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        pthread_mutex_lock(&bufmgr_gem->lock);

        if (bo_gem->map_count <= 0) {
diff --git a/intel/intel_decode.c b/intel/intel_decode.c
index 19a8d36..e8daf6a 100644
--- a/intel/intel_decode.c
+++ b/intel/intel_decode.c
@@ -3893,7 +3893,7 @@ drm_intel_decode(struct drm_intel_decode *ctx)
        int ret;
        unsigned int index = 0;
        uint32_t devid;
-       int size = ctx->base_count * 4;
+       int size;
        void *temp;

        if (!ctx)
@@ -3903,6 +3903,7 @@ drm_intel_decode(struct drm_intel_decode *ctx)
         * the batchbuffer.  This lets us avoid a bunch of length
         * checking in statically sized packets.
         */
+       size = ctx->base_count * 4;
        temp = malloc(size + 4096);
        memcpy(temp, ctx->base_data, size);
        memset((char *)temp + size, 0xd0, 4096);
-- 
1.7.10.4

Reply via email to