[PATCH 2/3] Take bo type argument out of the ioctl interface.

2007-10-10 Thread Kristian Høgsberg
From: Kristian Høgsberg [EMAIL PROTECTED]

The buffer object type is still tracked internally, but it is no longer
part of the user space visible ioctl interface.  If the bo create ioctl
specifies a non-NULL buffer address we assume drm_bo_type_user,
otherwise drm_bo_type_dc.  Kernel side allocations call
drm_buffer_object_create() directly and can still specify drm_bo_type_kernel.
Not 100% sure this makes sense either, but with this patch the buffer type
is no longer exported and we can clean up the internals later on.
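
For illustration only, a minimal sketch of the two call styles under the new
interface.  The buffer sizes and flag combinations below are made-up
placeholders (the flag names follow that era's drm.h); only the drmBOCreate()
signature itself comes from this patch.

#include <errno.h>
#include <stdlib.h>
#include "xf86drm.h"
#include "xf86mm.h"

static int create_example_bos(int fd)
{
    drmBO kernel_bo, user_bo;
    void *user_ptr;
    int ret;

    /* Kernel-backed buffer (was drm_bo_type_dc): pass NULL as user_buffer. */
    ret = drmBOCreate(fd, 64 * 1024, 0, NULL,
                      DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_MEM_TT,
                      0, &kernel_bo);
    if (ret)
        return ret;

    /* User-backed buffer (drm_bo_type_user): pass a page-aligned pointer;
     * the kernel now infers the type from the non-NULL address. */
    if (posix_memalign(&user_ptr, 4096, 64 * 1024))
        return -ENOMEM;
    return drmBOCreate(fd, 64 * 1024, 0, user_ptr,
                       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0, &user_bo);
}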
---
 libdrm/xf86drm.c |   19 +++
 libdrm/xf86mm.h  |8 +++-
 linux-core/drm_bo.c  |   11 +++
 linux-core/drm_objects.h |6 ++
 shared-core/drm.h|8 
 5 files changed, 19 insertions(+), 33 deletions(-)

diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c
index bb2b3ab..c450a98 100644
--- a/libdrm/xf86drm.c
+++ b/libdrm/xf86drm.c
@@ -2698,8 +2698,8 @@ static void drmBOCopyReply(const struct drm_bo_info_rep *rep, drmBO *buf)
 
 
 
-int drmBOCreate(int fd, unsigned long start, unsigned long size,
-   unsigned pageAlignment, void *user_buffer, drm_bo_type_t type,
+int drmBOCreate(int fd, unsigned long size,
+   unsigned pageAlignment, void *user_buffer,
uint64_t mask,
unsigned hint, drmBO *buf)
 {
@@ -2713,23 +2713,11 @@ int drmBOCreate(int fd, unsigned long start, unsigned long size,
     req->mask = mask;
     req->hint = hint;
     req->size = size;
-    req->type = type;
     req->page_alignment = pageAlignment;
+    req->buffer_start = (unsigned long) user_buffer;
 
     buf->virtual = NULL;
 
-    switch(type) {
-    case drm_bo_type_dc:
-        req->buffer_start = start;
-	break;
-    case drm_bo_type_user:
-	req->buffer_start = (unsigned long) user_buffer;
-	buf->virtual = user_buffer;
-	break;
-    default:
-	return -EINVAL;
-    }
-
     do {
	ret = ioctl(fd, DRM_IOCTL_BO_CREATE, &arg);
     } while (ret != 0 && errno == EAGAIN);
@@ -2777,7 +2765,6 @@ int drmBOReference(int fd, unsigned handle, drmBO *buf)
return -errno;
 
 drmBOCopyReply(rep, buf);
-    buf->type = drm_bo_type_dc;
     buf->mapVirtual = NULL;
     buf->mapCount = 0;
     buf->virtual = NULL;
diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h
index cacd13a..0dac7ef 100644
--- a/libdrm/xf86mm.h
+++ b/libdrm/xf86mm.h
@@ -106,7 +106,6 @@ typedef struct _drmFence
 
 typedef struct _drmBO
 {
-drm_bo_type_t type;
 unsigned handle;
 uint64_t mapHandle;
 uint64_t flags;
@@ -179,10 +178,9 @@ extern int drmBOCreateList(int numTarget, drmBOList *list);
  * Buffer object functions.
  */
 
-extern int drmBOCreate(int fd, unsigned long start, unsigned long size,
-  unsigned pageAlignment,void *user_buffer,
-  drm_bo_type_t type, uint64_t mask,
-  unsigned hint, drmBO *buf);
+extern int drmBOCreate(int fd, unsigned long size,
+  unsigned pageAlignment, void *user_buffer,
+  uint64_t mask, unsigned hint, drmBO *buf);
 extern int drmBODestroy(int fd, drmBO *buf);
 extern int drmBOReference(int fd, unsigned handle, drmBO *buf);
 extern int drmBOUnReference(int fd, drmBO *buf);
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index 7dd9856..e2f460e 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -1620,7 +1620,10 @@ int drm_buffer_object_create(struct drm_device *dev,
 	INIT_LIST_HEAD(&bo->vma_list);
 #endif
 	bo->dev = dev;
-	bo->type = type;
+	if (buffer_start != 0)
+		bo->type = drm_bo_type_user;
+	else
+		bo->type = type;
 	bo->num_pages = num_pages;
 	bo->mem.mem_type = DRM_BO_MEM_LOCAL;
 	bo->mem.num_pages = bo->num_pages;
@@ -1783,8 +1786,8 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
 	struct drm_buffer_object *entry;
 	int ret = 0;
 
-	DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align, %d type\n",
-		  (int)(req->size / 1024), req->page_alignment * 4, req->type);
+	DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
+		  (int)(req->size / 1024), req->page_alignment * 4);
 
 	if (!dev->bm.initialized) {
 		DRM_ERROR("Buffer object manager is not initialized.\n");
@@ -1792,7 +1795,7 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
 	}
 
 	ret = drm_buffer_object_create(file_priv->head->dev,
-				       req->size, req->type, req->mask,
+				       req->size, drm_bo_type_dc, req->mask,
 				       req->hint, req->page_alignment,
 				       req->buffer_start, &entry);
if (ret)
diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h
index 9748baa..b58db57 100644
--- a/linux-core/drm_objects.h
+++ b/linux-core/drm_objects.h
@@ -350,6 +350,12 

[PATCH 1/3] Eliminate support for fake buffers.

2007-10-10 Thread Kristian Høgsberg
---
 libdrm/xf86drm.c|9 ++
 linux-core/drm_bo.c |   68 +-
 shared-core/drm.h   |1 -
 3 files changed, 5 insertions(+), 73 deletions(-)

diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c
index dc18d6f..bb2b3ab 100644
--- a/libdrm/xf86drm.c
+++ b/libdrm/xf86drm.c
@@ -2726,9 +2726,6 @@ int drmBOCreate(int fd, unsigned long start, unsigned long size,
 	req->buffer_start = (unsigned long) user_buffer;
 	buf->virtual = user_buffer;
 	break;
-    case drm_bo_type_fake:
-        req->buffer_start = start;
-	break;
 default:
return -EINVAL;
 }
@@ -2751,7 +2748,7 @@ int drmBODestroy(int fd, drmBO *buf)
 {
 struct drm_bo_handle_arg arg;
 
-    if (buf->mapVirtual && (buf->type != drm_bo_type_fake)) {
+    if (buf->mapVirtual) {
 	(void) drmUnmap(buf->mapVirtual, buf->start + buf->size);
 	buf->mapVirtual = NULL;
 	buf->virtual = NULL;
@@ -2792,7 +2789,7 @@ int drmBOUnReference(int fd, drmBO *buf)
 {
 struct drm_bo_handle_arg arg;
 
-    if (buf->mapVirtual && (buf->type != drm_bo_type_fake)) {
+    if (buf->mapVirtual) {
 	(void) munmap(buf->mapVirtual, buf->start + buf->size);
 	buf->mapVirtual = NULL;
 	buf->virtual = NULL;
@@ -2827,7 +2824,7 @@ int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
      * Make sure we have a virtual address of the buffer.
      */
 
-    if (!buf->virtual && buf->type != drm_bo_type_fake) {
+    if (!buf->virtual) {
 	drmAddress virtual;
 	virtual = mmap(0, buf->size + buf->start,
 		       PROT_READ | PROT_WRITE, MAP_SHARED,
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index 4e73577..7dd9856 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -148,7 +148,6 @@ static int drm_bo_add_ttm(struct drm_buffer_object * bo)
ret = -ENOMEM;
break;
case drm_bo_type_user:
-   case drm_bo_type_fake:
break;
default:
 		DRM_ERROR("Illegal buffer object type\n");
@@ -695,12 +694,6 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
 
-	if (bo->type == drm_bo_type_fake) {
-		bo->mem.mem_type = DRM_BO_MEM_LOCAL;
-		bo->mem.mm_node = NULL;
-		goto out1;
-	}
-
 	evict_mem = bo->mem;
 	evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
 	ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
@@ -720,7 +713,6 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
goto out;
}
 
-  out1:
 	mutex_lock(&dev->struct_mutex);
 	if (evict_mem.mm_node) {
 		if (evict_mem.mm_node != bo->pinned_node)
@@ -1355,44 +1347,6 @@ static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem)
return 1;
 }
 
-static int drm_bo_check_fake(struct drm_device * dev, struct drm_bo_mem_reg * mem)
-{
-	struct drm_buffer_manager *bm = &dev->bm;
-	struct drm_mem_type_manager *man;
-	uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
-	const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
-	uint32_t i;
-	int type_ok = 0;
-	uint32_t mem_type = 0;
-	uint32_t cur_flags;
-
-	if (drm_bo_mem_compat(mem))
-		return 0;
-
-	BUG_ON(mem->mm_node);
-
-	for (i = 0; i < num_prios; ++i) {
-		mem_type = prios[i];
-		man = &bm->man[mem_type];
-		type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
-					       &cur_flags);
-		if (type_ok)
-			break;
-	}
-
-	if (type_ok) {
-		mem->mm_node = NULL;
-		mem->mem_type = mem_type;
-		mem->flags = cur_flags;
-		DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE);
-		return 0;
-	}
-
-	DRM_ERROR("Illegal fake buffer flags 0x%016llx\n",
-		  (unsigned long long) mem->mask);
-   return -EINVAL;
-}
-
 /*
  * bo locked.
  */
@@ -1449,11 +1403,6 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
 		DRM_ERROR("Timed out waiting for buffer unmap.\n");
 		return ret;
 	}
-	if (bo->type == drm_bo_type_fake) {
-		ret = drm_bo_check_fake(dev, &bo->mem);
-		if (ret)
-			return ret;
-	}
 
/*
 * Check whether we need to move buffer.
@@ -1642,7 +1591,7 @@ int drm_buffer_object_create(struct drm_device *dev,
int ret = 0;
unsigned long num_pages;
 
-	if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
+	if (buffer_start & ~PAGE_MASK) {
 		DRM_ERROR("Invalid buffer object start.\n");
return -EINVAL;
}
@@ -1677,12 +1626,7 @@ int 

[PATCH 3/3] Drop destroy ioctls for fences and buffer objects.

2007-10-10 Thread Kristian Høgsberg
From: Kristian Høgsberg [EMAIL PROTECTED]

We now always create a drm_ref_object for user objects, and this is then the only
thing that holds a reference to the user object.  This way, unreferencing will
destroy the user object when the last drm_ref_object goes away.
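
An illustrative sketch of what teardown looks like after this series: there is
no destroy call any more, and dropping the last reference is what releases the
kernel object (the helper function here is made up, the libdrm calls are not).

/* Releasing objects after this patch: drmBODestroy()/drmFenceDestroy()
 * are gone; unreferencing the handle is all a client does, and the
 * kernel frees the object when its last drm_ref_object goes away. */
static void release_objects(int fd, drmBO *bo, drmFence *fence)
{
    drmBOUnReference(fd, bo);
    drmFenceUnreference(fd, fence);
}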
---
 libdrm/xf86drm.c |   32 
 libdrm/xf86mm.h  |2 --
 linux-core/drmP.h|1 -
 linux-core/drm_bo.c  |   31 ++-
 linux-core/drm_drv.c |2 --
 linux-core/drm_fence.c   |   28 +---
 linux-core/drm_fops.c|   13 -
 linux-core/drm_object.c  |   27 +++
 linux-core/drm_objects.h |   12 
 shared-core/drm.h|2 --
 10 files changed, 10 insertions(+), 140 deletions(-)

diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c
index c450a98..7666d43 100644
--- a/libdrm/xf86drm.c
+++ b/libdrm/xf86drm.c
@@ -2389,18 +2389,6 @@ int drmFenceBuffers(int fd, unsigned flags, uint32_t fence_class, drmFence *fenc
     fence->signaled = 0;
 return 0;
 }
-
-int drmFenceDestroy(int fd, const drmFence *fence)
-{
-drm_fence_arg_t arg;
-
-    memset(&arg, 0, sizeof(arg));
-    arg.handle = fence->handle;
-
-    if (ioctl(fd, DRM_IOCTL_FENCE_DESTROY, &arg))
-   return -errno;
-return 0;
-}
 
 int drmFenceReference(int fd, unsigned handle, drmFence *fence)
 {
@@ -2732,26 +2720,6 @@ int drmBOCreate(int fd, unsigned long size,
 return 0;
 }
 
-int drmBODestroy(int fd, drmBO *buf)
-{
-struct drm_bo_handle_arg arg;
-
-    if (buf->mapVirtual) {
-	(void) drmUnmap(buf->mapVirtual, buf->start + buf->size);
-	buf->mapVirtual = NULL;
-	buf->virtual = NULL;
-    }
-
-    memset(&arg, 0, sizeof(arg));
-    arg.handle = buf->handle;
-
-    if (ioctl(fd, DRM_IOCTL_BO_DESTROY, &arg))
-	return -errno;
-
-    buf->handle = 0;
-return 0;
-}
-
 int drmBOReference(int fd, unsigned handle, drmBO *buf)
 {
 struct drm_bo_reference_info_arg arg;
diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h
index 0dac7ef..d99e61e 100644
--- a/libdrm/xf86mm.h
+++ b/libdrm/xf86mm.h
@@ -150,7 +150,6 @@ typedef struct _drmBOList {
 
 extern int drmFenceCreate(int fd, unsigned flags, int fence_class,
   unsigned type, drmFence *fence);
-extern int drmFenceDestroy(int fd, const drmFence *fence);
 extern int drmFenceReference(int fd, unsigned handle, drmFence *fence);
 extern int drmFenceUnreference(int fd, const drmFence *fence);
 extern int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type);
@@ -181,7 +180,6 @@ extern int drmBOCreateList(int numTarget, drmBOList *list);
 extern int drmBOCreate(int fd, unsigned long size,
   unsigned pageAlignment, void *user_buffer,
   uint64_t mask, unsigned hint, drmBO *buf);
-extern int drmBODestroy(int fd, drmBO *buf);
 extern int drmBOReference(int fd, unsigned handle, drmBO *buf);
 extern int drmBOUnReference(int fd, drmBO *buf);
 extern int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index f8ca3f4..d0ab2c9 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -428,7 +428,6 @@ struct drm_file {
 */
 
struct list_head refd_objects;
-   struct list_head user_objects;
 
struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES];
struct file *filp;
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index e2f460e..fb360e7 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -1674,8 +1674,8 @@ int drm_buffer_object_create(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_buffer_object_create);
 
-int drm_bo_add_user_object(struct drm_file *file_priv,
-  struct drm_buffer_object *bo, int shareable)
+static int drm_bo_add_user_object(struct drm_file *file_priv,
+ struct drm_buffer_object *bo, int shareable)
 {
 	struct drm_device *dev = file_priv->head->dev;
int ret;
@@ -1694,7 +1694,6 @@ int drm_bo_add_user_object(struct drm_file *file_priv,
 	mutex_unlock(&dev->struct_mutex);
return ret;
 }
-EXPORT_SYMBOL(drm_bo_add_user_object);
 
 static int drm_bo_lock_test(struct drm_device * dev, struct drm_file *file_priv)
 {
@@ -1816,32 +1815,6 @@ out:
return ret;
 }
 
-
-int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_bo_handle_arg *arg = data;
-	struct drm_user_object *uo;
-	int ret = 0;
-
-	DRM_DEBUG("drm_bo_destroy_ioctl: buffer %d\n", arg->handle);
-
-	if (!dev->bm.initialized) {
-		DRM_ERROR("Buffer object manager is not initialized.\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&dev->struct_mutex);
-	uo = drm_lookup_user_object(file_priv, arg->handle);
-	if (!uo || (uo->type != drm_buffer_type) || uo->owner != file_priv) {
-   

Recent pre-superioctl push

2007-10-10 Thread Thomas Hellström
Oops.
Just ignore those latest commits. They got pushed by accident.
Since that branch has been merged it should be considered obsolete.

/Thomas






Re: driver fence_type function

2007-10-10 Thread Thomas Hellström
Dave Airlie wrote:


 Hi Thomas (and anyone else :-)

 drm_buffer_object_validate gets passed new and old flags (using
 bo->mem.mask/bo->mem.flags), but when it calls the i915 fence_type
 function it only uses flags to check the fence type.  Now if this
 buffer is getting validated RW when it wasn't before, it will get the
 wrong fence type back.

 I've changed this in my tree to (bo->mem.mask | bo->mem.flags), which
 ORs the old and new types together, but I wonder: should it just be
 checking the mask and not the flags at all?

 Dave.

Yes, Dave.
The correct way should be to check the mask only.

/Thomas
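
For reference, a rough, illustrative sketch of the point above: which field the
driver's fence_type hook should look at.  The function name, arguments, and
flag names here are simplified stand-ins loosely based on that era's code, not
the actual i915 hook.

/* Sketch only: compute a fence type from the flags being validated in
 * (mem.mask), not from the flags the buffer already had (mem.flags),
 * so a buffer newly validated for writing gets a read/write fence. */
static uint32_t example_fence_type(struct drm_buffer_object *bo)
{
	uint32_t fence_type = DRM_FENCE_TYPE_EXE;

	if (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
		fence_type |= DRM_I915_FENCE_TYPE_RW;

	return fence_type;
}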






[Bug 8292] i915: texture crossbar

2007-10-10 Thread bugzilla-daemon
http://bugs.freedesktop.org/show_bug.cgi?id=8292





--- Comment #8 from [EMAIL PROTECTED]  2007-10-10 13:50 PST ---
No need to apply any patch.  Use mesa master.





Re: [rfc] cache flush avoidance..

2007-10-10 Thread Dave Airlie


 Since Poulsbo is CMA, to avoid the SMP ipi issue, it should be possible
 to enclose the whole reloc fixup within a spinlock and use
 kmap_atomic which should be faster than kmap.
 Since preemption is also disabled within a spinlock, we can guarantee
 that a batchbuffer write followed by a clflush executes on the same
 processor => no need for an IPI, and the clflush can follow immediately
 after a write.
 We've used this technique in psb_mmu.c, although we're using
 preempt_disable() / preempt_enable() to collect per-processor clflushes.

 So, basically something like the following should be a fast ipi-free way
 to do this:

 spin_lock();
 while (more_relocs_to_do) {
  kmap_atomic(dst_buffer); // Reuse old map if same page
  apply_reloc();
  clflush(newly_written_address);
  kunmap_atomic(dst_buffer);
 }
 spin_unlock();

So this should work fine if every cacheline-sized portion of the buffer to
relocate contains a relocation, so that the snoop logic invalidates that
cacheline on the other processors.  But if you have very sparse relocations
I could see something like:

CPU0 writes relocation bo initially - one page with no relocations in cache
  - schedule
CPU1 enters kernel preempt section, and starts relocating, never
hitting that page,
CPU1 clflushes
GPU never sees the one page with no relocs..

Now maybe I'm missing something, but I'm not sure how to protect against
that..
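
To make the interleaving concrete, a rough sketch of the per-relocation flush
loop described above; the lock and the helpers more_relocs_to_do(),
next_reloc_address() and apply_reloc() are made up, and the loop just restates
the pseudocode quoted earlier.

/* CPU0 has already written into a page of the target bo that contains
 * no relocations; those writes may still sit only in CPU0's cache. */

spin_lock(&reloc_lock);			/* preemption now disabled */
while (more_relocs_to_do()) {
	void *addr = next_reloc_address();	/* kmap_atomic'd as needed */

	apply_reloc(addr);		/* write one relocation entry */
	clflush(addr);			/* flush only that cacheline */
}
spin_unlock(&reloc_lock);

/* Every line written inside the loop is flushed, and snooping keeps the
 * other CPUs coherent for those lines -- but the page CPU0 dirtied is
 * never touched here, so it is never flushed and the GPU can still read
 * stale data from it. */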

Dave.

