[PATCH:synaptics 3/5] Clean up debugging system, allow for --enable-debug

2009-11-02 Thread Peter Hutterer
Add --enable-debug to list of configure options.
Clean up the DBG macro to use xf86MsgVerb and supply the verbosity.
Don't use ErrorF from the driver, use xf86Msg instead.

Signed-off-by: Peter Hutterer 
---
 configure.ac   |9 +++
 src/eventcomm.c|6 ++--
 src/ps2comm.c  |   30 +-
 src/synaptics.c|   69 
 src/synapticsstr.h |   11 
 5 files changed, 67 insertions(+), 58 deletions(-)
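
The synapticsstr.h hunk with the new DBG macro is not included below (the
message is truncated), but the idea is roughly the following sketch -- the
macro name matches the driver's, while the X_INFO type and verbosity
handling here are assumptions rather than the actual patch:

#ifdef DEBUG
/* Hypothetical sketch: route debug output through xf86MsgVerb so it only
 * shows up at a sufficiently high -verbose level. */
#define DBG(verb, ...) xf86MsgVerb(X_INFO, (verb), __VA_ARGS__)
#else
#define DBG(verb, ...)          /* compiled out unless --enable-debug */
#endif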

diff --git a/configure.ac b/configure.ac
index bdc7764..49383cf 100644
--- a/configure.ac
+++ b/configure.ac
@@ -73,6 +73,15 @@ if test "x${BUILD_PSMCOMM}" = "xyes" ; then
AC_DEFINE(BUILD_PSMCOMM, 1, [Optional backend psmcomm enabled])
 fi
 
+AC_ARG_ENABLE(debug, AS_HELP_STRING([--enable-debug],
+[Enable debugging (default: disabled)]),
+[DEBUGGING=$enableval], [DEBUGGING=no])
+
+if test "x$DEBUGGING" = xyes; then
+   AC_DEFINE(DEBUG, 1, [Enable debugging code])
+fi
+AM_CONDITIONAL(DEBUG, [test "x$DEBUGGING" = xyes])
+
 AC_ARG_WITH(xorg-module-dir,
 AC_HELP_STRING([--with-xorg-module-dir=DIR],
[Default xorg module directory [[default=$libdir/xorg/modules]]]),
diff --git a/src/eventcomm.c b/src/eventcomm.c
index d8138d4..8287ca2 100644
--- a/src/eventcomm.c
+++ b/src/eventcomm.c
@@ -437,11 +437,11 @@ EventAutoDevProbe(LocalDevicePtr local)
 
 i = scandir(DEV_INPUT_EVENT, &namelist, EventDevOnly, alphasort);
 if (i < 0) {
-   ErrorF("Couldn't open %s\n", DEV_INPUT_EVENT);
+   xf86Msg(X_ERROR, "Couldn't open %s\n", DEV_INPUT_EVENT);
return FALSE;
 }
 else if (i == 0) {
-   ErrorF("%s The /dev/input/event* device nodes seem to be 
missing\n",
+   xf86Msg(X_ERROR, "%s The /dev/input/event* device nodes seem to 
be missing\n",
local->name);
free(namelist);
return FALSE;
@@ -471,7 +471,7 @@ EventAutoDevProbe(LocalDevicePtr local)
free(namelist);
 
if (!touchpad_found) {
-   ErrorF("%s no synaptics event device found\n", local->name);
+   xf86Msg(X_ERROR, "%s no synaptics event device found\n", 
local->name);
return FALSE;
}
 return TRUE;
diff --git a/src/ps2comm.c b/src/ps2comm.c
index e65e1af..ae2592f 100644
--- a/src/ps2comm.c
+++ b/src/ps2comm.c
@@ -65,18 +65,12 @@
 #define PS2_RES_RESOLUTION(r)  (((r) >> 8) & 0x03)
 #define PS2_RES_SAMPLE_RATE(r) ((r) & 0xff)
 
-/* #define DEBUG */
-
 #ifdef DEBUG
 #define PS2DBG(x) (x)
 #else
 #define PS2DBG(x)
 #endif
 
-#if GET_ABI_MAJOR(ABI_XINPUT_VERSION) >= 1
-#define DBG(a,b)
-#endif
-
 struct SynapticsHwInfo {
 unsigned int model_id; /* Model-ID */
 unsigned int capabilities; /* Capabilities */
@@ -411,7 +405,7 @@ ps2_query_is_synaptics(int fd, struct SynapticsHwInfo* synhw)
 if (ps2_synaptics_identify(fd, synhw)) {
return TRUE;
 } else {
-   ErrorF("Query no Synaptics: %06X\n", synhw->identity);
+   xf86Msg(X_ERROR, "Query no Synaptics: %06X\n", synhw->identity);
return FALSE;
 }
 }
@@ -528,22 +522,22 @@ ps2_packet_ok(struct SynapticsHwInfo *synhw, struct CommData *comm)
 int newabs = SYN_MODEL_NEWABS(synhw);
 
 if (newabs ? ((buf[0] & 0xC0) != 0x80) : ((buf[0] & 0xC0) != 0xC0)) {
-   DBG(4, ErrorF("Synaptics driver lost sync at 1st byte\n"));
+   DBG(4, "Synaptics driver lost sync at 1st byte\n");
return FALSE;
 }
 
 if (!newabs && ((buf[1] & 0x60) != 0x00)) {
-   DBG(4, ErrorF("Synaptics driver lost sync at 2nd byte\n"));
+   DBG(4, "Synaptics driver lost sync at 2nd byte\n");
return FALSE;
 }
 
 if ((newabs ? ((buf[3] & 0xC0) != 0xC0) : ((buf[3] & 0xC0) != 0x80))) {
-   DBG(4, ErrorF("Synaptics driver lost sync at 4th byte\n"));
+   DBG(4, "Synaptics driver lost sync at 4th byte\n");
return FALSE;
 }
 
 if (!newabs && ((buf[4] & 0x60) != 0x00)) {
-   DBG(4, ErrorF("Synaptics driver lost sync at 5th byte\n"));
+   DBG(4, "Synaptics driver lost sync at 5th byte\n");
return FALSE;
 }
 
@@ -565,16 +559,16 @@ ps2_synaptics_get_packet(LocalDevicePtr local, struct SynapticsHwInfo *synhw,
/* test if there is a reset sequence received */
if ((c == 0x00) && (comm->lastByte == 0xAA)) {
if (xf86WaitForInput(local->fd, 5) == 0) {
-   DBG(7, ErrorF("Reset received\n"));
+   DBG(7, "Reset received\n");
proto_ops->QueryHardware(local);
} else
-   DBG(3, ErrorF("faked reset received\n"));
+   DBG(3, "faked reset received\n");
}
comm->lastByte = u;
 
/* to avoid endless loops */
if (count++ > 30) {
-   ErrorF("Synaptics driver lost sync... got gigant

[PATCH:synaptics 4/5] eventcomm: don't use the Xisb buffers for reading.

2009-11-02 Thread Peter Hutterer
The kernel promises to give us 32 bytes for each event, so we don't have to
juggle the Xisb buffers around for the eventcomm devices.

This leaves the Xisb buffers hanging around but useless (they are
initialized regardless). Task for a later cleanup.

Signed-off-by: Peter Hutterer 
---
 src/eventcomm.c |   28 
 1 files changed, 16 insertions(+), 12 deletions(-)

diff --git a/src/eventcomm.c b/src/eventcomm.c
index 8287ca2..d00d810 100644
--- a/src/eventcomm.c
+++ b/src/eventcomm.c
@@ -270,19 +270,23 @@ EventQueryHardware(LocalDevicePtr local)
 }
 
 static Bool
-SynapticsReadEvent(struct CommData *comm, struct input_event *ev)
+SynapticsReadEvent(LocalDevicePtr local, struct input_event *ev)
 {
-int i, c;
-unsigned char *pBuf, u;
-
-for (i = 0; i < sizeof(struct input_event); i++) {
-   if ((c = XisbRead(comm->buffer)) < 0)
-   return FALSE;
-   u = (unsigned char)c;
-   pBuf = (unsigned char *)ev;
-   pBuf[i] = u;
+int rc = TRUE;
+ssize_t len;
+
+len = read(local->fd, ev, sizeof(*ev));
+if (len <= 0)
+{
+/* We use X_NONE here because it doesn't alloc */
+if (errno != EAGAIN)
+xf86MsgVerb(X_NONE, 0, "%s: Read error %s\n", local->name, 
strerror(errno));
+rc = FALSE;
+} else if (len % sizeof(*ev)) {
+xf86MsgVerb(X_NONE, 0, "%s: Read error, invalid number of bytes.", 
local->name);
+rc = FALSE;
 }
-return TRUE;
+return rc;
 }
 
 static Bool
@@ -296,7 +300,7 @@ EventReadHwState(LocalDevicePtr local,
 SynapticsPrivate *priv = (SynapticsPrivate *)local->private;
 SynapticsParameters *para = &priv->synpara;
 
-while (SynapticsReadEvent(comm, &ev)) {
+while (SynapticsReadEvent(local, &ev)) {
switch (ev.type) {
case EV_SYN:
switch (ev.code) {
-- 
1.6.5.1

___
xorg-devel mailing list
xorg-devel@lists.x.org
http://lists.x.org/mailman/listinfo/xorg-devel


[PATCH:synaptics 2/5] Always make the input buffer size 200

2009-11-02 Thread Peter Hutterer
PreInit initializes the input buffer with 200 bytes, DeviceOn with only 64. For
consistency, use the same size in both.

Signed-off-by: Peter Hutterer 
---
 src/synaptics.c |6 --
 1 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/src/synaptics.c b/src/synaptics.c
index 597aad2..e0d3555 100644
--- a/src/synaptics.c
+++ b/src/synaptics.c
@@ -111,6 +111,8 @@ typedef enum {
 #define DBG(a,b)
 #endif
 
+#define INPUT_BUFFER_SIZE 200
+
 /*
  * Forward declaration
  */
@@ -635,7 +637,7 @@ SynapticsPreInit(InputDriverPtr drv, IDevPtr dev, int flags)
 if (!alloc_param_data(local))
goto SetupProc_fail;
 
-priv->comm.buffer = XisbNew(local->fd, 200);
+priv->comm.buffer = XisbNew(local->fd, INPUT_BUFFER_SIZE);
 DBG(9, XisbTrace(priv->comm.buffer, 1));
 
 if (!QueryHardware(local)) {
@@ -757,7 +759,7 @@ DeviceOn(DeviceIntPtr dev)
 if (priv->proto_ops->DeviceOnHook)
 priv->proto_ops->DeviceOnHook(local, &priv->synpara);
 
-priv->comm.buffer = XisbNew(local->fd, 64);
+priv->comm.buffer = XisbNew(local->fd, INPUT_BUFFER_SIZE);
 if (!priv->comm.buffer) {
xf86CloseSerial(local->fd);
local->fd = -1;
-- 
1.6.5.1

___
xorg-devel mailing list
xorg-devel@lists.x.org
http://lists.x.org/mailman/listinfo/xorg-devel


[PATCH:synaptics 1/5] synclient: if no option is specified, assume -l

2009-11-02 Thread Peter Hutterer
Signed-off-by: Peter Hutterer 
---
 man/synclient.man |2 +-
 tools/synclient.c |3 +++
 2 files changed, 4 insertions(+), 1 deletions(-)

diff --git a/man/synclient.man b/man/synclient.man
index 610b101..8169771 100644
--- a/man/synclient.man
+++ b/man/synclient.man
@@ -96,7 +96,7 @@ positions, called gdx and gdy.
 .RE
 .TP
 \fB\-l\fR
-List current user settings.
+List current user settings. This is the default if no option is given.
 .TP
 \fB\-V\fR
 Print version number and exit.
diff --git a/tools/synclient.c b/tools/synclient.c
index 032b129..316ae2c 100644
--- a/tools/synclient.c
+++ b/tools/synclient.c
@@ -581,6 +581,9 @@ main(int argc, char *argv[])
 Display *dpy;
 XDevice *dev;
 
+if (argc == 1)
+dump_settings = 1;
+
 /* Parse command line parameters */
 while ((c = getopt(argc, argv, "sm:hlV")) != -1) {
switch (c) {
-- 
1.6.5.1

___
xorg-devel mailing list
xorg-devel@lists.x.org
http://lists.x.org/mailman/listinfo/xorg-devel


[PATCH:synaptics 0/5] Janitor patches for synaptics.

2009-11-02 Thread Peter Hutterer

Nothing overly exciting; the only change that really affects users is a new
default for synclient.

Peter Hutterer (5):
  synclient: if no option is specified, assume -l
  Always make the input buffer size 200
  Clean up debugging system, allow for --enable-debug
  eventcomm: don't use the Xisb buffers for reading.
  Reduce SynapticsCtrl to a stub.

 configure.ac   |9 ++
 man/synclient.man  |2 +-
 src/eventcomm.c|   34 --
 src/ps2comm.c  |   30 ---
 src/synaptics.c|   80 ++--
 src/synapticsstr.h |   11 +++
 tools/synclient.c  |3 ++
 7 files changed, 89 insertions(+), 80 deletions(-)

___
xorg-devel mailing list
xorg-devel@lists.x.org
http://lists.x.org/mailman/listinfo/xorg-devel


Re: error loading pixmap from HD

2009-11-02 Thread Sotirios Karavarsamis
@alan:
Thanks for the advice. I converted the images into XBM
format and the process worked straight away.
However, I will try libXpm later because I want color images in my
application, not just monochrome bitmaps.

@ajax:
Thanks. I will check this out.

On Mon, Nov 2, 2009 at 7:12 PM, Adam Jackson  wrote:
> On Sun, 2009-11-01 at 23:26 +0200, Sotirios Karavarsamis wrote:
>
>> I am constantly getting a "bitmap invalid file" error (case #2 in the
>> above switch control). In the snippet above, the constant
>> BMP_TILE_BLOCKS is a string representing the path to a PBM/XPM/BMP
>> file on disk. However, neither image format works. How should I
>> correctly load the image file from disk? Or am I screwing something up
>> in the snippet itself? Also, which image formats work correctly with
>> this particular function?
>
> You could always just install the debuginfo for libX11 and single-step
> through XReadBitmapFileData to see where it's failing.
>
> - ajax
>
___
xorg-devel mailing list
xorg-devel@lists.x.org
http://lists.x.org/mailman/listinfo/xorg-devel


RFC: Using Composite for pseudocolor visual emulation

2009-11-02 Thread Adam Jackson
This would be a fairly cool feature, and I've looked at it before and
walked away in disgust.  I looked into it again over the weekend and I
want to write down why I'm having trouble with it; maybe someone has
ideas.

Basically, there's an ordering issue.

1: The driver calls miSetVisualTypes and fbScreenInit to set up a list
of supported visuals.  In the cases we're interested in, this is rgb565
or xrgb.  Obviously if we were c8 already we wouldn't care about
emulating c8 visuals.

2: The driver calls fbPictureInit to configure the list of Render
formats.  This gives us formats for a1, x4a4, and [xa]{rgb,bgr} as
required by the protocol, and then also a pile of formats matching the
available visuals.

3: Composite initialization attempts to add synthetic visuals for which
there is not already an existing visual.  To do so, it needs to find a
Render format for that depth; but there won't _be_ a c8 Render format,
because it's not demanded by the protocol, and there's no existing
visual for it because that's the problem we're trying to solve!

What if, in fbPictureInit, we just always created a c8 format?  Well,
you have this in picture.h:

/*
 * gray/color formats use a visual index instead of argb
 */
#define PICT_VISFORMAT(bpp,type,vi) (((bpp) << 24) |  \
 ((type) << 16) | \
 ((vi)))

which sort of implies that the visual need already _exist_.  Maybe you
could create one with vi==0 and then fix it up later in Composite?  Iff
there's not already c8 formats, of course.
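
To make that concrete, a c8 format created before any matching visual
exists would simply carry a zero visual index, to be patched up later --
the following is only an illustration of the idea, not existing server
code:

/* Illustration: an 8bpp indexed color format with no visual behind it yet. */
CARD32 c8_id = PICT_VISFORMAT(8, PICT_TYPE_COLOR, 0);   /* vi == 0 */
/* Once Composite has created the synthetic c8 visual, the format's index
 * entry (visual id, colormap) would be fixed up to point at it. */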

But then you run into PictureInitIndexedFormats in PictureFinishInit,
which attempts to find a visual for each indexed format that doesn't
correspond to the root window visual, so it can create a colormap for
the format.  Which you can't _do_ yet, because the visual won't actually
exist until Composite init, which is later because it has to be after
Render init.  Presumably you could case out here as well for when vi==0,
and then fix it up in Composite.

Still, as with all the visual channel mask fixups in the drivers, I'm
left wondering why we have core code that does the wrong thing and then
useful code that fixes it later, instead of just getting it right in the
first place.

- ajax


___
xorg-devel mailing list
xorg-devel@lists.x.org
http://lists.x.org/mailman/listinfo/xorg-devel


Re: error loading pixmap from HD

2009-11-02 Thread Adam Jackson
On Sun, 2009-11-01 at 23:26 +0200, Sotirios Karavarsamis wrote:

> I am constantly getting a "bitmap invalid file" error (case #2 in the
> above switch control). In the snippet above, the constant
> BMP_TILE_BLOCKS is a string representing the path to a PBM/XPM/BMP
> file on disk. However, neither image format works. How should I
> correctly load the image file from disk? Or am I screwing something up
> in the snippet itself? Also, which image formats work correctly with
> this particular function?

You could always just install the debuginfo for libX11 and single-step
through XReadBitmapFileData to see where it's failing.

- ajax


___
xorg-devel mailing list
xorg-devel@lists.x.org
http://lists.x.org/mailman/listinfo/xorg-devel


Re: [PATCH] Abstract calls to in/out with IN/OUT macros

2009-11-02 Thread Adam Jackson
On Sun, 2009-11-01 at 18:22 -0500, Matt Turner wrote:
> On Sun, Nov 1, 2009 at 6:13 PM, keithp  wrote:
> > Excerpts from Mark Kettenis's message of Sun Nov 01 13:19:04 -0800 2009:
> >
> >> While I sympathise with your attempt to clean up the mess in
> >> compiler.h, this will break at least one driver (xf86-video-i128).
> >
> > A fine example of why we want to merge the drivers into the server :-)
> >
> > In any case, for now, I'd rather see an ABI/API compatible change that
> > has the server get at the system io functions through a standard api
> > while still providing the old interfaces so that drivers continue to
> > build unchanged. Having those old interfaces work through the new
> > standard API would be great if possible. Marking those as 'deprecated'
> > so that the compiler emits a warning would be a nice bonus.
> 
> This would be nice, but I'm not sure how it could be possible, since
> in half the cases of OS/Architecture combinations, the drivers call
> the libc in/out, so no chance to warn. Then again, this is entirely
> the problem with the current 'API' if we can even call it that.

I'd like to suggest fixing this in pciaccess for real:

http://lists.x.org/archives/xorg-devel/2009-November/003255.html

I think the API given there is pretty much right, at least for I/O BARs.
There still remains the question of non-BAR I/O ports, ie, the VGA
space.  Maybe something like:

void *pci_nonbar_open_io_range(struct pci_device *dev, uint16_t port, uint16_t count);

I think you want to pass in the device, so if you've got multiple
domains you can get the legacy I/O map for the right domain.  There's
still some questions though:

- Should it refuse to open ports claimed by a PCI BAR?
- Should it refuse to open 0xCF8 / 0xCFC since we already have an API
  for PCI config space access?
- Are there any other ports it should refuse to open?

I think fixing up the drivers for the new API is a fairly small spatch
script per driver.
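
To make the proposal concrete, usage might look like the sketch below --
none of this exists in libpciaccess today, and whether register offsets
would be relative to the opened range or absolute port numbers is one of
the open questions:

/* Hypothetical: open the legacy VGA ports for this device's domain. */
void *vga = pci_nonbar_open_io_range(dev, 0x3c0, 0x20);   /* 0x3c0..0x3df */
if (vga) {
    /* ... access VGA registers through whatever handle-based in/out
     * accessors accompany the call ... */
}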

- ajax


___
xorg-devel mailing list
xorg-devel@lists.x.org
http://lists.x.org/mailman/listinfo/xorg-devel


Re: evtouch driver lockups

2009-11-02 Thread Adam Jackson
On Mon, 2009-11-02 at 18:07 +0300, Alexander Sabourenkov wrote:
> Hello.
> 
> I'm seeing Xorg lock up (sleep in futex) during stress-testing
> touchscreens - that consists of many touches in quick succession, where
> each touch makes something be redrawn.
> 
> Backtraces are identical in upper part - the signal handler, and differ
> in the lower part, that is from frame 11 onwards, but it's always
> something to do with malloc/free. See the example backtrace below.
> 
> As far as I understand, one should not do any memory
> allocation/deallocation or, more generally, anything that might be
> protected with locks, in a signal handler.
> 
> Now, my question is: how do I fix that?

Don't call TimerFree from DoBtnAction, clearly.  Use TimerCancel to
disable the middle-button emulation timer instead.  You need not even
free the timer until device close; the next call to TimerSet in
EVTouchLBRBEvent will just reuse the existing allocation.
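
A minimal sketch of that change (the private-structure type and field
names here are assumptions, not necessarily what evtouch really uses):

/* Sketch of the timer handling inside DoBtnAction(). */
EVTouchPrivatePtr priv = (EVTouchPrivatePtr) local->private;  /* name assumed */

/* Was: TimerFree(priv->emulation_timer); -- freeing memory from the SIGIO
 * path is what ends up stuck in malloc's lock. */
TimerCancel(priv->emulation_timer);          /* just disarm it */

/* The OsTimerPtr stays allocated; the next TimerSet() in EVTouchLBRBEvent()
 * reuses it.  Free it once, at device close. */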

Of course it should be noted that that TimerSet is _also_ broken, since
it mallocs and it's called from ReadInput, which (as shown in the
backtrace) is called from the signal handler.  So you'll also want to do
something like evdev does:

http://cgit.freedesktop.org/xorg/driver/xf86-input-evdev/commit/?id=ddc126637404cb3d9356b7698779dcd8849f8718

All of which just underlines "use evdev already" I suppose.

- ajax


___
xorg-devel mailing list
xorg-devel@lists.x.org
http://lists.x.org/mailman/listinfo/xorg-devel


Re: [RFC] DRI2 synchronization and swap bits

2009-11-02 Thread Jesse Barnes
On Sun, 1 Nov 2009 21:46:45 +0100
Mario Kleiner  wrote:
> I read this RFC and i'm very excited about the prospect of having  
> well working support for the OML_sync_control extension in DRI2 on  
> Linux/X11. I have been hoping for this to happen for years, so a big  
> thank you in advance! This is why i hope to provide some input from  
> the perspective of future "power-users" of functions like  
> glXGetSyncValuesOML(), glXSwapBuffersMscOML(), glXWaitForSbcOML. I'm  
> the co-developer of a popular free-software toolkit (Psychtoolbox)  
> that is used mostly in the neuroscience / cognitive science
> community by scientists to find out how the different senses (visual,
> auditory, haptic, ...) work and how they work together. Our
> requirements to graphics are often much more demanding than what a
> videogame, typical vr-environment or a mediaplayer has.

Thanks a lot for taking time to go through this stuff, it's exactly the
kind of feedback I was hoping for.

> Our users often have very strict requirements for scheduling frame- 
> accurate and tear-free visual stimulus display, synchronizing  
> bufferswaps across display-heads, and low-latency returns from swap- 
> completion. Often they need swap-completion timestamps which are  
> available with the shortest possible delay after a successfull swap  
> and accurately tied to the vblank at which scanout of a swapped
> frame started. The need for timestamps with sub-millisecond accuracy
> is not uncommon. Therefore, well working OML_sync_control support
> would be basically a dream come true and a very compelling feature
> for Linux as a platform for cognitive science.

Doing the wakeups within a millisecond should definitely be possible,
I don't expect the context switch between display server and client
would be *that* high of a cost (but as I said I'll benchmark).
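
For reference, the usage pattern under discussion looks roughly like this
(a sketch only: the entry points have to be resolved via glXGetProcAddress,
the extension checked for at runtime, and error handling is omitted):

int64_t ust, msc, sbc;

/* Current counters; UST is the timestamp of the last MSC increment. */
glXGetSyncValuesOML(dpy, drawable, &ust, &msc, &sbc);

/* Schedule a swap for a specific future vblank (divisor/remainder of 0
 * means "exactly at target_msc"). */
glXSwapBuffersMscOML(dpy, drawable, msc + 10, 0, 0);

/* Block until that swap completes; the returned UST is the
 * swap-completion timestamp the toolkit cares about. */
glXWaitForSbcOML(dpy, drawable, sbc + 1, &ust, &msc, &sbc);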

> 2. On the CompositePage in the DRM Wiki, there is this comment:  
> "...It seems that composited apps should never need to know about  
> real world screen vblank issues, ... When dealing with a  
> redirected window it seems it would be acceptable to come up with an  
> entirely fake number for all existing extensions that care about  
> vblanks.."
> 
> I don't like this idea about entirely fake numbers and like to vote  
> for a solution that is as close as possible to the non-redirected  
> case. Most of our applications run in non-redirected, full-screen,  
> undecorated, page-flipped windows, ie., without a compositor being  
> involved. I can think of a couple future usage cases though where  
> reasonably well working redirected/composited windows would be very  
> useful for us, but only if we get meaningful timestamps and vblank  
> counts that are tied to the actual display onset.

The raw numbers will always be exposed to the compositor and probably
to applications via an opt-out mechanism (to be defined still, we don't
even have the extra compositor protocol defined).

> 3. The Wiki also mentions "The direct rendered cases outlined in the  
> implementation notes above are complete, but there's a bug in the  
> async glXSwapBuffers that sometimes causes clients to hang after  
> swapping rather than continue." Looking through the code of
> <http://cgit.freedesktop.org/~jbarnes/xf86-video-intel/tree/src/i830_dri.c?id=a0e2e624c47516273fa3d260b86d8c293e2519e4>
> i can see that in
> I830DRI2SetupSwap() and I830DRI2SetupWaitMSC(), in the "if (divisor  
> == 0) { ...}" path, the functions return after DRM_VBLANK_EVENT  
> submission without assigning *event_frame = vbl.reply.sequence;
> This looks problematic to me, as the xserver is later submitting  
> event_frame in the call to DRI2AddFrameEvent() inside DRI2SwapBuffers()
> as a cookie to find the right events for clients to wait on?
> Could this be a reason for clients hanging after swap? I found a few
> other spots where i either misunderstood something or there are small
> bugs. What is the appropriate way to report these?

This list is fine, thanks for checking it out.  I'll fix that up.

> 4. According to spec, the different OML_sync_control functions do  
> return a UST timestamp which is supposed to reflect the exact time
> of when the MSC last incremented, i.e., at the start of scanout of a
> new video frame. SBC and MSC are supposed to increment atomically/ 
> simultaneously at swap completion, so the UST in the (UST,SBC,MSC)  
> triplet is supposed to mark the time of transition of either MSC or  
> MSC and SBC at swap completion. This makes a lot of sense to me, it  
> is exactly the type of timestamp that our toolkit critically depends
> on.
> 
> Ideally the UST timestamp should be corrected to reflect start of  
> scanout, but a UST that is consistently taken at vblank interrupt  
> time would do as well. In the current implementation this is *not*  
> the semantic we'd get for UST timestamps.
> 
> The I830DRI2GetMSC() call uses a call to drmWaitVBlank() and its  
> returned vbl.reply.tval_sec and vbl.reply.tval_usec 

Re: Not getting expected performance improvement after accelerating bitBlit through KAA

2009-11-02 Thread Adam Jackson
On Fri, 2009-10-30 at 13:12 +0530, prudhvi raj wrote:
> Thanks for the response mr. Daniel.
> 
> According to my understanding KDrive has been developed with more
> focus on embedded systems. So why do you want me to go for Xorg (do you
> mean XAA?) instead of KDrive?

Because the major tradeoffs made for kdrive (not building XKB and GLX,
for example) are either generally recognized to be false economies, or
are achievable with Xorg too.  And because nobody is maintaining kdrive,
or wants to.

> Do you have any idea why the mapping pScreen->PaintWindowBackground =
> kaaPaintWindow; has been removed in later versions?

commit e4d11e58ce349dfe6af2f73ff341317f9b39684c
Author: Eric Anholt 
Date:   Wed Sep 12 13:58:46 2007 +

Remove the PaintWindow optimization.

This was an attempt to avoid scratch gc creation and validation for
paintwindow because that was expensive.  This is not the case in current
servers, and the
danger of failure to implement it correctly (as seen in all previous
implementations) is high enough to justify removing it.  No performance
difference detected with x11perf -create -move -resize -circulate on Xvfb.
Leave the screen hooks for PaintWindow* in for now to avoid ABI change.

> and why is the function kaaFillRegionTiled() is left unimplemented and
> commented??

Probably because it was only ever called from the PaintWindow screen
hook.

As to your initial question:

> > Can you provide more clarity on why most of the Blit calls are
> > routed through software fallbacks.

fbBlt is called from more places than just the screen-to-screen copy
path.  In particular, it's called from the PutImage path (host to GPU
upload), which KAA does not (did not) accelerate.

- ajax



___
xorg-devel mailing list
xorg-devel@lists.x.org
http://lists.x.org/mailman/listinfo/xorg-devel


Re: [xserver] add libc as a choice for SHA1 implementation

2009-11-02 Thread Keith Packard
Excerpts from Matthieu Herrb's message of Mon Nov 02 02:11:24 -0800 2009:

> New version taking you comments into account.

Thanks!

-- 
keith.pack...@intel.com


___
xorg-devel mailing list
xorg-devel@lists.x.org
http://lists.x.org/mailman/listinfo/xorg-devel


Re: Repo Tags

2009-11-02 Thread Alan Coopersmith


Michael Witten wrote:
> From what I can tell, releases are made by compiling a huge list of
> `module' version numbers in the xorg/util/modular repo's
> module-lists.txt file:
> 
> http://cgit.freedesktop.org/xorg/util/modular/tree/module-list.txt
> 
> The xorg/util/modular repo has tags like XORG-7_5 for each official release.

Yep - that's what's written in our release process instructions at:
http://xorg.freedesktop.org/wiki/Development/Documentation/ReleaseHOWTO

> It would be very useful if each `module' repo were also to have tags
> that reflect which of its revisions is included in which xorg
> releases. This seems to have been the policy at one point; for
> instance, the xf86-input-evdev repo lists tag XORG-7_1, but no such
> tag exists for later xorg releases.
> 
> Specifically, this kind of policy would (from what I can tell) allow
> one to construct, say, `an XORG-7_5 environment' just by checking out
> the XORG-7_5 tag in each relevant `module' repo.

That probably wouldn't be too hard to automate given the scripts I already
wrote to map modules & tags to the release versions, but I don't think I'll
have time to get to it soon.

-- 
-Alan Coopersmith-   alan.coopersm...@sun.com
 Sun Microsystems, Inc. - X Window System Engineering

___
xorg-devel mailing list
xorg-devel@lists.x.org
http://lists.x.org/mailman/listinfo/xorg-devel


Re: [PATCH] i128: Make use of hardware byteswap on big-endian platforms.

2009-11-02 Thread Adam Jackson
On Sun, 2009-11-01 at 12:41 +0100, Mark Kettenis wrote:
> Fixes 16 and 32 bit-per-pixel modes with a Tech Source Raptor GFX-8M board
> on OpenBSD/sparc64 (although a few more diffs are needed for a working 
> driver).
> 
> Signed-off-by: Mark Kettenis 

Pushed, thanks.

- ajax


___
xorg-devel mailing list
xorg-devel@lists.x.org
http://lists.x.org/mailman/listinfo/xorg-devel


Re: [PATCH] glx: swrast can do GLX 1.4 too

2009-11-02 Thread Adam Jackson
On Fri, 2009-10-30 at 11:19 -0400, Kristian Høgsberg wrote:
> On Thu, Oct 29, 2009 at 5:52 PM, Adam Jackson  wrote:
> > On Thu, 2009-10-29 at 14:01 -0700, keithp wrote:
> >> Excerpts from Adam Jackson's message of Thu Oct 29 11:01:29 -0700 2009:
> >>
> >> > +screen->base.GLXmajor = 1;
> >> > +screen->base.GLXminor = 4;
> >>
> >> Should this define be in a header somewhere?
> >
> > Maybe?  It ends up being variable per renderer atm, so you'd end up with
> >
> > #define GLX_SWRAST_PROTOCOL_MINOR   4
> > #define GLX_DRI_PROTOCOL_MINOR  2
> > #define GLX_DRI2_PROTOCOL_MINOR 4
> >
> > That doesn't seem especially useful, even as documentation.
> >
> > The whole thing is broken, in reality.  pbuffer support and separable
> > read/draw surfaces in the context are features that require some level
> > of hardware (well, renderer) support, and we're not querying the
> > renderer for their presence.
> 
> No, pbuffer support is entirely in GLX and all DRI2 drivers support
> separate draw and read drawables.

I didn't say we weren't giving the right answer, although I did say "in
reality" when I should have said "in theory".  I was trying to say we
were being chummy with the implementation.  Not all DRI1 drivers
necessarily support separate read/draw (although I think they do), and
I'm reasonably sure hardware has existed where the only offscreen
renderbuffer was the backbuffer.  It wouldn't be impossible to implement
pbuffers in that environment, but, the point remains, it requires
renderer support.

Still, swrast _can_ do 1.4, and I don't see a lot of value in
#define'ing that bit of truth.

- ajax


___
xorg-devel mailing list
xorg-devel@lists.x.org
http://lists.x.org/mailman/listinfo/xorg-devel


Re: [PATCH] Add support for RENDER BGRA formats.

2009-11-02 Thread Adam Jackson
On Sat, 2009-10-31 at 16:17 +0100, Mark Kettenis wrote:

> > It might.  BAR 5 is the I/O map, and it's 256 bytes.  They're all
> > defined right up to 0xBC, which smells a little suspicious since the VGA
> > ports it claims to decode are 3C[0-5ACEF] and 3[BD][45A].  So while it
> > doesn't _say_ the VGA ports are aliased there starting at 0xC0, I'd be
> > utterly unsurprised.
> > 
> > I guess I don't see how it matters though, you can already get to the
> > colormap and mode, and you won't be using VGA fonts.  (Unless you do VGA
> > console even on non-x86 machines?  That would be vile.)
> 
> You're right of course.  I already disabled the VGA font save/restore
> because that required mapping the legacy VGA memory area and on the
> Sun Blade 1000 that I stuck this card in, that area is in use by other
> devices.
> 
> Still struggling a bit with how to map BAR 5 though.  I have a hack to
> make things work, but I'll need to discuss the implications for the
> OpenBSD kernel and libpciaccess.

Most non-x86 arches have a way to mmap the I/O space, but x86 (and
amd64) almost never does.  The portable way to do this would be like:

void *pci_device_open_io(struct pci_device *dev, int bar);
void pci_device_close_io(struct pci_device *dev, void *handle);
uint32_t pci_io_inl(void *handle, uint16_t reg);
uint16_t pci_io_inw(void *handle, uint16_t reg);
uint8_t pci_io_inb(void *handle, uint16_t reg);
void pci_io_outl(void *handle, uint16_t reg, uint32_t data);
void pci_io_outw(void *handle, uint16_t reg, uint16_t data);
void pci_io_outb(void *handle, uint16_t reg, uint8_t data);

On mappable arches the handle can be the mmap itself and the accessors
can be trivial, but on x86 it'd probably be the file handle
for /dev/port or the sysfs resource file and the accessors would be
pread/pwrite.
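
A usage sketch of that interface (these entry points are a proposal, not
something libpciaccess ships today; the BAR index and register offsets are
made up):

void *h = pci_device_open_io(dev, 5);       /* BAR 5: the chip's I/O map */
if (h) {
    uint32_t v = pci_io_inl(h, 0x00);       /* read a 32-bit register  */
    pci_io_outb(h, 0x08, 0x01);             /* write an 8-bit register */
    pci_device_close_io(dev, h);
}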

- ajax


___
xorg-devel mailing list
xorg-devel@lists.x.org
http://lists.x.org/mailman/listinfo/xorg-devel


Re: error loading pixmap from HD

2009-11-02 Thread Alan Coopersmith
XReadBitmapFile can only read *.xbm format bitmaps (i.e. 1-bit-per-pixel),
not multi-color pixmaps of any form.  libXpm offers equivalent functionality
for *.xpm pixmap files.  Other libraries from other projects provide
support for reading more advanced graphics formats.
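
For the color case, the libXpm equivalent looks roughly like this (a
sketch, reusing the display/win variables from the snippet quoted below;
the file name is a placeholder, and the program links with -lXpm):

#include <stdio.h>
#include <X11/xpm.h>

Pixmap tiles, tiles_mask;
int rc = XpmReadFileToPixmap(display, win, "tiles.xpm",
                             &tiles, &tiles_mask, NULL);
if (rc != XpmSuccess)
    fprintf(stderr, "xpm load failed: %s\n", XpmGetErrorString(rc));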

-Alan Coopersmith-   alan.coopersm...@sun.com
 Sun Microsystems, Inc. - X Window System Engineering

Sotirios Karavarsamis wrote:
> Hello,
> 
> I am currently trying to load an image from disk and splash it on
> screen using XCopyArea(), and in the
> following snippet to perform the loading step:
> 
> res = XReadBitmapFile(display, win, BMP_TILE_BLOCKS, &w, &h,
> &game->platform->bmpTiles, &x_hot, &y_hot);
> 
> switch (res)
> {
> case BitmapOpenFailed:
>   printf("bitmap open failed\n");
>   break;
> case BitmapFileInvalid:
>   printf("bitmap invalid file\n");
>   break;
> case BitmapNoMemory:
>   printf("bitmap no memory!\n");
>   break;
> case BitmapSuccess:
>   printf("bitmap success\n");
> default:
>   printf("other error status\n");
>   break;
> }
> 
> I am constantly getting a "bitmap invalid file" error (case #2 in the
> above switch control). In the snippet above, the constant
> BMP_TILE_BLOCKS is a string representing the path to a PBM/XPM/BMP
> file on disk. However, neither image format works. How should I
> correctly load the image file from disk? Or am I screwing something up
> in the snippet itself? Also, which image formats work correctly with
> this particular function?
> 
> I am looking forward to your reply.
> 
> Many thanks in advance,
> Sotiris Karavarsamis
> Computer Science Department, UOI
> ___
> xorg-devel mailing list
> xorg-devel@lists.x.org
> http://lists.x.org/mailman/listinfo/xorg-devel

___
xorg-devel mailing list
xorg-devel@lists.x.org
http://lists.x.org/mailman/listinfo/xorg-devel


evtouch driver lockups

2009-11-02 Thread Alexander Sabourenkov
Hello.

I'm seeing Xorg lock up (sleep in futex) during stress-testing
touchscreens - that consists of many touches in quick succession, where
each touch makes something be redrawn.

Backtraces are identical in upper part - the signal handler, and differ
in the lower part, that is from frame 11 onwards, but it's always
something to do with malloc/free. See the example backtrace below.

As far as I understand, one should not do any memory
allocation/deallocation or, more generally, anything that might be
protected with locks, in a signal handler.

Now, my question is: how do I fix that?

Example backtrace:

#0  0x00bf4422 in __kernel_vsyscall ()
#1  0x00235ae3 in __lll_lock_wait_private () at 
../nptl/sysdeps/unix/sysv/linux/i386/i686/../i486/lowlevellock.S:95
#2  0x001ccf07 in _L_lock_9496 () from /lib/tls/i686/cmov/libc.so.6
#3  0x001cb796 in *__GI___libc_free (mem=0x930a548) at malloc.c:3714
#4  0x08137a21 in Xfree (ptr=0xfe00) at ../../os/utils.c:1165
#5  0x0813127e in TimerFree (timer=0x930a548) at ../../os/WaitFor.c:518
#6  0x0062423b in DoBtnAction (local=0x9251898) at evtouch.c:260
#7  ReadInput (local=0x9251898) at evtouch.c:845
#8  0x080c7ef7 in xf86SigioReadInput (fd=17, closure=0x9251898) at 
../../../../hw/xfree86/common/xf86Events.c:311
#9  0x080b87b4 in xf86SIGIO (sig=29) at 
../../../../../hw/xfree86/os-support/linux/../shared/sigio.c:114
#10 
#11 _int_free (av=, p=0x93f7230) at malloc.c:4831
#12 0x001cb79d in *__GI___libc_free (mem=0x93f7238) at malloc.c:3716
#13 0x08137a21 in Xfree (ptr=0x0) at ../../os/utils.c:1165
#14 0x08120cb4 in miRegionDestroy (pReg=0x93f7238) at ../../mi/miregion.c:258
#15 0x0816f126 in miDestroyPicture (pPicture=0x92fa040) at 
../../render/mipict.c:50
#16 0x08171b6d in FreePicture (value=0x92fa040, pid=23101378) at 
../../render/picture.c:1508
#17 0x08074e82 in FreeResource (id=23101378, skipDeleteFuncType=0) at 
../../dix/resource.c:561
#18 0x08179722 in ProcRenderFreePicture (client=0x92978e0) at 
../../render/render.c:684
#19 0x081742e5 in ProcRenderDispatch (client=0x29d3a0) at 
../../render/render.c:2089
#20 0x0808d17f in Dispatch () at ../../dix/dispatch.c:456
#21 0x08072515 in main (argc=5, argv=0xbfed81a4, envp=0xbfed81bc) at 
../../dix/main.c:397
___
xorg-devel mailing list
xorg-devel@lists.x.org
http://lists.x.org/mailman/listinfo/xorg-devel


Re: Xorg segmentation fault when drawing PolyArcs (under kicad)

2009-11-02 Thread Michel Dänzer
On Wed, 2009-10-21 at 14:25 +0100, Renato Caldas wrote: 
> 2009/10/21 Renato Caldas :
> > 2009/10/20 Renato Caldas :
> >> 2009/10/20 Renato Caldas :
> >>>
> >>> 2009/10/20 Michel Dänzer :
>  On Mon, 2009-10-12 at 20:12 +0100, Renato Caldas wrote:
> >
> > When using (trying to use...), kicad I managed to get consistent Xorg
> > segmentation faults. I've filed a bug report on fedora's bugzilla:
> > https://bugzilla.redhat.com/show_bug.cgi?id=528475. There I've
> > included a very basic test case that works all the time. In the
> > meantime, I've also got my hands dirty, and tried to debug Xorg using
> > gdb.
> >
> > The symptom is that the function fbBltOne is called with src=0x0, so
> > Xorg segfaults as soon as it tries to use src (in LoadBits; at
> > fb/fbbltone.c:292).
> >
> > I've traced its value back to fbPushPixels, where it is created and
> > fed to the function chain, in the macro fbGetStipDrawable (defined at
> > fb/fb.h:720).
> >
> > Here pDrawable->type seems to be DRAWABLE_PIXMAP, so _pPix is simply
> > cast from pDrawable. Then the _pPix -> devPrivate.ptr is used as
> > "src", after a couple of castings and copying around.
> 
>  Please provide a full backtrace ('bt full' in gdb) from when the crash
>  occurs.
> >>>
> >>> Attached. I've also attached it to the bug report, just in case.
> >>>
>  If you're using EXA, the problem is most likely that some code path
>  isn't calling exaPrepareAccess(Reg) for the pixmap in question before
>  calling down to the fb module.
> >
> > It seems that the code path is calling exaPrepareAccess:
> >
> > in ExaCheckPolyArc:
> >
> > (...)
> > exaPrepareAccess (pDrawable, EXA_PREPARE_DEST);
> > exaPrepareAccessGC (pGC);
> > pGC->ops->PolyArc (pDrawable, pGC, narcs, pArcs);
> > (...)
> >
> > but there's possibly something wrong there.
> 
> You're right, I found the problem, and it seems to be tricky! It seems
> that the scratch buffers used by a lot of functions never call
> exaPrepareAccess.
> 
> I've created a small test program, and it never crashed. It turned out
> that kicad used a "GXor" operation, which was flagged as "tricky",
> requiring a scratch buffer. This scratch buffer would then be used
> without "preparation". There are a lot of functions that create a
> scratch buffer, but they're possibly rarely used:
> 
> $ grep CREATE_PIXMAP_USAGE_SCRATCH * -lR
> dix/glyphcurs.c
> exa/exa_glyphs.c
> hw/xfree86/xaa/xaaInit.c
> include/scrnintstr.h
> mi/miarc.c
> mi/midispcur.c
> mi/mibitblt.c
> mi/miglblt.c
> render/render.c
> render/glyph.c
> render/mirect.c
> Xext/mbuf.c
> Xext/shm.c
> Xext/mbufpx.c
> 
> Some of them may be safe, but at least the mi/ files should be fixed.
> 
> The "tricky" part of the problem is how to call exaPrepareAccess in a
> clean way. fbPrepareAccess calls the driver's "PrepareAccess", so it
> seems to be the correct solution. But it shouldn't be called directly
> either, so I first need to put fbPrepareAccess in the *pGC->ops.
> 
> Does this sound ok?

I'm afraid not.

exaPrepareAccess* are internal to EXA, no other layer needs to know
about them.

It sounds like either the logic in exaPrepareAccessGC() is wrong for
preparing access to the stipple or tile pixmap, or the pixmap in
question comes from yet another place not handled in
exaPrepareAccessGC() or ExaCheckPolyArc() yet. Can you find out where in
the GC or other data structure the pointer to the pixmap in question is
retrieved from?


-- 
Earthling Michel Dänzer   |http://www.vmware.com
Libre software enthusiast |  Debian, X and DRI developer


___
xorg-devel mailing list
xorg-devel@lists.x.org
http://lists.x.org/mailman/listinfo/xorg-devel


Re: [PATCH 1/2] exa: remove some outdated comment

2009-11-02 Thread Michel Dänzer
On Sun, 2009-11-01 at 22:08 +0100, Maarten Maathuis wrote: 
> - This comment is still in exa_driver.c and there it makes sense.
> 
> Signed-off-by: Maarten Maathuis 
> ---
>  exa/exa_mixed.c |4 
>  1 files changed, 0 insertions(+), 4 deletions(-)
> 
> diff --git a/exa/exa_mixed.c b/exa/exa_mixed.c
> index bc393c7..b29ee35 100644
> --- a/exa/exa_mixed.c
> +++ b/exa/exa_mixed.c
> @@ -154,10 +154,6 @@ exaModifyPixmapHeader_mixed(PixmapPtr pPixmap, int width, int height, int depth,
>  if (pExaScr->info->ModifyPixmapHeader && pExaPixmap->driverPriv) {
>   ret = pExaScr->info->ModifyPixmapHeader(pPixmap, width, height, depth,
>   bitsPerPixel, devKind, pPixData);
> - /* For EXA_HANDLES_PIXMAPS, we set pPixData to NULL.
> -  * If pPixmap->devPrivate.ptr is non-NULL, then we've got a non-offscreen pixmap.
> -  * We need to store the pointer, because PrepareAccess won't be called.
> -  */
>   if (ret == TRUE)
>   goto out;
>  }

Acked-by: Michel Dänzer 


-- 
Earthling Michel Dänzer   |http://www.vmware.com
Libre software enthusiast |  Debian, X and DRI developer


___
xorg-devel mailing list
xorg-devel@lists.x.org
http://lists.x.org/mailman/listinfo/xorg-devel


Re: [xserver] add libc as a choice for SHA1 implementation

2009-11-02 Thread Matthieu Herrb
On Sun, Nov 01, 2009 at 02:54:13PM -0800, Keith Packard wrote:
> Excerpts from Matthieu Herrb's message of Sun Nov 01 09:34:35 -0800 2009:
> 
> > +AC_CHECK_FUNCS([SHA1Init], [HAVE_LIBC_SHA1=yes])
> 
> I'd suggest AC_CHECK_FUNC instead; as far as I can tell, AC_CHECK_FUNCS
> will also define HAVE_SHA1INIT. Also, can you  use HAVE_LIBC_SHA1
> consistently rather than having two separate names (HAVE_LIBC_SHA1 and
> HAVE_SHA1_IN_LIBC)? Yes, I know one is a preprocessor symbol and the
> other is a cpp shell variable, but I think that will work anyway.
> 
New version taking you comments into account.

From: Matthieu Herrb 
Date: Sun, 1 Nov 2009 18:19:27 +0100
Subject: [PATCH] Add a probe for SHA1 functions in libc in *BSD.

The interface is the same as the one in libmd.
---
 configure.ac|   14 +-
 include/dix-config.h.in |3 +++
 os/xsha1.c  |3 ++-
 3 files changed, 18 insertions(+), 2 deletions(-)
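
Both the BSD libc and libmd expose the same SHA1Init/SHA1Update/SHA1Final
interface, which is why the existing libmd code path can be shared; roughly
(a sketch, not the actual os/xsha1.c wrappers -- the header name varies a
little between systems):

#include <sha1.h>                 /* <sha.h> on some systems */

SHA1_CTX ctx;
unsigned char digest[20];         /* SHA1 yields a 160-bit digest */
const unsigned char data[] = "some bytes";   /* caller's buffer */

SHA1Init(&ctx);
SHA1Update(&ctx, data, sizeof(data) - 1);
SHA1Final(digest, &ctx);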

diff --git a/configure.ac b/configure.ac
index 7d87b29..9ff9550 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1297,8 +1297,20 @@ CORE_INCS='-I$(top_srcdir)/include -I$(top_builddir)/include'
 
 # SHA1 hashing
 AC_ARG_WITH([sha1],
-[AS_HELP_STRING([--with-sha1=libmd|libgcrypt|libcrypto|libsha1],
+
[AS_HELP_STRING([--with-sha1=libc|libmd|libgcrypt|libcrypto|libsha1],
 [choose SHA1 implementation])])
+AC_CHECK_FUNC([SHA1Init], [HAVE_SHA1_IN_LIBC=yes])
+if test "x$with_sha1" = x && test "x$HAVE_SHA1_IN_LIBC" = xyes; then
+   with_sha1=libc
+fi
+if test "x$with_sha1" = xlibc && test "x$HAVE_SHA1_IN_LIBC" != xyes; then
+   AC_MSG_ERROR([libc requested but not found])
+fi
+if test "x$with_sha1" = xlibc; then
+   AC_DEFINE([HAVE_SHA1_IN_LIBC], [1],
+   [Use libc SHA1 functions])
+   SHA1_LIBS=""
+fi
 AC_CHECK_LIB([md], [SHA1Init], [HAVE_LIBMD=yes])
 if test "x$with_sha1" = x && test "x$HAVE_LIBMD" = xyes; then
with_sha1=libmd
diff --git a/include/dix-config.h.in b/include/dix-config.h.in
index e2bc18e..a57d9b6 100644
--- a/include/dix-config.h.in
+++ b/include/dix-config.h.in
@@ -160,6 +160,9 @@
 /* Define to 1 if you have the  header file. */
 #undef HAVE_RPCSVC_DBM_H
 
+/* Define to use libc SHA1 functions */
+#undef HAVE_SHA1_IN_LIBC
+
 /* Define to use libmd SHA1 functions */
 #undef HAVE_SHA1_IN_LIBMD
 
diff --git a/os/xsha1.c b/os/xsha1.c
index 94092ca..229ce89 100644
--- a/os/xsha1.c
+++ b/os/xsha1.c
@@ -5,7 +5,8 @@
 #include "os.h"
 #include "xsha1.h"
 
-#ifdef HAVE_SHA1_IN_LIBMD /* Use libmd for SHA1 */
+#if defined(HAVE_SHA1_IN_LIBMD)  /* Use libmd for SHA1 */ \
+   || defined(HAVE_SHA1_IN_LIBC) /* Use libc for SHA1 */
 
 # include 
 
-- 
1.6.5.1




-- 
Matthieu Herrb
___
xorg-devel mailing list
xorg-devel@lists.x.org
http://lists.x.org/mailman/listinfo/xorg-devel


Repo Tags

2009-11-02 Thread Michael Witten
From what I can tell, releases are made by compiling a huge list of
`module' version numbers in the xorg/util/modular repo's
module-list.txt file:

http://cgit.freedesktop.org/xorg/util/modular/tree/module-list.txt

The xorg/util/modular repo has tags like XORG-7_5 for each official release.

It would be very useful if each `module' repo were also to have tags
that reflect which of its revisions is included in which xorg
releases. This seems to have been the policy at one point; for
instance, the xf86-input-evdev repo lists tag XORG-7_1, but no such
tag exists for later xorg releases.

Specifically, this kind of policy would (from what I can tell) allow
one to construct, say, `an XORG-7_5 environment' just by checking out
the XORG-7_5 tag in each relevant `module' repo.

Sincerely,
Michael Witten
___
xorg-devel mailing list
xorg-devel@lists.x.org
http://lists.x.org/mailman/listinfo/xorg-devel