[PATCH 1/5] handle extension for detailed timing block

2008-11-17 Thread ling . ma
From: MaLing<[EMAIL PROTECTED]>

hi All,
I updated extensions for the detailed timing block.
In this version, I divide it into 5 patches.
[PATCH 1/5] implement common functions & structures, and handle function.
[PATCH 2/5] handle detailed timing data in xf86EdidModes.c file.
[PATCH 3/5] handle detailed timing data in xf86Crtc.c file.
[PATCH 4/5] handle detailed timing data in xf86Configure.c file.
[PATCH 5/5] handle detailed timing data in print_edid.c file.

Any comments are welcome !
Thanks
Ma Ling
---
 hw/xfree86/ddc/edid.h   |   73 ++
 hw/xfree86/ddc/interpret_edid.c |  197 +--
 hw/xfree86/ddc/xf86DDC.h|   35 +++
 hw/xfree86/modes/xf86Modes.h|3 -
 4 files changed, 254 insertions(+), 54 deletions(-)

diff --git a/hw/xfree86/ddc/edid.h b/hw/xfree86/ddc/edid.h
index 45caf6e..f518137 100644
--- a/hw/xfree86/ddc/edid.h
+++ b/hw/xfree86/ddc/edid.h
@@ -549,4 +549,77 @@ typedef struct {
 
 extern xf86MonPtr ConfiguredMonitor;
 
+#define CEA_EXT   0x02
+#define VTB_EXT   0x10
+#define DI_EXT0x40
+#define LS_EXT0x50
+#define MI_EXT0x60
+
+#define CEA_EXT_MIN_DATA_OFFSET 4
+#define CEA_EXT_MAX_DATA_OFFSET 127
+#define CEA_EXT_DET_TIMING_NUM 6
+
+#define EXT_TAG 0
+#define EXT_REV 1
+
+struct cea_video_blk {
+  Uchar video_code; 
+};
+
+struct cea_audio_blk {
+Uchar descs[3];
+};
+
+struct hdmi {
+  Uchar  Support_flags;
+  Uchar  Max_TMDS_Clock;
+  Uchar  Latency_Present;
+  Uchar  Video_Latency;
+  Uchar  Audio_Latency;
+  Uchar  Interlaced_Video_Latency;
+  Uchar  Interlaced_Audio_Latency;
+};
+
+struct cea_vendor_blk {
+  unsigned char ieee_id[3];
+  Uchar  Port_Addr[2];
+  struct hdmi hdmi;
+};
+
+struct cea_speaker_blk 
+{
+  Uchar FLR:1;
+  Uchar LFE:1;
+  Uchar FC:1;
+  Uchar RLR:1;
+  Uchar RC:1;
+  Uchar FLRC:1;
+  Uchar RLRC:1;
+  Uchar FLRW:1;
+  Uchar FLRH:1;
+  Uchar TC:1;
+  Uchar FCH:1;
+  Uchar Resv:5;
+  Uchar Resv_Byte;
+};
+
+struct cea_data_blk {
+  Uchar len:5;
+  Uchar tag:3;
+union{
+struct cea_video_blk video;
+struct cea_audio_blk audio;
+struct cea_vendor_blk vendor;
+struct cea_speaker_blk speaker;
+  }u;
+};
+
+struct cea_ext_body {
+  Uchar tag;
+  Uchar rev;
+  Uchar dt_offset;
+  Uchar flags;
+  struct cea_data_blk data_collection;
+};
+
 #endif /* _EDID_H_ */
diff --git a/hw/xfree86/ddc/interpret_edid.c b/hw/xfree86/ddc/interpret_edid.c
index c4d8963..e7c6049 100644
--- a/hw/xfree86/ddc/interpret_edid.c
+++ b/hw/xfree86/ddc/interpret_edid.c
@@ -42,7 +42,7 @@ static void get_established_timing_section(Uchar*, struct 
established_timings *)
 static void get_std_timing_section(Uchar*, struct std_timings *,
   struct edid_version *);
 static void get_dt_md_section(Uchar *, struct edid_version *,
- struct detailed_monitor_section *det_mon);
+  struct detailed_monitor_section *det_mon, int);
 static void copy_string(Uchar *, Uchar *);
 static void get_dst_timing_section(Uchar *, struct std_timings *,
   struct edid_version *);
@@ -52,11 +52,27 @@ static void get_detailed_timing_section(Uchar*, struct  
detailed_timings *);
 static Bool validate_version(int scrnIndex, struct edid_version *);
 
 static void
+find_ranges_section(struct detailed_monitor_section *det, void *ranges)
+{
+   if (det->type == DS_RANGES)
+   *(struct monitor_ranges **)ranges = &det->section.ranges;
+}
+
+static void
+find_max_detailed_clock(struct detailed_monitor_section *det, void *ret)
+{
+int *clock = ret;
+
+if (det->type == DT) {
+struct detailed_timings *t = &det->section.d_timings;
+*clock = max(*clock, t->clock);
+}
+}
+
+static void
 handle_edid_quirks(xf86MonPtr m)
 {
-int i, j;
-struct detailed_timings *preferred_timing;
-struct monitor_ranges *ranges;
+struct monitor_ranges *ranges = NULL;
 
 /*
  * max_clock is only encoded in EDID in tens of MHz, so occasionally we
@@ -64,28 +80,116 @@ handle_edid_quirks(xf86MonPtr m)
  * similar.  Strictly we should refuse to round up too far, but let's
  * see how well this works.
  */
-for (i = 0; i < 4; i++) {
-   if (m->det_mon[i].type == DS_RANGES) {
-   ranges = &m->det_mon[i].section.ranges;
-   for (j = 0; j < 4; j++) {
-   if (m->det_mon[j].type == DT) {
-   preferred_timing = &m->det_mon[j].section.d_timings;
-   if (!ranges->max_clock) continue; /* zero is legal */
-   if (ranges->max_clock * 100 < preferred_timing->clock) {
-   xf86Msg(X_WARNING,
-   "EDID preferred timing clock %.2fMHz exceeds "
-   "claimed max %dMHz, fixing\n",
-   preferred_timing->clock / 1.0e6,
-   ranges->max_clock);
-   ranges->max_clock =
-   (preferred_timin

[PATCH 2/5] handle extension for detailed timing block

2008-11-17 Thread ling . ma
From: root <[EMAIL PROTECTED]>

---
 hw/xfree86/modes/xf86EdidModes.c |  264 +++---
 1 files changed, 133 insertions(+), 131 deletions(-)

diff --git a/hw/xfree86/modes/xf86EdidModes.c b/hw/xfree86/modes/xf86EdidModes.c
index bea2f7e..5c67c8c 100644
--- a/hw/xfree86/modes/xf86EdidModes.c
+++ b/hw/xfree86/modes/xf86EdidModes.c
@@ -45,20 +45,24 @@
 #include 
 #include 
 
+void static handle_detailed_rblank(struct detailed_monitor_section *det_mon,
+   void *data)
+{
+
+if (det_mon->type == DS_RANGES)
+if (det_mon->section.ranges.supported_blanking & CVT_REDUCED)
+*(Bool*)data = TRUE;
+}
+
 static Bool
 xf86MonitorSupportsReducedBlanking(xf86MonPtr DDC)
 {
 /* EDID 1.4 explicitly defines RB support */
 if (DDC->ver.revision >= 4) {
-   int i;
-   for (i = 0; i < DET_TIMINGS; i++) {
-   struct detailed_monitor_section *det_mon = &DDC->det_mon[i];
-   if (det_mon->type == DS_RANGES)
-   if (det_mon->section.ranges.supported_blanking & CVT_REDUCED)
-   return TRUE;
-   }
-   
-   return FALSE;
+Bool ret = FALSE;
+ 
+xf86ForEachDetailedBlock(DDC, handle_detailed_rblank, &ret);
+return ret;
 }
 
 /* For anything older, assume digital means RB support. Boo. */
@@ -72,30 +76,6 @@ xf86MonitorSupportsReducedBlanking(xf86MonPtr DDC)
  * Quirks to work around broken EDID data from various monitors.
  */
 
-typedef enum {
-DDC_QUIRK_NONE = 0,
-/* First detailed mode is bogus, prefer largest mode at 60hz */
-DDC_QUIRK_PREFER_LARGE_60 = 1 << 0,
-/* 135MHz clock is too high, drop a bit */
-DDC_QUIRK_135_CLOCK_TOO_HIGH = 1 << 1,
-/* Prefer the largest mode at 75 Hz */
-DDC_QUIRK_PREFER_LARGE_75 = 1 << 2,
-/* Convert detailed timing's horizontal from units of cm to mm */
-DDC_QUIRK_DETAILED_H_IN_CM = 1 << 3,
-/* Convert detailed timing's vertical from units of cm to mm */
-DDC_QUIRK_DETAILED_V_IN_CM = 1 << 4,
-/* Detailed timing descriptors have bogus size values, so just take the
- * maximum size and use that.
- */
-DDC_QUIRK_DETAILED_USE_MAXIMUM_SIZE = 1 << 5,
-/* Monitor forgot to set the first detailed is preferred bit. */
-DDC_QUIRK_FIRST_DETAILED_PREFERRED = 1 << 6,
-/* use +hsync +vsync for detailed mode */
-DDC_QUIRK_DETAILED_SYNC_PP = 1 << 7,
-/* Force single-link DVI bandwidth limit */
-DDC_QUIRK_DVI_SINGLE_LINK = 1 << 8,
-} ddc_quirk_t;
-
 static Bool quirk_prefer_large_60 (int scrnIndex, xf86MonPtr DDC)
 {
 /* Belinea 10 15 55 */
@@ -667,7 +647,7 @@ DDCGuessRangesFromModes(int scrnIndex, MonPtr Monitor, 
DisplayModePtr Modes)
 }
 }
 
-static ddc_quirk_t
+ddc_quirk_t
 xf86DDCDetectQuirks(int scrnIndex, xf86MonPtr DDC, Bool verbose)
 {
 ddc_quirk_tquirks;
@@ -693,28 +673,23 @@ xf86DDCDetectQuirks(int scrnIndex, xf86MonPtr DDC, Bool 
verbose)
  * Note that some quirks applying to the mode list are still implemented in
  * xf86DDCGetModes.
  */
-void
-xf86DDCApplyQuirks(int scrnIndex, xf86MonPtr DDC)
+void xf86DetTimingApplyQuirks(struct detailed_monitor_section *det_mon,
+  ddc_quirk_t quirks,
+  int hsize, int vsize)  
 {
-ddc_quirk_t quirks = xf86DDCDetectQuirks (scrnIndex, DDC, FALSE);
-int i;
 
-for (i = 0; i < DET_TIMINGS; i++) {
-   struct detailed_monitor_section *det_mon = &DDC->det_mon[i];
-
-   if (det_mon->type != DT)
-   continue;
+if (det_mon->type != DT)
+return;
 
-   if (quirks & DDC_QUIRK_DETAILED_H_IN_CM)
-   det_mon->section.d_timings.h_size *= 10;
+if (quirks & DDC_QUIRK_DETAILED_H_IN_CM)
+det_mon->section.d_timings.h_size *= 10;
 
-   if (quirks & DDC_QUIRK_DETAILED_V_IN_CM)
-   det_mon->section.d_timings.v_size *= 10;
+if (quirks & DDC_QUIRK_DETAILED_V_IN_CM)
+det_mon->section.d_timings.v_size *= 10;
 
-   if (quirks & DDC_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
-   det_mon->section.d_timings.h_size = 10 * DDC->features.hsize;
-   det_mon->section.d_timings.v_size = 10 * DDC->features.vsize;
-   }
+if (quirks & DDC_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
+det_mon->section.d_timings.h_size = 10 * hsize;
+det_mon->section.d_timings.v_size = 10 * vsize;
 }
 }
 
@@ -759,14 +734,61 @@ xf86DDCSetPreferredRefresh(int scrnIndex, DisplayModePtr 
modes,
best->type |= M_T_PREFERRED;
 }
 
+struct det_modes_parameter {
+xf86MonPtr DDC;
+ddc_quirk_t quirks;
+DisplayModePtr * Modes;
+Bool rb;
+Bool preferred;
+int timing_level;
+};
+
+static void handle_detailed_modes(struct detailed_monitor_section *det_mon,
+ void *data)
+{
+DisplayModePtr  Mode;
+struct det_modes_parameter *p = (struct det_modes_parameter *)data;
+
+xf86DetTimingApplyQuirks(det_mon,p->

[PATCH 2/5] handle extension for detailed timing block

2008-11-17 Thread ling . ma
From: MaLing <[EMAIL PROTECTED]>

---
 hw/xfree86/modes/xf86EdidModes.c |  264 +++---
 1 files changed, 133 insertions(+), 131 deletions(-)

diff --git a/hw/xfree86/modes/xf86EdidModes.c b/hw/xfree86/modes/xf86EdidModes.c
index bea2f7e..5c67c8c 100644
--- a/hw/xfree86/modes/xf86EdidModes.c
+++ b/hw/xfree86/modes/xf86EdidModes.c
@@ -45,20 +45,24 @@
 #include 
 #include 
 
+void static handle_detailed_rblank(struct detailed_monitor_section *det_mon,
+   void *data)
+{
+
+if (det_mon->type == DS_RANGES)
+if (det_mon->section.ranges.supported_blanking & CVT_REDUCED)
+*(Bool*)data = TRUE;
+}
+
 static Bool
 xf86MonitorSupportsReducedBlanking(xf86MonPtr DDC)
 {
 /* EDID 1.4 explicitly defines RB support */
 if (DDC->ver.revision >= 4) {
-   int i;
-   for (i = 0; i < DET_TIMINGS; i++) {
-   struct detailed_monitor_section *det_mon = &DDC->det_mon[i];
-   if (det_mon->type == DS_RANGES)
-   if (det_mon->section.ranges.supported_blanking & CVT_REDUCED)
-   return TRUE;
-   }
-   
-   return FALSE;
+Bool ret = FALSE;
+ 
+xf86ForEachDetailedBlock(DDC, handle_detailed_rblank, &ret);
+return ret;
 }
 
 /* For anything older, assume digital means RB support. Boo. */
@@ -72,30 +76,6 @@ xf86MonitorSupportsReducedBlanking(xf86MonPtr DDC)
  * Quirks to work around broken EDID data from various monitors.
  */
 
-typedef enum {
-DDC_QUIRK_NONE = 0,
-/* First detailed mode is bogus, prefer largest mode at 60hz */
-DDC_QUIRK_PREFER_LARGE_60 = 1 << 0,
-/* 135MHz clock is too high, drop a bit */
-DDC_QUIRK_135_CLOCK_TOO_HIGH = 1 << 1,
-/* Prefer the largest mode at 75 Hz */
-DDC_QUIRK_PREFER_LARGE_75 = 1 << 2,
-/* Convert detailed timing's horizontal from units of cm to mm */
-DDC_QUIRK_DETAILED_H_IN_CM = 1 << 3,
-/* Convert detailed timing's vertical from units of cm to mm */
-DDC_QUIRK_DETAILED_V_IN_CM = 1 << 4,
-/* Detailed timing descriptors have bogus size values, so just take the
- * maximum size and use that.
- */
-DDC_QUIRK_DETAILED_USE_MAXIMUM_SIZE = 1 << 5,
-/* Monitor forgot to set the first detailed is preferred bit. */
-DDC_QUIRK_FIRST_DETAILED_PREFERRED = 1 << 6,
-/* use +hsync +vsync for detailed mode */
-DDC_QUIRK_DETAILED_SYNC_PP = 1 << 7,
-/* Force single-link DVI bandwidth limit */
-DDC_QUIRK_DVI_SINGLE_LINK = 1 << 8,
-} ddc_quirk_t;
-
 static Bool quirk_prefer_large_60 (int scrnIndex, xf86MonPtr DDC)
 {
 /* Belinea 10 15 55 */
@@ -667,7 +647,7 @@ DDCGuessRangesFromModes(int scrnIndex, MonPtr Monitor, 
DisplayModePtr Modes)
 }
 }
 
-static ddc_quirk_t
+ddc_quirk_t
 xf86DDCDetectQuirks(int scrnIndex, xf86MonPtr DDC, Bool verbose)
 {
 ddc_quirk_tquirks;
@@ -693,28 +673,23 @@ xf86DDCDetectQuirks(int scrnIndex, xf86MonPtr DDC, Bool 
verbose)
  * Note that some quirks applying to the mode list are still implemented in
  * xf86DDCGetModes.
  */
-void
-xf86DDCApplyQuirks(int scrnIndex, xf86MonPtr DDC)
+void xf86DetTimingApplyQuirks(struct detailed_monitor_section *det_mon,
+  ddc_quirk_t quirks,
+  int hsize, int vsize)  
 {
-ddc_quirk_t quirks = xf86DDCDetectQuirks (scrnIndex, DDC, FALSE);
-int i;
 
-for (i = 0; i < DET_TIMINGS; i++) {
-   struct detailed_monitor_section *det_mon = &DDC->det_mon[i];
-
-   if (det_mon->type != DT)
-   continue;
+if (det_mon->type != DT)
+return;
 
-   if (quirks & DDC_QUIRK_DETAILED_H_IN_CM)
-   det_mon->section.d_timings.h_size *= 10;
+if (quirks & DDC_QUIRK_DETAILED_H_IN_CM)
+det_mon->section.d_timings.h_size *= 10;
 
-   if (quirks & DDC_QUIRK_DETAILED_V_IN_CM)
-   det_mon->section.d_timings.v_size *= 10;
+if (quirks & DDC_QUIRK_DETAILED_V_IN_CM)
+det_mon->section.d_timings.v_size *= 10;
 
-   if (quirks & DDC_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
-   det_mon->section.d_timings.h_size = 10 * DDC->features.hsize;
-   det_mon->section.d_timings.v_size = 10 * DDC->features.vsize;
-   }
+if (quirks & DDC_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
+det_mon->section.d_timings.h_size = 10 * hsize;
+det_mon->section.d_timings.v_size = 10 * vsize;
 }
 }
 
@@ -759,14 +734,61 @@ xf86DDCSetPreferredRefresh(int scrnIndex, DisplayModePtr 
modes,
best->type |= M_T_PREFERRED;
 }
 
+struct det_modes_parameter {
+xf86MonPtr DDC;
+ddc_quirk_t quirks;
+DisplayModePtr * Modes;
+Bool rb;
+Bool preferred;
+int timing_level;
+};
+
+static void handle_detailed_modes(struct detailed_monitor_section *det_mon,
+ void *data)
+{
+DisplayModePtr  Mode;
+struct det_modes_parameter *p = (struct det_modes_parameter *)data;
+
+xf86DetTimingApplyQuirks(det_mon,p

Re: exaCopyNtoN calls driver Prepare/Done Copy with no Copy in between

2008-11-17 Thread Michel Dänzer
On Mon, 2008-11-17 at 11:21 +1000, Dave Airlie wrote:
> >>>
> >>> I also see some from exaFillRegionTiled
> >>
> >> Sounds like maybe exaCopyNtoN and exaFillRegionTiled should bail early
> >> if nbox == 0. Or maybe that should really be done higher up, e.g. the
> >> damage layer could not call down unless really necessary.
> >
> > Okay one of them was from CopyNtoN getting nbox == 0, so I just made
> > it bail, simple patch so I checked it in.
> >
> > The other is from the tiled code doing the second pass for leftover areas.
> >
> > Initial patch is attached it just prechecks if the copies will be
> > needed and avoids them if they aren't, this one I thought
> > might need some review.
> >
> 
> Obligatory logic error.
> 
> Updated patch.

[...]

> +   if ((dstX < pBox[i].x2) || (dstY < pBox[i].y2))
> +   more_copy = TRUE;
> +   }

You could add a break statement here, or maybe do these checks during
the initial copy loop.

Looks good to me otherwise.


-- 
Earthling Michel Dänzer   |  http://tungstengraphics.com
Libre software enthusiast |  Debian, X and DRI developer

___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg

[PATCH 3/5] handle extension for detailed timing block

2008-11-17 Thread ling . ma
From: MaLing <[EMAIL PROTECTED]>

---
 hw/xfree86/modes/xf86Crtc.c |  135 +++
 1 files changed, 84 insertions(+), 51 deletions(-)

diff --git a/hw/xfree86/modes/xf86Crtc.c b/hw/xfree86/modes/xf86Crtc.c
index f072109..1a3908b 100644
--- a/hw/xfree86/modes/xf86Crtc.c
+++ b/hw/xfree86/modes/xf86Crtc.c
@@ -1394,6 +1394,42 @@ GuessRangeFromModes(MonPtr mon, DisplayModePtr mode)
mon->vrefresh[0].lo = 58.0;
 }
 
+struct det_monrec_parameter {
+MonRec *mon_rec;
+int *max_clock;
+int *sync_source; 
+};
+
+static void handle_detailed_monrec(struct detailed_monitor_section *det_mon,
+   void *data)
+{  
+enum { sync_config, sync_edid, sync_default };
+struct det_monrec_parameter *p; 
+p = (struct det_monrec_parameter *)data;
+
+if (det_mon->type == DS_RANGES) {
+struct monitor_ranges   *ranges = &det_mon->section.ranges;
+if (p->mon_rec->nHsync == 0 && ranges->max_h) {
+p->mon_rec->hsync[p->mon_rec->nHsync].lo = ranges->min_h;
+p->mon_rec->hsync[p->mon_rec->nHsync].hi = ranges->max_h;
+p->mon_rec->nHsync++;
+if (*p->sync_source == sync_default)
+   *p->sync_source = sync_edid;
+   }
+
+   if (p->mon_rec->nVrefresh == 0 && ranges->max_v) {
+   p->mon_rec->vrefresh[p->mon_rec->nVrefresh].lo = ranges->min_v;
+   p->mon_rec->vrefresh[p->mon_rec->nVrefresh].hi = ranges->max_v;
+   p->mon_rec->nVrefresh++;
+   if (*p->sync_source == sync_default)
+   *p->sync_source = sync_edid;
+   }
+
+   if (ranges->max_clock * 1000 > *p->max_clock)
+   *p->max_clock = ranges->max_clock * 1000;
+}
+}
+
 _X_EXPORT void
 xf86ProbeOutputModes (ScrnInfoPtr scrn, int maxX, int maxY)
 {
@@ -1470,38 +1506,16 @@ xf86ProbeOutputModes (ScrnInfoPtr scrn, int maxX, int 
maxY)
output_modes = (*output->funcs->get_modes) (output);

edid_monitor = output->MonInfo;
-   
-   if (edid_monitor)
-   {
-   int i;
-   Boolset_hsync = mon_rec.nHsync == 0;
-   Boolset_vrefresh = mon_rec.nVrefresh == 0;
 
-   for (i = 0; i < sizeof (edid_monitor->det_mon) / sizeof 
(edid_monitor->det_mon[0]); i++)
-   {
-   if (edid_monitor->det_mon[i].type == DS_RANGES)
-   {
-   struct monitor_ranges   *ranges = 
&edid_monitor->det_mon[i].section.ranges;
-   if (set_hsync && ranges->max_h)
-   {
-   mon_rec.hsync[mon_rec.nHsync].lo = ranges->min_h;
-   mon_rec.hsync[mon_rec.nHsync].hi = ranges->max_h;
-   mon_rec.nHsync++;
-   if (sync_source == sync_default)
-   sync_source = sync_edid;
-   }
-   if (set_vrefresh && ranges->max_v)
-   {
-   mon_rec.vrefresh[mon_rec.nVrefresh].lo = ranges->min_v;
-   mon_rec.vrefresh[mon_rec.nVrefresh].hi = ranges->max_v;
-   mon_rec.nVrefresh++;
-   if (sync_source == sync_default)
-   sync_source = sync_edid;
-   }
-   if (ranges->max_clock * 1000 > max_clock)
-   max_clock = ranges->max_clock * 1000;
-   }
-   }
+   if (edid_monitor) {
+struct det_monrec_parameter p;
+p.mon_rec = &mon_rec;
+p.max_clock = &max_clock;
+p.sync_source = (int *)&sync_source;
+
+xf86ForEachDetailedBlock(edid_monitor,
+handle_detailed_monrec,
+&p);
}
 
if (xf86GetOptValFreq (output->options, OPTION_MIN_CLOCK,
@@ -2601,6 +2615,37 @@ xf86OutputSetEDIDProperty (xf86OutputPtr output, void 
*data, int data_len)
 
 #endif
 
+
+/* Pull out a phyiscal size from a detailed timing if available. */
+struct det_phySize_parameter {
+xf86OutputPtr output;
+ddc_quirk_t quirks;
+Bool ret;   
+};
+
+void static handle_detailed_physical_size(struct detailed_monitor_section 
+ *det_mon, void *data)
+{
+struct det_phySize_parameter *p;
+p = (struct det_phySize_parameter *)data;
+
+if (p->ret == TRUE )
+return ;
+
+xf86DetTimingApplyQuirks(det_mon, p->quirks,
+ p->output->MonInfo->features.hsize,
+ p->output->MonInfo->features.vsize);
+
+if (det_mon->type == DT &&
+det_mon->section.d_timings.h_size != 0 &&
+det_mon->section.d_timings.v_size != 0) {
+
+p->output->mm_width = det_mon->section.d_timings.h_size;
+p->output->mm_height = det_mon->section.d_timings.v_size;
+p->ret =

[PATCH 4/5] handle extension for detailed timing block

2008-11-17 Thread ling . ma
From: MaLing <[EMAIL PROTECTED]>

---
 hw/xfree86/common/xf86Configure.c |   56 
 1 files changed, 31 insertions(+), 25 deletions(-)

diff --git a/hw/xfree86/common/xf86Configure.c 
b/hw/xfree86/common/xf86Configure.c
index 85cd5b6..9c805ac 100644
--- a/hw/xfree86/common/xf86Configure.c
+++ b/hw/xfree86/common/xf86Configure.c
@@ -583,6 +583,35 @@ configureMonitorSection (int screennum)
 return ptr;
 }
 
+static void handle_detailed_input(struct detailed_monitor_section *det_mon,
+  void *data)
+{
+XF86ConfMonitorPtr ptr = (XF86ConfMonitorPtr) data; 
+
+switch (det_mon->type) {
+case DS_NAME:
+ptr->mon_modelname = xf86confrealloc(ptr->mon_modelname, 
+ 
strlen((char*)(det_mon->section.name)) +
+ 1);
+strcpy(ptr->mon_modelname,
+ (char*)(det_mon->section.name));
+break;
+case DS_RANGES:
+ptr->mon_hsync[ptr->mon_n_hsync].lo =
+det_mon->section.ranges.min_h;
+ptr->mon_hsync[ptr->mon_n_hsync].hi =
+det_mon->section.ranges.max_h;
+ptr->mon_n_vrefresh = 1;
+ptr->mon_vrefresh[ptr->mon_n_hsync].lo =
+det_mon->section.ranges.min_v;
+ptr->mon_vrefresh[ptr->mon_n_hsync].hi =
+det_mon->section.ranges.max_v;
+ptr->mon_n_hsync++;
+default:
+break;
+}
+}
+
 static XF86ConfMonitorPtr
 configureDDCMonitorSection (int screennum)
 {
@@ -630,31 +659,8 @@ configureDDCMonitorSection (int screennum)
 }
 #endif /* def CONFIGURE_DISPLAYSIZE */
 
-for (i=0;i<4;i++) {
-   switch (ConfiguredMonitor->det_mon[i].type) {
-   case DS_NAME:
-   ptr->mon_modelname  = xf86confrealloc(ptr->mon_modelname, 
- strlen((char*)(ConfiguredMonitor->det_mon[i].section.name))
-   + 1);
-   strcpy(ptr->mon_modelname,
-  (char*)(ConfiguredMonitor->det_mon[i].section.name));
-   break;
-   case DS_RANGES:
-   ptr->mon_hsync[ptr->mon_n_hsync].lo =
-   ConfiguredMonitor->det_mon[i].section.ranges.min_h;
-   ptr->mon_hsync[ptr->mon_n_hsync].hi =
-   ConfiguredMonitor->det_mon[i].section.ranges.max_h;
-   ptr->mon_n_vrefresh = 1;
-   ptr->mon_vrefresh[ptr->mon_n_hsync].lo =
-   ConfiguredMonitor->det_mon[i].section.ranges.min_v;
-   ptr->mon_vrefresh[ptr->mon_n_hsync].hi =
-   ConfiguredMonitor->det_mon[i].section.ranges.max_v;
-   ptr->mon_n_hsync++;
-   default:
-   break;
-   }
-}
-
+xf86ForEachDetailedBlock(ConfiguredMonitor, handle_detailed_input,
+ ptr);
 if (ConfiguredMonitor->features.dpms) {
   ptr->mon_option_lst = xf86addNewOption(ptr->mon_option_lst, 
xstrdup("DPMS"), NULL);
 }
-- 
1.5.4.4

___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg


[PATCH 5/5] handle extension for detailed timing block

2008-11-17 Thread ling . ma
From: MaLing <[EMAIL PROTECTED]>

---
 hw/xfree86/ddc/print_edid.c |  252 +++
 1 files changed, 136 insertions(+), 116 deletions(-)

diff --git a/hw/xfree86/ddc/print_edid.c b/hw/xfree86/ddc/print_edid.c
index 0dd4bca..404b62d 100644
--- a/hw/xfree86/ddc/print_edid.c
+++ b/hw/xfree86/ddc/print_edid.c
@@ -333,123 +333,137 @@ print_detailed_timings(int scrnIndex, struct 
detailed_timings *t)
 }
 }
 
+struct det_print_parameter{
+  xf86MonPtr m;
+  int index;
+  ddc_quirk_t quirks;
+};
+
 static void
-print_detailed_monitor_section(int scrnIndex,
-  struct detailed_monitor_section *m)
+handle_detailed_print(struct detailed_monitor_section *det_mon,
+  void *data)
 {
-int i,j;
-  
-for (i=0;imin_v, r->max_v, r->min_h, r->max_h);
-   if (r->max_clock_khz != 0) {
-   xf86ErrorF(" PixClock max %i kHz\n", r->max_clock_khz);
-   if (r->maxwidth)
-   xf86DrvMsg(scrnIndex, X_INFO, "Maximum pixel width: %d\n",
-  r->maxwidth);
-   xf86DrvMsg(scrnIndex, X_INFO, "Supported aspect ratios:");
-   if (r->supported_aspect & SUPPORTED_ASPECT_4_3)
-   xf86ErrorF(" 4:3%s",
-   r->preferred_aspect == PREFERRED_ASPECT_4_3?"*":"");
-   if (r->supported_aspect & SUPPORTED_ASPECT_16_9)
-   xf86ErrorF(" 16:9%s",
-   r->preferred_aspect == PREFERRED_ASPECT_16_9?"*":"");
-   if (r->supported_aspect & SUPPORTED_ASPECT_16_10)
-   xf86ErrorF(" 16:10%s",
-   r->preferred_aspect == PREFERRED_ASPECT_16_10?"*":"");
-   if (r->supported_aspect & SUPPORTED_ASPECT_5_4)
-   xf86ErrorF(" 5:4%s",
-   r->preferred_aspect == PREFERRED_ASPECT_5_4?"*":"");
-   if (r->supported_aspect & SUPPORTED_ASPECT_15_9)
-   xf86ErrorF(" 15:9%s",
-   r->preferred_aspect == PREFERRED_ASPECT_15_9?"*":"");
-   xf86ErrorF("\n");
-   xf86DrvMsg(scrnIndex, X_INFO, "Supported blankings:");
-   if (r->supported_blanking & CVT_STANDARD)
-   xf86ErrorF(" standard");
-   if (r->supported_blanking & CVT_REDUCED)
-   xf86ErrorF(" reduced");
-   xf86ErrorF("\n");
-   xf86DrvMsg(scrnIndex, X_INFO, "Supported scalings:");
-   if (r->supported_scaling & SCALING_HSHRINK)
-   xf86ErrorF(" hshrink");
-   if (r->supported_scaling & SCALING_HSTRETCH)
-   xf86ErrorF(" hstretch");
-   if (r->supported_scaling & SCALING_VSHRINK)
-   xf86ErrorF(" vshrink");
-   if (r->supported_scaling & SCALING_VSTRETCH)
-   xf86ErrorF(" vstretch");
-   xf86ErrorF("\n");
-   xf86DrvMsg(scrnIndex, X_INFO, "Preferred refresh rate: %d\n",
-  r->preferred_refresh);
-   } else if (r->max_clock != 0) {
-   xf86ErrorF(" PixClock max %i MHz\n", r->max_clock);
-   } else {
-   xf86ErrorF("\n");
-   }
-   if (r->gtf_2nd_f > 0)
-   xf86DrvMsg(scrnIndex,X_INFO," 2nd GTF parameters: f: %i kHz "
-  "c: %i m: %i k %i j %i\n", r->gtf_2nd_f,
-  r->gtf_2nd_c, r->gtf_2nd_m, r->gtf_2nd_k,
-  r->gtf_2nd_j);
-   break;
-   }
-   case DS_STD_TIMINGS:
-   for (j = 0; j<5; j++) 
-   xf86DrvMsg(scrnIndex,X_INFO,"#%i: hsize: %i  vsize %i  refresh: 
%i  "
-  "vid: %i\n",i,m[i].section.std_t[i].hsize,
-  
m[i].section.std_t[j].vsize,m[i].section.std_t[j].refresh,
-  m[i].section.std_t[j].id);
-   break;
-   case DS_WHITE_P:
-   for (j = 0; j<2; j++)
-   if (m[i].section.wp[j].index != 0)
-   xf86DrvMsg(scrnIndex,X_INFO,
-  "White point %i: whiteX: %f, whiteY: %f; gamma: 
%f\n",
-  
m[i].section.wp[j].index,m[i].section.wp[j].white_x,
-  m[i].section.wp[j].white_y,
-  m[i].section.wp[j].white_gamma);
-   break;
-   case DS_CMD:
-   xf86DrvMsg(scrnIndex, X_INFO,
-  "Color management data: (not decoded)\n");
-   break;
-   case DS_CVT:
-   xf86DrvMsg(scrnIndex, X_INFO,
-  "CVT 3-byte-code modes:\n");
-   print_cvt_timings(scrnIndex, m[i].section.cvt);
-   break;
-   case DS_EST_III:
-   xf86DrvMsg(scrnIndex, X_INFO,
-  "Established timings III: (not decoded)\n");
-   break;
-   case DS_DUMMY:
-   default:
-  

Re: Ansification of X.Org code & other cleanup work

2008-11-17 Thread Peter Breitenlohner
On Mon, 20 Oct 2008, Alan Coopersmith wrote:

Hi Alan,

in your mail starting this thread

> If someone wanted to organize a "janitorial squad" to tackle these and help
> new people work through them to get to the point where they were ready for
> commit access, we'd love you forever (or at least until you turn us down
> when we then volunteer you to be the next release manager).

> [3] 122 open bugs, though many patches aren't keyworded:
> http://bugs.freedesktop.org/buglist.cgi?keywords=patch&product=Xorg&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED

you suggested to have a look at these bugs and patches. In the meantime I
have done just that.  There are quite a few easy ones (apply cleanly to
current git and are obvious changes such as strict ANSI C function
definitions, fixing some small bugs, and similar).

Here my recommendations:



app/viewres
bugid=18479 patchid=20208   apply

app/xf86dga
bugid=15084 patchid=15215   apply

app/xfd
bugid=18479 patchid=20209   apply

app/xfontsel
bugid=15083 patchid=15214   apply

app/xgc
bugid=18479 patchid=20210   apply

app/xkbprint
bugid=12790 patchid=12008   apply, maybe without commented old code
bugid=15078 patchid=15207   apply with --whitespace=fix

app/xkbutils
bugid=15077 patchid=15206   apply

app/xload
bugid=15075 patchid=15205   apply

app/xlsfonts
bugid=15073 patchid=15203   apply

app/xmessage
bugid=15072 patchid=15202   apply

app/xmh
bugid=15071 patchid=15201   apply
bugid=18479 patchid=20211   apply

app/xprop
bugid=16154 patchid=16813   apply

app/xsm
bugid=15066 patchid=15196   apply
bugid=18479 patchid=20212   apply



where bugid= refers to
http://bugs.freedesktop.org/show_bug.cgi?id=n
and patchid=n to
http://bugs.freedesktop.org/attachment.cgi?id=n



Note: patches 20208-20212 have been submitted by me.  They correct an
obvious build failure along the lines already applied to app/xedit, but
nevertheless they need independent review and approval.

Regards
Peter Breitenlohner <[EMAIL PROTECTED]>
___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg


Re: Mouse button problems using Logitech NX80

2008-11-17 Thread Matija Šuklje
Dne ponedeljek 17. novembra 2008 je Peter Hutterer napisal(a):
> please file a bug report, because the information is getting spread across
> too many emails now and I'm losing track.

OK, will do.

> synaptics and evdev are two different drivers, so you probably have to file
> two separate bugs.

Sure.

> Don't forget to attach your Xorg.log. 
> Oh, and no pastebin links from bugzilla please, makes it harder to find all
> the stuff.

Of course not — on Bugzilla that'd be plain silly, if I can attach whole 
files ;)

Cheers,
Matija
-- 
gsm: +386 41 849 552
e-mail: [EMAIL PROTECTED]
www: http://matija.suklje.name

aim: hookofsilver
icq: 110183360
jabber/g-talk: [EMAIL PROTECTED]
msn: [EMAIL PROTECTED]
yahoo: matija_suklje
GPG/PGP fingerprint: FB64 FFAF B8DA 5AB5 B18A 98B8 2B68 0B51 0549 D278


signature.asc
Description: This is a digitally signed message part.
___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg

radeon driver again: GL_EXT_framebuffer_object?

2008-11-17 Thread Florian Echtler
Hello everyone,

an OpenGL-related question again: while the R300 driver from Mesa 7.2
doesn't seem to announce the GL_EXT_framebuffer_object extension on a
Mobility Radeon X1400, the glGenFramebuffersEXT function is present and
available in libGL.so. However, when calling it, I get a segfault at
0x, so I suspect that it's just an empty stub. Are there plans
to support this extension in the near future, or what would be necessary
to add support?

Many thanks,
Yours, Florian
-- 
0666 - Filemode of the Beast

___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg


help for install xorg server1.5

2008-11-17 Thread xiaoyu wu
hello everyone,

I have CentOS 5.2, but when I finish installing the OS on my HP dx2400, I can't
get the graphical interface.
The installed version is xorg-x11-server 1.1.1-48.41.el5. I have found xorg-server 1.5
on wiki.x.org, but I don't know how to install it.

somebody can help me ? thanks

merci

-- 
Que Dieu vous bénisse

Xiaoyu WU

00 33  6 07 11 80 92

If you are lucky enough to have lived in Paris as a young man, then wherever
you go for the rest of your life, it stays with you, for Paris is a moveable
feast."

你所经历的一切,只要那是真实的,那就是你生命中的财富。保存它们吧!保存坎坷,保存孤独,保存年轻,保存快乐,保存你生命中真实而独特的一切
-罗丹
___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg

Making one multiseat user able to switch vts?

2008-11-17 Thread Kārlis Repsons
In general it would be nice, if multiseat workstation administrator could 
switch to vt[1-6]. Does Xorg support it somehow and is it possible to 
implement (well, kindly ask someone to do it) such option? Otherwise 
multiseat on Linux is quite problematic, if no ssh is used...
(also I noticed a problem with multiseat, that, if I stop X and login manager 
permanently, I still could not switch to any vts)

PS: sorry for duplication, but I wanted to write it more clearly.
___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg


Re: X server 1.6 release schedule

2008-11-17 Thread Matthias Hopf
On Nov 14, 08 13:13:16 -0800, Keith Packard wrote:
> I volunteered to manage an X server 1.6 release, tentatively scheduled
> for the end of the year (yes, this year, 2008). This release will
> include DRI2 and RandR 1.3 support. I'd like to know how much of the new
> Xinput stuff will be ready in time.

Keith, AFAICS the standard properties do not inflict any changes on
server code except for the naming of the EDID data property, so I assume
that's fine here.
I'm unsure whether it would be wise to include panning support in 1.3,
even given that I manage to put it together until 11/24. I guess it
should settle in master first.
In that case, do we have any protocol changes in 1.3? Do we need a
version bump then?

Matthias

-- 
Matthias Hopf <[EMAIL PROTECTED]>  ____   __
Maxfeldstr. 5 / 90409 Nuernberg   (_   | |  (_   |__  [EMAIL PROTECTED]
Phone +49-911-74053-715   __)  |_|  __)  |__  R & D   www.mshopf.de
___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg


Re: Switchable graphics

2008-11-17 Thread Matthew Garrett
On Sun, Nov 16, 2008 at 06:26:50PM +, [EMAIL PROTECTED] wrote:
> Hello,
> I just wanted to know if there are any plans to implement the switchable 
> graphics (also "hybrid graphics" and ati-
> specific: "PowerXpress") under Xorg. I'm not the first one asking this 
> question (http://lists.freedesktop.
> org/archives/xorg/2008-July/036961.html) but I just wondered if anyone has 
> any news about this topic since it has 
> appeared in this mailing list in July.

There are plans, but it involves significant work. We're still not 
entirely sure how to trigger the device handover, let alone do any of 
the X side of things.

-- 
Matthew Garrett | [EMAIL PROTECTED]
___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg


Re: keysymdef.h has wrong implies symbol?

2008-11-17 Thread Erik Streb del Toro

James Cloos schrieb am 28.10.2008 00:21:

"Peter" == Peter Hutterer <[EMAIL PROTECTED]> writes:


Peter> well, that's the issue with the whole thing (Erik and me
Peter> discussed that a bit):

Peter> keysymdef.h states that XK_implies is U+21D2 RIGHTWARDS DOUBLE
Peter> ARROW.  in mathematics, this is the usual symbol for "implies".
Peter> however, according to http://unicode.org/charts/PDF/U2200.pdf (p
Peter> 207), "implies" is an alias for RIGHT TACK.

I wonder whether ⊢ is used for implies in APL?

Peter> As Erik pointed out, right tack does have its own symbol
Peter> XK_righttack, whereas there's no other XK_... for the double
Peter> right arrow.

Sounds like a good reason to go with the comment.  As does Erik’s note
that Gnome’s code follows the comment rather than the current imKStoUCS
implementation.

Has anyone audited imKStoUCS.c to see whether there are any other
discrepancies?

-JimC


I’m still waiting for the solution of this little problem. I searched 
for discrepancies in imKStoUCS.c but couldn’t find any.


So, will someone please commit the small bugfix? Or what are we waiting for?

Erik

--
GPG-Schlüssel-ID: 0x036B38E6
Fingerabdruck: F057 EEEB F0F5 9144 D95C BD98 B822 138F 036B 38E6

Außerdem kann man per Jabber mit mir reden (chatten):
Jabber-ID: [EMAIL PROTECTED]
Off-The-Record: DEBD08C2 95E7C8CE 901EC136 E39A1E43 4FC13142



signature.asc
Description: OpenPGP digital signature
___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg

Re: radeon driver again: GL_EXT_framebuffer_object?

2008-11-17 Thread Alex Deucher
On Mon, Nov 17, 2008 at 4:47 AM, Florian Echtler <[EMAIL PROTECTED]> wrote:
> Hello everyone,
>
> an OpenGL-related question again: while the R300 driver from Mesa 7.2
> doesn't seem to announce the GL_EXT_framebuffer_object extension on a
> Mobility Radeon X1400, the glGenFramebuffersEXT function is present and
> available in libGL.so. However, when calling it, I get a segfault at
> 0x, so I suspect that it's just an empty stub. Are there plans
> to support this extension in the near future, or what would be necessary
> to add support?

Implementing that requires a proper memory manager and some driver
restructuring to take advantage of it.  Work on that is already underway.

Alex
___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg


Re: radeon driver again: GL_EXT_framebuffer_object?

2008-11-17 Thread Corbin Simpson
Florian Echtler wrote:
> Hello everyone,
> 
> an OpenGL-related question again: while the R300 driver from Mesa 7.2
> doesn't seem to announce the GL_EXT_framebuffer_object extension on a
> Mobility Radeon X1400, the glGenFramebuffersEXT function is present and
> available in libGL.so. However, when calling it, I get a segfault at
> 0x, so I suspect that it's just an empty stub. Are there plans
> to support this extension in the near future, or what would be necessary
> to add support?

FYI, usually not a good idea to force extensions like that. :3

Um, yeah, FBO support is dependent upon better integration of our memory 
manager (GEM) with Mesa. It's being worked on, but I can't really say 
when it'll be ready.

~ C.
___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg


[PATCH] drop unused HAVE_LIBDRM_2_2

2008-11-17 Thread Rémi Cardona
Hi all,

Is there any reason not to apply this? airlied suggested I bring this
here for further review.

Thanks

-- 
Rémi Cardona
LRI, INRIA
[EMAIL PROTECTED]
[EMAIL PROTECTED]
>From 6afd482e905aec048333fdd72579a08478a3f27a Mon Sep 17 00:00:00 2001
From: =?utf-8?q?R=C3=A9mi=20Cardona?= <[EMAIL PROTECTED]>
Date: Mon, 17 Nov 2008 09:56:49 +0100
Subject: [PATCH] drop unused HAVE_LIBDRM_2_2

---
 configure.ac|3 ---
 include/dix-config.h.in |3 ---
 2 files changed, 0 insertions(+), 6 deletions(-)

diff --git a/configure.ac b/configure.ac
index 4bea8ac..3713050 100644
--- a/configure.ac
+++ b/configure.ac
@@ -861,9 +861,6 @@ AM_CONDITIONAL(DRI2, test "x$DRI2" == xyes)
 
 if test "x$DRI" = xyes || test "x$DRI2" = xyes; then
PKG_CHECK_MODULES([LIBDRM], [libdrm >= 2.3.0])
-   PKG_CHECK_EXISTS(libdrm >= 2.2.0,
-[AC_DEFINE([HAVE_LIBDRM_2_2], 1,
-[Has version 2.2 (or newer) of the drm library])])
AC_SUBST(LIBDRM_CFLAGS)
AC_SUBST(LIBDRM_LIBS)
 fi
diff --git a/include/dix-config.h.in b/include/dix-config.h.in
index 5739a05..65b5950 100644
--- a/include/dix-config.h.in
+++ b/include/dix-config.h.in
@@ -121,9 +121,6 @@
 /* Define to 1 if you have the  header file. */
 #undef HAVE_INTTYPES_H
 
-/* Define to 1 if you have version 2.2 (or newer) of the drm library */
-#undef HAVE_LIBDRM_2_2
-
 /* Have Quartz */
 #undef XQUARTZ
 
-- 
1.6.0.3

___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg

Re: [patch 0/8] Patches for Cygwin/X

2008-11-17 Thread Jon TURNEY
Alan Hourihane wrote:
> On Thu, 2008-10-16 at 13:09 +0100, [EMAIL PROTECTED] wrote:
>> Updated series of patches to bring Cygwin/X up to date,
>> so it at least builds and minimally works
>>
>> Any comments on their correctness appreciated
> 
> Hi Jon,
> 
> They look reasonable to me, and if they gets things moving again, or the
> more welcome.
> 
> Maybe you should apply for git access to commit these and be able to
> maintain that code ??

Rebased and added to bugzilla as #18568, #18569, #18570 and #18571.

Sorry about the spam.

I forgot to say thanks to Janjaap Bos, Colin Harisson and Yaakov Selkowitz for
their help with these.

___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg


Re: X server 1.6 release schedule

2008-11-17 Thread Keith Packard
On Mon, 2008-11-17 at 13:08 +0100, Matthias Hopf wrote:

> Keith, AFAICS the standard properties do not inflict any changes on
> server code except for the naming of the EDID data property, so I assume
> that's fine here.

Yup.

> I'm unsure whether it would be wise to include panning support in 1.3,
> even given that I manage to put it together until 11/24. I guess it
> should settle in master first.

I'd like to get panning into 1.6; it's not a huge driver change as we
already support all of the needed functionality.

> In that case, do we have any protocol changes in 1.3? Do we need a
> version bump then?

The projective transforms require a protocol bump, hence wanting to get
all of the RandR 1.3 protocol changes included.

-- 
[EMAIL PROTECTED]


signature.asc
Description: This is a digitally signed message part
___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg

Re: X server 1.6 release schedule

2008-11-17 Thread Matthias Hopf
On Nov 17, 08 09:21:55 -0800, Keith Packard wrote:
> > I'm unsure whether it would be wise to include panning support in 1.3,
> > even given that I manage to put it together until 11/24. I guess it
> > should settle in master first.
> I'd like to get panning into 1.6; it's not a huge driver change as we
> already support all of the needed functionality.

Ok, then I'll try to fix up something until the end of the week.

> > In that case, do we have any protocol changes in 1.3? Do we need a
> > version bump then?
> The projective transforms require a protocol bump, hence wanting to get
> all of the RandR 1.3 protocol changes included.

... that happens if you read and reply to your emails top-down ;-)

Matthias

-- 
Matthias Hopf <[EMAIL PROTECTED]>  ____   __
Maxfeldstr. 5 / 90409 Nuernberg   (_   | |  (_   |__  [EMAIL PROTECTED]
Phone +49-911-74053-715   __)  |_|  __)  |__  R & D   www.mshopf.de
___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg


Moving xkbcomp into the server

2008-11-17 Thread Dan Nicholson
I decided to take a crack at moving xkbcomp into the server so it's
not popen'd whenever a keymap is loaded. For the first crack, I'm
trying to just leave xkbcomp pretty much unchanged except for the
interface. What's causing me the most difficulty is converting to
server API. One snag I've hit is XStringToKeysym. Here's an example
usage in the xkbcomp parser:

int
LookupKeysym(char *str, KeySym * sym_rtrn)
{
KeySym sym;

if ((!str) || (uStrCaseCmp(str, "any") == 0)
|| (uStrCaseCmp(str, "nosymbol") == 0))
{
*sym_rtrn = NoSymbol;
return 1;
}
else if ((uStrCaseCmp(str, "none") == 0)
 || (uStrCaseCmp(str, "voidsymbol") == 0))
{
*sym_rtrn = XK_VoidSymbol;
return 1;
}
sym = XStringToKeysym(str);
if (sym != NoSymbol)
{
*sym_rtrn = sym;
return 1;
}
return 0;
}

Is there an equivalent API in the server to do this conversion?
Is this crazy/am I going about this the wrong way?
Any general suggestions for working on this?

--
Dan
___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg


[Patch 00/02] mieq threading prep

2008-11-17 Thread Jeremy Huddleston
These are two fairly straight forward patches I'd like to give someone  
a chance to comment on.  In a single-thread Xserver, these patches  
should have no effect, but they make the code a little more friendly  
to Tiago's threading patches (and this is what we're already doing in  
the apple branches).


The first only increments mieqEventQueue.tail when the data is in the  
queue (since mieqProcessInputEvents assumes the data to be valid if  
tail is incremented).


The second moves a hunk in mieqProcessInputEvents after we increment  
mieqEventQueue.head, so we don't need to hold a mutex  
unnecessarily.


If nobody pipes up in the next few days, I'll be pushing them into  
master.


Thanks,
Jeremy



smime.p7s
Description: S/MIME cryptographic signature
___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg

[Patch 01/02] mieq threading prep: Push screen-saver/DPMS handling to after the EQ pop operation.

2008-11-17 Thread Jeremy Huddleston
This way we won't need to hold the mutex during the dixSaveScreens()  
call.


Signed-off-by: Jeremy Huddleston <[EMAIL PROTECTED]>
Signed-off-by: Peter Hutterer <[EMAIL PROTECTED]>
---
 mi/mieq.c |   20 ++--
 1 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/mi/mieq.c b/mi/mieq.c
index 7437966..82f0c66 100644
--- a/mi/mieq.c
+++ b/mi/mieq.c
@@ -314,16 +314,6 @@ mieqProcessInputEvents(void)
  master = NULL;

 while (miEventQueue.head != miEventQueue.tail) {
-if (screenIsSaved == SCREEN_SAVER_ON)
-dixSaveScreens (serverClient, SCREEN_SAVER_OFF,  
ScreenSaverReset);

-#ifdef DPMSExtension
-else if (DPMSPowerLevel != DPMSModeOn)
-SetScreenSaverTimer();
-
-if (DPMSPowerLevel != DPMSModeOn)
-DPMSSet(serverClient, DPMSModeOn);
-#endif
-
 e = &miEventQueue.events[miEventQueue.head];

 /* GenericEvents always have nevents == 1 */
@@ -346,6 +336,16 @@ mieqProcessInputEvents(void)
 type= event->u.u.type;
 master  = (!dev->isMaster && dev->u.master) ? dev- 
>u.master : NULL;


+if (screenIsSaved == SCREEN_SAVER_ON)
+dixSaveScreens (serverClient, SCREEN_SAVER_OFF,  
ScreenSaverReset);

+#ifdef DPMSExtension
+else if (DPMSPowerLevel != DPMSModeOn)
+SetScreenSaverTimer();
+
+if (DPMSPowerLevel != DPMSModeOn)
+DPMSSet(serverClient, DPMSModeOn);
+#endif
+
 /* Custom event handler */
 handler = miEventQueue.handlers[type];

--
1.6.0.3




smime.p7s
Description: S/MIME cryptographic signature
___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg

[Patch 01/02] mieq threading prep: Only increment tail (push) when the event data is actually in the queue

2008-11-17 Thread Jeremy Huddleston
mi: Only increment tail (push) when the event data is actually in the  
queue


We don't want to increment tail until the data is already in place since
mieqProcessInputEvents assumes the data to be there if tail is  
incremented.


Signed-off-by: Jeremy Huddleston <[EMAIL PROTECTED]>
Signed-off-by: Peter Hutterer <[EMAIL PROTECTED]>
---
 mi/mieq.c |7 +++
 1 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/mi/mieq.c b/mi/mieq.c
index 52bb841..7437966 100644
--- a/mi/mieq.c
+++ b/mi/mieq.c
@@ -122,7 +122,7 @@ mieqResizeEvents(int min_size)
 void
 mieqEnqueue(DeviceIntPtr pDev, xEvent *e)
 {
-unsigned int   oldtail = miEventQueue.tail, newtail;
+unsigned int   oldtail = miEventQueue.tail;
 EventListPtr   evt;
 intisMotion = 0;
 intevlen;
@@ -170,11 +170,10 @@ mieqEnqueue(DeviceIntPtr pDev, xEvent *e)
 }
 else {
static int stuck = 0;
-   newtail = (oldtail + 1) % QUEUE_SIZE;
/* Toss events which come in late.  Usually this means your server's
  * stuck in an infinite loop somewhere, but SIGIO is still  
getting

  * handled. */
-   if (newtail == miEventQueue.head) {
+   if (((oldtail + 1) % QUEUE_SIZE) == miEventQueue.head) {
 ErrorF("[mi] EQ overflowing. The server is probably  
stuck "

"in an infinite loop.\n");
if (!stuck) {
@@ -184,7 +183,6 @@ mieqEnqueue(DeviceIntPtr pDev, xEvent *e)
return;
 }
stuck = 0;
-   miEventQueue.tail = newtail;
 }

 evlen = sizeof(xEvent);
@@ -218,6 +216,7 @@ mieqEnqueue(DeviceIntPtr pDev, xEvent *e)
 miEventQueue.events[oldtail].pDev = pDev;

 miEventQueue.lastMotion = isMotion;
+miEventQueue.tail = (oldtail + 1) % QUEUE_SIZE;
 }

 void
--
1.6.0.3




smime.p7s
Description: S/MIME cryptographic signature
___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg

Re: Does libpciaccess support two screen share one pci clone mode ?

2008-11-17 Thread Ian Romanick
On Thu, Nov 13, 2008 at 02:52:19PM +0800, [EMAIL PROTECTED] wrote:
>  Recently I study libpciaccess, found pci_device_map_range
> function, when map the same physical address and the same size will
> failed. 
> 
> If we use libpciaccess and want support two screen which share one
> pci, they are mmio base is the same physical address and the size is the
> same, when the second screeninit goto pci_device_map_range, screeninit
> will failed.

If you're doing clone mode within one driver, you shouldn't need to map
the device twice.  Map the device memory once and use the same mapping from
both screens.  I'm pretty sure that's how other drivers do it.

Just out of curiosity, what device are you working on?

>  And we can not light device.
> 
>  I want to ask two question:
> 
>  1. Does the libpciaccess support two screen share one pci ?
> 
>  2. If not support, we can do same issue in 2D driver , but I
> can see the nv and intel do nothing , it confused me a lot.
___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg


freetype small reads

2008-11-17 Thread ☂Josh Chiα (谢任中)
Hi,

I notice that freetype is making a lot of small reads, resulting in a lot of
SYS_read calls.  I'm using freetype in an app that runs in a ptrace
sandbox.  Is there a simple way to prevent the numerous small reads (via
ft_ansi_stream_io), such as configuring freetype to prefetch into a large
buffer when reading, and making it assume that font files never change?  If
there is no simple config change, I'll probably have to do this with some
code change.  Thanks!

Josh
___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg

Re: New Video Decode and Presentation API

2008-11-17 Thread Torgeir Veimo

On 15 Nov 2008, at 04:28, Andy Ritger wrote:

> I'm pleased to announce a new video API for Unix and Unix-like  
> platforms,
> and a technology preview implementation of this API from NVIDIA.

> * Defines an API for post-processing of decoded video, including
>   temporal and spatial deinterlacing, inverse telecine, and noise
>   reduction.

What about interlaced output and TV output. Is that still possible  
with this API?

Is field parity observed when outputting interlaced material? I think  
it's equally important to have good support for baseline mpeg2 in  
addition to other codecs, and this would imply that interlaced, field  
parity correct mpeg2 output on standard s-video / rgb should be fully  
working.

> * Defines an API for timestamp-based presentation of final video
>   frames.

This is interesting. Can such timestamps be synchronised with HDMI  
audio in some ways to guarantee judder free and audio resync free  
output? Ie, no need to resample audio to compensate for clock drift?

> * Defines an API for compositing sub-picture, on-screen display,
>   and other UI elements.

I assume this indicates that video can easily be used as textures for  
opengl surfaces, and that opengl surfaces (with alpha transparency  
support) can easily be superimposed over video output?

> These patches include changes against libavcodec, libavutil, ffmpeg,

> and MPlayer itself; they may serve as an example of how to use VDPAU.

Would it be possible to provide a standalone playback test program  
that illustrates the api usage outside of mplayer?

> If other hardware vendors are interested, they are welcome to also
> provide implementations of VDPAU.  The VDPAU API was designed to allow
> a vendor backend to be selected at run time.

It would be helpful to have an open source "no output" backend to  
allow compile & run test when supported hardware is not available.  
This would also help accelerate support for any software backend if  
anyone should choose to implement one.

> VC-1 support in NVIDIA's VDPAU implementation currently requires  
> GeForce
> 9300 GS, GeForce 9200M GS, GeForce 9300M GS, or GeForce 9300M GS.


So only mobile chipsets supports VC-1 output currently?

It seems that the marketplace seems to be missing a 9500 GT based gfx  
card with passive cooling, low form factor and hdmi enabled output...

-- 
Torgeir Veimo
[EMAIL PROTECTED]




___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg


Re: radeon driver again: GL_EXT_framebuffer_object?

2008-11-17 Thread Jerome Glisse
On Mon, 2008-11-17 at 10:47 +0100, Florian Echtler wrote:
> Hello everyone,
> 
> an OpenGL-related question again: while the R300 driver from Mesa 7.2
> doesn't seem to announce the GL_EXT_framebuffer_object extension on a
> Mobility Radeon X1400, the glGenFramebuffersEXT function is present and
> available in libGL.so. However, when calling it, I get a segfault at
> 0x, so I suspect that it's just an empty stub. Are there plans
> to support this extension in the near future, or what would be necessary
> to add support?
> 
> Many thanks,
> Yours, Florian


This extension needs memory manager, so it will be upstream once we get
kernel bits upstream maybe 2.6.29 but i would rather bet on 2.6.30.
Also you might be confused about extension presence; what does glxinfo
report in the OpenGL section (the last section of glxinfo)? If you see
GL_EXT_framebuffer_object then it's a bug, but I don't recall seeing
any code to expose this extension.

Cheers,
Jerome Glisse

___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg


Re: Xorg over Linux Virtual Framebuffer Driver

2008-11-17 Thread Leandro Galvez
Anybody got any idea about this? Can anybody help me on this please?

Thanks and best regards,
Andy
  - Original Message - 
  From: Leandro Galvez 
  To: xorg@lists.freedesktop.org 
  Sent: Friday, November 14, 2008 10:24 AM
  Subject: Xorg over Linux Virtual Framebuffer Driver


  Hi All,

  I am trying to run Xorg on my arm target device. It has no physical 
display device so I just enabled linux virtual framebuffer. But when I try to 
run the xserver, I get the message "Cannot open virtual console 2". How do I 
get rid of this fatal error message preventing me to startup the Xorg server. 
Attached is my configuration file and the log file created by running "Xorg :98 
-nolisten tcp". Did I miss anything? Or is it just my command line? What is 
this virtual console anyway? Do I need to map my fb0 to it and how? By the way, 
I am not using Xvfb as I still want other applications to bypass the X window 
system. So I am using Linux virtual frame buffer module, not the Xvfb"
  I did turn on "Virtual Terminal" in my device drivers and I also enabled 
"Support for console on virtual terminal". By the way I am using the prebuilt 
binaries from armel debian release as building the X system from scratch 
together with all its required libraries requires a great amount of effort and 
time. 
  Below is my /etc/X11/xorg.conf configuration file and my system's output 
log. Hope you guys can help me out of this mess.

  Thanks and best regards,
  Andy

  CONFIG FILE
Section "Device" 
Identifier "FBDevice" 
Driver "fbdev" 
Option "fbdev" "/dev/fb0" 
EndSection 
Section "Screen" 
Identifier "FBScreen" 
Device "FBDevice" 
SubSection "Display" 
Modes "default" 
EndSubSection 
EndSection 
Section "ServerLayout" 
Identifier "FBLayout" 
Screen "FBScreen" 
EndSection

  LOG FILE
X.Org X Server 1.4.2 
Release Date: 11 June 2008 
X Protocol Version 11, Revision 0 
Build Operating System: Linux Debian (xorg-server 2:1.4.2-7) 
Current Operating System: Linux (none) 2.6.16.11-v012 #2 Tue Nov 4 09:48:40 
SGT 2008 armv4tl 
Build Date: 30 September 2008 04:47:25PM 
Before reporting problems, check http://wiki.x.org 
to make sure that you have the latest version. 
Module Loader present 
Markers: (--) probed, (**) from config file, (==) default setting, 
(++) from command line, (!!) notice, (II) informational, 
(WW) warning, (EE) error, (NI) not implemented, (??) unknown. 
(==) Log file: "/var/log/Xorg.0.log", Time: Thu Jan 1 00:30:28 1970 
(==) Using config file: "/etc/X11/xorg.conf" 
(==) ServerLayout "FBLayout" 
(**) |-->Screen "FBScreen" (0) 
(**) | |-->Monitor "" 
(**) | |-->Device "FBDevice" 
(==) No monitor specified for screen "FBScreen". 
Using a default monitor configuration. 
(==) Automatically adding devices 
(==) Automatically enabling devices 
(==) No FontPath specified. Using compiled-in default. 
(WW) The directory "/usr/share/fonts/X11/cyrillic" does not exist. 
Entry deleted from font path. 
(WW) The directory "/var/lib/defoma/x-ttcidfont-conf.d/dirs/TrueType" does 
not exist. 
Entry deleted from font path. 
(==) FontPath set to: 
/usr/share/fonts/X11/misc, 
/usr/share/fonts/X11/100dpi/:unscaled, 
/usr/share/fonts/X11/75dpi/:unscaled, 
/usr/share/fonts/X11/Type1, 
/usr/share/fonts/X11/100dpi, 
/usr/share/fonts/X11/75dpi 
(==) RgbPath set to "/etc/X11/rgb" 
(==) ModulePath set to "/usr/lib/xorg/modules" 
(==) |-->Input Device "" 
(==) |-->Input Device "" 
(==) The core pointer device wasn't specified explicitly in the layout. 
Using the default mouse configuration. 
(==) The core keyboard device wasn't specified explicitly in the layout. 
Using the default keyboard configuration. 
(II) No APM support in BIOS or kernel 
(II) Loader magic: 0x1a0564 
(II) Module ABI versions: 
X.Org ANSI C Emulation: 0.3 
X.Org Video Driver: 2.0 
X.Org XInput driver : 2.0 
X.Org Server Extension : 0.3 
X.Org Font Renderer : 0.5 
(II) Loader running on linux 
(II) LoadModule: "pcidata" 
(II) Loading /usr/lib/xorg/modules//libpcidata.so 
(II) Module pcidata: vendor="X.Org Foundation" 
compiled for 1.4.2, module version = 1.0.0 
ABI class: X.Org Video Driver, version 2.0 
(--) using VT number 2 
Fatal server error: 
xf86OpenConsole: Cannot open virtual console 2 (No such file or directory) 
(WW) xf86CloseConsole: KDSETMODE failed: Bad file descriptor 
(WW) xf86CloseConsole: VT_GETMODE failed: Bad file descriptor


--


  ___
  xorg mailing list
  xorg@lists.freedesktop.org
  http://lists.freedesktop.org/mailman/listinfo/xorg___

答复: Does libpciaccess support two scre en share one pci clone mode ?

2008-11-17 Thread FloraGui
Dear Ian:
Thanks a lot for your reply!

>If you're doing clone mode within one driver, you shouldn't need to map
>the device twice.  Map the device memory once and use the same mapping from
>both screens.  I'm pretty sure that's how other drviers do it.

Now I use this method to realize it which is just as you said I judge it if I 
mapped once, I will not map it.But I saw the open source of Intel and NV and 
see they place pci_device_map_range(mmio map) in ScreenInit stage. In old xorg 
structure, if in clone mode xorg call ScreenInit twice, we use old structure, 
so if we do nothing here, it will call pci_device_map_range(mmio map)twice and 
the second time will failed . I thought because Intel use xrandr12 structure 
the xorg don't all ScreenInit twice in the clone mode.The xrandr12 always 
build-in
one screen. Ps: I work on S3 Graphic.

-邮件原件-
发件人: Ian Romanick [mailto:[EMAIL PROTECTED] 
发送时间: 2008年11月18日 2:38
收件人: Flora Gui
抄送: xorg@lists.freedesktop.org
主题: Re: Does libpciaccess support two screen share one pci clone mode ?

On Thu, Nov 13, 2008 at 02:52:19PM +0800, [EMAIL PROTECTED] wrote:
>  Recently I study libpciaccess, found pci_device_map_range
> function, when map the same physical address and the same size will
> failed. 
> 
> If we use libpciaccess and want support two screen which share one
> pci, they are mmio base is the same physical address and the size is the
> same, when the second screeninit goto pci_device_map_range, screeninit
> will failed.

If you're doing clone mode within one driver, you shouldn't need to map
the device twice.  Map the device memory once and use the same mapping from
both screens.  I'm pretty sure that's how other drviers do it.

Just out of curiosity, what device are you working on?

>  And we can not light device.
> 
>  I want to ask two question:
> 
>  1. Does the libpciaccess support two screen share one pci ?
> 
>  2. If not support, we can do same issue in 2D driver , but I
> can see the nv and intel do nothing , it confused me a lot.

___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg

Re: Making one multiseat user able to switch vts?

2008-11-17 Thread Tiago Vignatti
Kārlis Repsons escreveu:
> In general it would be nice, if multiseat workstation administrator could 
> switch to vt[1-6]. Does Xorg support it somehow and is it possible to 
> implement (well, kindly ask someone to do it) such option? Otherwise 
> multiseat on Linux is quite problematic, if no ssh is used...
> (also I noticed a problem with multiseat, that, if I stop X and login manager 
> permanently, I still could not switch to any vts)

It's complicated. Pci-rework, through libpciaccess, cleaned a bunch of 
the mess that was living in our server. But that wasn't good enough. 
There's still some issues that don't let us remove the entirely Xorg's 
pci layer. And this is the point: there's a lot of pci users (VGACon, 
framebuffer, Xorg and possibly others) in your system fighting for the 
same piece of hardware.

With lucky the kernel based modesetting will let the code more nice and 
trivial to remove more things from the server. Dave obtained a very nice 
demo recently starting a rootless X server.


Cheers,

PS: really, try to avoid a single machine to configure your multiseat 
box. No one needs text console in our current world.

-- 
Tiago Vignatti
C3SL - Centro de Computação Científica e Software Livre
www.c3sl.ufpr.br
___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg

Re: Xephyr - Attach specific mouse device without be root?

2008-11-17 Thread Tiago Vignatti
Hi,

[EMAIL PROTECTED] escreveu:
> Hi all,
> I'm playing with using Xephyr for an emulator for the Sugar desktop (OLPC
> stuff), I am attempting to bind a specific mouse (event input) to Xephyr
> and made had some progress.
> 
> However the biggest problem is that I can only get Xephyr to attach an
> event source if it is run as root. ie:
> --
> sudo Xephyr :5 -mouse evdev,,device=/dev/input/event8
> --
> 
> Is there a method to allow this without being root, but instead as a
> normal user without root permissions?

I don't want that others users on the system see everything that I'm 
typing. But if you really want that feature, you can play with udev (or 
{Console, Device}Kit?) and create some rules for a given group.


> As expected the permissions of the event device are 'crw-rw 1 root
> root 13, 71 2008-11-12 21:24 /dev/input/event8'
> 
> [It seems that a normal Xserver can achieve this when run directly from a
> VT, so why not for Xephyr?]

note that you have to get superuser permissions to start a normal X server.


Cheers,

-- 
Tiago Vignatti
C3SL - Centro de Computação Científica e Software Livre
www.c3sl.ufpr.br
___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg


Re: XDMCP / Gnome / KDE / Data transfer / GTK QT / Xorg extension

2008-11-17 Thread Tiago Vignatti
Hi,

Jean-Francois Bouchard escreveu:
> Problem :
> We experience very slow scroll speed (lets say, cat /var/log/messages)
> in Gnome terminal via XDMCP. (1M file : 1.5 minute to display)




> Setup :
> On the thin client we use ...
> X Protocol Version 11, Revision 0, Release 7.1.1
> Kernel 2.6.18-92.1.17.el5
> Gnome 2.16
> KDE 3.5

AFAIK, there's no such reason to keep the X clients (Gnome, KDE and etc) 
on thin-client side.


> Question :
> We would like to know if this could be related to X protocol or the way
> things are handled when drawing text ?

If that is easy, you could simply start the same applications on the 
server, displaying it locally and see if is a network problem.


Cheers,

-- 
Tiago Vignatti
C3SL - Centro de Computação Científica e Software Livre
www.c3sl.ufpr.br
___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg


Re: XDMCP and NAT

2008-11-17 Thread Tiago Vignatti
Hi,

Ritesh Sood escreveu:
> Hi all,
> 
> This mail is more of a feature request, and looking at the number of
> messages on the web, I'm sure quite a number of users would be happy to
> have this functionality, which is already provided by many commercial
> Xservers for windows.
> 
> I want use Xephyr/Xnest on my home machine local_host (as display :1)
> and have the display controlled by xdm running on a remote application server
> (app_server)
> 
> First, please have your browser's font set to a monospaced one so that
> the boxes below are displayed correctly.
> 
> Here's how the network "topology" looks like.
> 
> +---+ ++  +-+
> | local_host| | NAT server |  | app_server  |
> | 192.168.0.100 |---> | 1.2.3.4|->| 5.6.7.8 |
> | running Xnest | ||  | my.univ.edu |
>   |   my.univ.edu  |
> | on display :1 | | my.isp.com |  | running xdm |
> +---+ ++  +-+
> 
> At the app_server end, Xaccess contains
> *.univ.edu   NOBROADCAST
> *.isp.com   NOBROADCAST
> to have some measure of security
> 
> I'm running xdm as
> # xdm -debug 1 -config 
> 
> Within the university network of-course, things work very well. From
> local_host too, at-least XDMCP authentication is happening correctly,
> i.e. xdm sees that the incoming request is from *.isp.com. and considers
> it legitimate.
> 
> Next, it tries to open 192.168.0.100:1 for login window, etc; and that
> of-course fails.
> 
> Just to make sure that port forwarding on 60xx ports is happening correctly,
> I do
> $ xterm -display my.isp.com:1.0
> and that works alright.
> 
> As i mentioned above, many Xserver implementations for windows provide
> an option so that the NAT IP address can be passed to xdm instead of
> XDMCP picking up the local_host address by default. See these FAQs, for
> instance:
> http://connectivity.hummingbird.com/support/nc/exceed/exc9003009.html?cks=y
> http://www.netsarang.com/products/xmg_faq.html
> 
> It would be great if we could have similar functionality in the Xorg
> Xservers.

Yeah, I would have liked this kind of feature some time ago as well, but it 
seems that our world is finally (though not so quickly) turning to IPv6 [0]. 
Another, crazier idea would be to traverse the NAT using the hole-punching 
technique. Follow this link:

http://vignatti.wordpress.com/2008/03/21/traversing-x11-clients-behind-nat-or-x11-end-to-end-connectivity/


[0] people found another motivation besides the lack of address space, 
which is energy saving. It seems that a host behind a NAT must send a 
"keep alive" message every 30-180 seconds to keep the address and connection 
active. This can consume a significant amount of energy, especially for 
mobile devices.


Cheers,

-- 
Tiago Vignatti
C3SL - Centro de Computação Científica e Software Livre
www.c3sl.ufpr.br
___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg


Re: [Patch 00/02] mieq threading prep

2008-11-17 Thread Tiago Vignatti
Hi,

Jeremy Huddleston escreveu:
> These are two fairly straight forward patches I'd like to give someone a 
> chance to comment on.  In a single-thread Xserver, these patches should 
> have no effect, but they make the code a little more friendly to Tiago's 
> threading patches (and this is what we're already doing in the apple 
> branches).

As I already pointed out on another occasion, I'm not comfortable enough to 
push my series of patches upstream right now. Peter said in a private 
mail that he feels the same.

There are probably more critical regions (mainly regarding input properties) 
that I haven't covered yet (and I'm in the process of finishing my 
masters soon, so I don't want to put another piano on my head). 
Therefore the real reason not to push upstream now is very simple: 
multi-threaded programming sucks.


> The first only increments mieqEventQueue.tail when the data is in the 
> queue (since mieqProcessInputEvents assumes the data to be valid if tail 
> is incremented).
> 
> The second moves a hunk in mieqProcessInputEvents after we increment 
> mieqEventQueue.head, so we don't need to hold a mutex for an unnecessarily 
> long time.
> 
> If nobody pipes up in the next few days, I'll be pushing them into master.

Right. If the patches are useful for xquartz then both are justified 
even in the lack of xorg's input thread:

 Signed-off-by: Tiago Vignatti <[EMAIL PROTECTED]>


Thanks Jeremy,

-- 
Tiago Vignatti
C3SL - Centro de Computação Científica e Software Livre
www.c3sl.ufpr.br
___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg


Re: Moving xkbcomp into the server

2008-11-17 Thread Peter Hutterer
On Mon, Nov 17, 2008 at 11:25:25AM -0800, Dan Nicholson wrote:
> I decided to take a crack at moving xkbcomp into the server so it's
> not popen'd whenever a keymap is loaded. For the first crack, I'm
> trying to just leave xkbcomp pretty much unchanged except for the
> interface. What's causing me the most difficulty is converting to
> server API. One snag I've hit is XStringToKeysym. Here's an example
> usage in the xkbcomp parser:

As much as I'd like to see it in the server - is the popen the painful bit?
AFAIU, the current approach goes from RMLVO to Kkcstg to xkb to xkm, every
time we call InitKeyboardDeviceStruct.

Ideally, we'd like to cache and re-use as much as possible. Usually, all
keyboards come up with the same map anyway and compiling it again is
redundant. Just doing that might already save a significant chunk of time.
This should also be much easier to achieve, and if it provides a relevant
speedup it would be great as interim solution.

So the path is
XkbInitKeyboardDeviceStruct:xkb/xkbInit.c
  -> XkbDDXNamesFromRules:xkb/ddxLoad.c
this is where all the rules parsing happens, skipping that may save
time.
  -> XkbDDXLoadKeymapByNames:xkb/ddxLoad.c
this is where xkbcomp is called with the Kcstg format. xkbcomp now
parses that into an xkm format
  -> XkmReadFile:xkb/xkmread.c
here we read in the compiled keymap and basically copy it into the
internal structs.

Cheers,
  Peter
___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg


Re: Moving xkbcomp into the server

2008-11-17 Thread Alan Coopersmith
Dan Nicholson wrote:
> One snag I've hit is XStringToKeysym. 
> 
> Is there an equivalent API in the server to do this conversion?

I haven't checked if there's one added now, but I know our Xsun
pre-xkb keytable parser linked in a copy of the ks_tables.h file
built in the libX11 build and included a static copy of the
XStringToKeysym function to do the lookups in it.

I wonder if going forward, moving XStringToKeysym into a separate
library, or putting equivalent functionality in something like
libxcb-keysyms wouldn't be a better way, to reduce duplication of
this data/parsing code needed by users of both libX11 & libxcb,
and the Xserver itself.

-- 
-Alan Coopersmith-   [EMAIL PROTECTED]
 Sun Microsystems, Inc. - X Window System Engineering

___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg


Re: Moving xkbcomp into the server

2008-11-17 Thread Paulo César Pereira de Andrade
> On Mon, Nov 17, 2008 at 11:25:25AM -0800, Dan Nicholson wrote:
>> I decided to take a crack at moving xkbcomp into the server so it's
>> not popen'd whenever a keymap is loaded. For the first crack, I'm
>> trying to just leave xkbcomp pretty much unchanged except for the
>> interface. What's causing me the most difficulty is converting to
>> server API. One snag I've hit is XStringToKeysym. Here's an example
>> usage in the xkbcomp parser:
>
> As much as I'd like to see it in the server - is the popen the painful
> bit?

  We are currently using the patch I posted some time ago, to cache
xkbcomp output on some OEMs. It should make a significant difference
on "low profile" computers. But I did not extend it to skip
things like parsing the geometry, to avoid breaking the specs. But
I think it is not quite reliable anymore... At least libxkbui
was not working with xorgcfg for some time, and xorgcfg was probably
the only client using it.

  But really, some major cleanup should be done. The geometry code,
I believe, accounts for more than half of the computing time, and is not used.
Also, the caching should be done at a higher layer: instead
of sha1'ing the whole xkbmap, it should sha1 just the
"main descriptions", like the "setxkbmap -print" output.

> AFAIU, the current approach goes from RMLVO to Kkcstg to xkb to xkm, every
> time we call InitKeyboardDeviceStruct.
>
> Ideally, we'd like to cache and re-use as much as possible. Usually, all
> keyboards come up with the same map anyway and compiling it again is
> redundant. Just doing that might already save a significant chunk of time.
> This should also be much easier to achieve, and if it provides a relevant
> speedup it would be great as interim solution.
>
> So the path is
> XkbInitKeyboardDeviceStruct:xkb/xkbInit.c
>   -> XkbDDXNamesFromRules:xkb/ddxLoad.c
> this is where all the rules parsing happens, skipping that may
> save
> time.
>   -> XkbDDXLoadKeymapByNames:xkb/ddxLoad.c
> this is where xkbcomp is called with the Kcstg format. xkbcomp now
> parses that into an xkm format
>   -> XkmReadFile:xkb/xkmread.c
> here we read in the compiled keymap and basically copy it into the
> internal structs.
>
> Cheers,
>   Peter

Paulo

___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg


Re: XDMCP / Gnome / KDE / Data transfer / GTK QT / Xorg extension

2008-11-17 Thread Rémi Cardona
Le 12/11/2008 22:54, Jean-Francois Bouchard a écrit :
> Problem :
> We experience very slow scroll speed (lets say, cat /var/log/messages)
> in Gnome terminal via XDMCP. (1M file : 1.5 minute to display)

[snip]

> On the fat server ...
> X Protocol Version 11, Revision 0, Release 6.8.2
> Kernel 2.6.9-78.0.1.ELsmp
> Gnome 2.8
 ^^^

That's your problem, right there. vte as shipped in Gnome 2.8 was very 
slow and major performance profiling was done later on (in 2.12 or 2.14, 
I'm not sure).

You should definitely try to update your "fat server" to a more recent 
Gnome stack. It might not be perfect and there's probably some more room 
left for improvements, but I think you'll find it a huge win over what 
you are currently using.

Cheers

Rémi Cardona
___
xorg mailing list
xorg@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/xorg