Hi,

I have attached two patches for the Hyphen library and the English
hyphenation patterns, also a test document.
(The first test case is not yet resolved; see my next message.)

The Hyphen patch fixes the bad hyphenation of words containing hyphens (a
regression new to OpenOffice.org) and apostrophes, for all languages. The
old partial workaround (string replacement at hyphenation time) and the
full workaround (explicit NOHYPHEN attribute) in LibreOffice's English
hyphenation patterns are now unnecessary, and are removed by the other patch.

Best regards,
Laci
From 722c5dc25ad32a7878f7b1d00f2864789c4081d3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?L=C3=A1szl=C3=B3=20N=C3=A9meth?= <nem...@numbertext.org>
Date: Fri, 7 Oct 2011 16:03:06 +0200
Subject: [PATCH] Fixes of Hyphen 2.8.2 (words with hyphens, numbers and use min. def. hyphenmin)

---
 hyphen/hyphen-2.7.1-2.8.2.patch |  325 +++++++++++++++++++++++++++++++++++++++
 hyphen/makefile.mk              |    3 +-
 2 files changed, 327 insertions(+), 1 deletions(-)
 create mode 100644 hyphen/hyphen-2.7.1-2.8.2.patch

diff --git a/hyphen/hyphen-2.7.1-2.8.2.patch b/hyphen/hyphen-2.7.1-2.8.2.patch
new file mode 100644
index 0000000..912fba7
--- /dev/null
+++ b/hyphen/hyphen-2.7.1-2.8.2.patch
@@ -0,0 +1,325 @@
+--- misc/build/hyphen-2.7.1/hyphen.c.old	2011-10-07 15:51:25.883686906 +0200
++++ misc/build/hyphen-2.7.1/hyphen.c	2011-10-07 15:51:59.363686900 +0200
+@@ -242,99 +242,45 @@
+ }
+ #endif
+ 
+-HyphenDict *
+-hnj_hyphen_load (const char *fn)
+-{
+-  HyphenDict *dict[2];
+-  HashTab *hashtab;
+-  FILE *f;
+-  char buf[MAX_CHARS];
++void hnj_hyphen_load_line(char * buf, HyphenDict * dict, HashTab * hashtab) {
++  int i, j;
+   char word[MAX_CHARS];
+   char pattern[MAX_CHARS];
+   char * repl;
+   signed char replindex;
+   signed char replcut;
+-  int state_num = 0, last_state;
+-  int i, j, k;
++  int state_num = 0;
++  int last_state;
+   char ch;
+   int found;
+-  HashEntry *e;
+-  int nextlevel = 0;
+-
+-  f = fopen (fn, "r");
+-  if (f == NULL)
+-    return NULL;
+ 
+-// loading one or two dictionaries (separated by NEXTLEVEL keyword)
+-for (k = 0; k == 0 || (k == 1 && nextlevel); k++) { 
+-  hashtab = hnj_hash_new ();
+-#ifdef VERBOSE
+-  global = hashtab;
+-#endif
+-  hnj_hash_insert (hashtab, "", 0);
+-  dict[k] = hnj_malloc (sizeof(HyphenDict));
+-  dict[k]->num_states = 1;
+-  dict[k]->states = hnj_malloc (sizeof(HyphenState));
+-  dict[k]->states[0].match = NULL;
+-  dict[k]->states[0].repl = NULL;
+-  dict[k]->states[0].fallback_state = -1;
+-  dict[k]->states[0].num_trans = 0;
+-  dict[k]->states[0].trans = NULL;
+-  dict[k]->nextlevel = NULL;
+-  dict[k]->lhmin = 0;
+-  dict[k]->rhmin = 0;
+-  dict[k]->clhmin = 0;
+-  dict[k]->crhmin = 0;
+-  dict[k]->nohyphen = NULL;
+-  dict[k]->nohyphenl = 0;
+-
+-  /* read in character set info */
+-  if (k == 0) {
+-    for (i=0;i<MAX_NAME;i++) dict[k]->cset[i]= 0;
+-    fgets(dict[k]->cset,  sizeof(dict[k]->cset),f);
+-    for (i=0;i<MAX_NAME;i++)
+-      if ((dict[k]->cset[i] == '\r') || (dict[k]->cset[i] == '\n'))
+-        dict[k]->cset[i] = 0;
+-    dict[k]->utf8 = (strcmp(dict[k]->cset, "UTF-8") == 0);
+-  } else {
+-    strcpy(dict[k]->cset, dict[0]->cset);
+-    dict[k]->utf8 = dict[0]->utf8;
+-  }
+-
+-  while (fgets (buf, sizeof(buf), f) != NULL)
+-    {
+-      if (buf[0] != '%')
+-	{
+-	  if (strncmp(buf, "NEXTLEVEL", 9) == 0) {
+-	    nextlevel = 1;
+-	    break;
+-	  } else if (strncmp(buf, "LEFTHYPHENMIN", 13) == 0) {
+-	    dict[k]->lhmin = atoi(buf + 13);
+-	    continue;
++	  if (strncmp(buf, "LEFTHYPHENMIN", 13) == 0) {
++	    dict->lhmin = atoi(buf + 13);
++	    return;
+ 	  } else if (strncmp(buf, "RIGHTHYPHENMIN", 14) == 0) {
+-	    dict[k]->rhmin = atoi(buf + 14);
+-	    continue;
++	    dict->rhmin = atoi(buf + 14);
++	    return;
+ 	  } else if (strncmp(buf, "COMPOUNDLEFTHYPHENMIN", 21) == 0) {
+-	    dict[k]->clhmin = atoi(buf + 21);
+-	    continue;
++	    dict->clhmin = atoi(buf + 21);
++	    return;
+ 	  } else if (strncmp(buf, "COMPOUNDRIGHTHYPHENMIN", 22) == 0) {
+-	    dict[k]->crhmin = atoi(buf + 22);
+-	    continue;
++	    dict->crhmin = atoi(buf + 22);
++	    return;
+ 	  } else if (strncmp(buf, "NOHYPHEN", 8) == 0) {
+ 	    char * space = buf + 8;
+ 	    while (*space != '\0' && (*space == ' ' || *space == '\t')) space++;
+-	    if (*buf != '\0') dict[k]->nohyphen = hnj_strdup(space);
+-	    if (dict[k]->nohyphen) {
+-	        char * nhe = dict[k]->nohyphen + strlen(dict[k]->nohyphen) - 1;
++	    if (*buf != '\0') dict->nohyphen = hnj_strdup(space);
++	    if (dict->nohyphen) {
++	        char * nhe = dict->nohyphen + strlen(dict->nohyphen) - 1;
+ 	        *nhe = 0;
+-	        for (nhe = nhe - 1; nhe > dict[k]->nohyphen; nhe--) {
++	        for (nhe = nhe - 1; nhe > dict->nohyphen; nhe--) {
+ 	                if (*nhe == ',') {
+-	                    dict[k]->nohyphenl++;
++	                    dict->nohyphenl++;
+ 	                    *nhe = 0;
+ 	                }
+ 	        }
+ 	    }
+-	    continue;
++	    return;
+ 	  } 
+ 	  j = 0;
+ 	  pattern[j] = '0';
+@@ -379,7 +325,7 @@
+           } else {
+             if (*word == '.') i++;
+             /* convert UTF-8 char. positions of discretionary hyph. replacements to 8-bit */
+-            if (dict[k]->utf8) {
++            if (dict->utf8) {
+                 int pu = -1;        /* unicode character position */
+                 int ps = -1;        /* unicode start position (original replindex) */
+                 int pc = (*word == '.') ? 1: 0; /* 8-bit character position */
+@@ -403,14 +349,14 @@
+ 	  printf ("word %s pattern %s, j = %d  repl: %s\n", word, pattern + i, j, repl);
+ #endif
+ 	  found = hnj_hash_lookup (hashtab, word);
+-	  state_num = hnj_get_state (dict[k], hashtab, word);
+-	  dict[k]->states[state_num].match = hnj_strdup (pattern + i);
+-	  dict[k]->states[state_num].repl = repl;
+-	  dict[k]->states[state_num].replindex = replindex;
++	  state_num = hnj_get_state (dict, hashtab, word);
++	  dict->states[state_num].match = hnj_strdup (pattern + i);
++	  dict->states[state_num].repl = repl;
++	  dict->states[state_num].replindex = replindex;
+           if (!replcut) {
+-            dict[k]->states[state_num].replcut = (signed char) strlen(word);
++            dict->states[state_num].replcut = (signed char) strlen(word);
+           } else {
+-            dict[k]->states[state_num].replcut = replcut;
++            dict->states[state_num].replcut = replcut;
+           }
+ 
+ 	  /* now, put in the prefix transitions */
+@@ -420,11 +366,81 @@
+ 	      ch = word[j - 1];
+ 	      word[j - 1] = '\0';
+ 	      found = hnj_hash_lookup (hashtab, word);
+-	      state_num = hnj_get_state (dict[k], hashtab, word);
+-	      hnj_add_trans (dict[k], state_num, last_state, ch);
++	      state_num = hnj_get_state (dict, hashtab, word);
++	      hnj_add_trans (dict, state_num, last_state, ch);
+ 	    }
+-	}
++}
++
++HyphenDict *
++hnj_hyphen_load (const char *fn)
++{
++  HyphenDict *dict[2];
++  HashTab *hashtab;
++  FILE *f;
++  char buf[MAX_CHARS];
++  int nextlevel = 0;
++  int i, j, k;
++  HashEntry *e;
++  int state_num = 0;
++
++  f = fopen (fn, "r");
++  if (f == NULL)
++    return NULL;
++
++// loading one or two dictionaries (separated by NEXTLEVEL keyword)
++for (k = 0; k < 2; k++) { 
++  hashtab = hnj_hash_new ();
++#ifdef VERBOSE
++  global = hashtab;
++#endif
++  hnj_hash_insert (hashtab, "", 0);
++  dict[k] = hnj_malloc (sizeof(HyphenDict));
++  dict[k]->num_states = 1;
++  dict[k]->states = hnj_malloc (sizeof(HyphenState));
++  dict[k]->states[0].match = NULL;
++  dict[k]->states[0].repl = NULL;
++  dict[k]->states[0].fallback_state = -1;
++  dict[k]->states[0].num_trans = 0;
++  dict[k]->states[0].trans = NULL;
++  dict[k]->nextlevel = NULL;
++  dict[k]->lhmin = 0;
++  dict[k]->rhmin = 0;
++  dict[k]->clhmin = 0;
++  dict[k]->crhmin = 0;
++  dict[k]->nohyphen = NULL;
++  dict[k]->nohyphenl = 0;
++
++  /* read in character set info */
++  if (k == 0) {
++    for (i=0;i<MAX_NAME;i++) dict[k]->cset[i]= 0;
++    fgets(dict[k]->cset,  sizeof(dict[k]->cset),f);
++    for (i=0;i<MAX_NAME;i++)
++      if ((dict[k]->cset[i] == '\r') || (dict[k]->cset[i] == '\n'))
++        dict[k]->cset[i] = 0;
++    dict[k]->utf8 = (strcmp(dict[k]->cset, "UTF-8") == 0);
++  } else {
++    strcpy(dict[k]->cset, dict[0]->cset);
++    dict[k]->utf8 = dict[0]->utf8;
++  }
++
++  if (k == 0 || nextlevel) {
++    while (fgets (buf, sizeof(buf), f) != NULL) {
++      if (strncmp(buf, "NEXTLEVEL", 9) == 0) {
++	nextlevel = 1;
++	break;
++      } else if (buf[0] != '%') hnj_hyphen_load_line(buf, dict[k], hashtab);
+     }
++  } else if (k == 1) {
++    /* default first level: hyphen and ASCII apostrophe */
++    if (!dict[0]->utf8) hnj_hyphen_load_line("NOHYPHEN -,'\n", dict[k], hashtab);
++    else hnj_hyphen_load_line("NOHYPHEN -,',\xe2\x80\x93,\xe2\x80\x99\n", dict[k], hashtab);
++    hnj_hyphen_load_line("1-1\n", dict[k], hashtab); /* hyphen */
++    hnj_hyphen_load_line("1'1\n", dict[k], hashtab); /* ASCII apostrophe */
++    if (dict[0]->utf8) {
++      hnj_hyphen_load_line("1\xe2\x80\x93" "1\n", dict[k], hashtab); /* endash */
++      hnj_hyphen_load_line("1\xe2\x80\x99" "1\n", dict[k], hashtab); /* apostrophe */
++    }
++  }
+ 
+   /* Could do unioning of matches here (instead of the preprocessor script).
+      If we did, the pseudocode would look something like this:
+@@ -476,7 +492,15 @@
+   state_num = 0;
+ }
+   fclose(f);
+-  if (k == 2) dict[0]->nextlevel = dict[1];
++  if (nextlevel) dict[0]->nextlevel = dict[1];
++  else {
++    dict[1] -> nextlevel = dict[0];
++    dict[1]->lhmin = dict[0]->lhmin;
++    dict[1]->rhmin = dict[0]->rhmin;
++    dict[1]->clhmin = (dict[0]->clhmin) ? dict[0]->clhmin : ((dict[0]->lhmin) ? dict[0]->lhmin : 2);
++    dict[1]->crhmin = (dict[0]->crhmin) ? dict[0]->crhmin : ((dict[0]->rhmin) ? dict[0]->rhmin : 2);
++    return dict[1];
++  }
+   return dict[0];
+ }
+ 
+@@ -527,8 +551,13 @@
+   j = 0;
+   prep_word[j++] = '.';
+ 
+-  for (i = 0; i < word_size; i++)
++  for (i = 0; i < word_size; i++) {
++    if (word[i] <= '9' && word[i] >= '0') {
++      prep_word[j++] = '.';
++    } else {
+       prep_word[j++] = word[i];
++    }
++  }
+ 
+   prep_word[j++] = '.';
+   prep_word[j] = '\0';
+@@ -670,6 +699,9 @@
+       i += hnj_ligature(word[2]);
+     }
+ 
++    // ignore numbers
++    for (j = 0; word[j] <= '9' && word[j] >= '0'; j++) i--;
++
+     for (j = 0; i < lhmin && word[j] != '\0'; i++) do {
+       // check length of the non-standard part
+       if (*rep && *pos && *cut && (*rep)[j]) {
+@@ -696,9 +728,13 @@
+ int hnj_hyphen_rhmin(int utf8, const char *word, int word_size, char * hyphens,
+ 	char *** rep, int ** pos, int ** cut, int rhmin)
+ {
+-    int i;
+-    int j = word_size - 2;    
+-    for (i = 1; i < rhmin && j > 0; j--) {
++    int i = 1;
++    int j;
++
++    // ignore numbers
++    for (j = word_size - 1; j > 0 && word[j] <= '9' && word[j] >= '0'; j--) i--;
++
++    for (j = word_size - 2; i < rhmin && j > 0; j--) {
+       // check length of the non-standard part
+       if (*rep && *pos && *cut && (*rep)[j]) {
+         char * rh = strchr((*rep)[j], '=');
+@@ -756,8 +792,15 @@
+   j = 0;
+   prep_word[j++] = '.';
+   
+-  for (i = 0; i < word_size; i++)
++  for (i = 0; i < word_size; i++) {
++    if (word[i] <= '9' && word[i] >= '0') {
++      prep_word[j++] = '.';
++    } else {
+       prep_word[j++] = word[i];
++    }
++  }
++
++
+ 
+   prep_word[j++] = '.';
+   prep_word[j] = '\0';
+@@ -1093,8 +1136,10 @@
+ 	char *hyphword, char *** rep, int ** pos, int ** cut,
+ 	int lhmin, int rhmin, int clhmin, int crhmin)
+ {
+-  lhmin = (lhmin > 0 ? lhmin : dict->lhmin);
+-  rhmin = (rhmin > 0 ? rhmin : dict->rhmin);
++  lhmin = (lhmin > dict->lhmin) ? lhmin : dict->lhmin;
++  rhmin = (rhmin > dict->rhmin) ? rhmin : dict->rhmin;
++  clhmin = (clhmin > dict->clhmin) ? clhmin : dict->clhmin;
++  crhmin = (crhmin > dict->crhmin) ? crhmin : dict->crhmin;
+   hnj_hyphen_hyph_(dict, word, word_size, hyphens, rep, pos, cut,
+     clhmin, crhmin, 1, 1);
+   hnj_hyphen_lhmin(dict->utf8, word, word_size, hyphens,
diff --git a/hyphen/makefile.mk b/hyphen/makefile.mk
index 80f5bbd..06cfc58 100644
--- a/hyphen/makefile.mk
+++ b/hyphen/makefile.mk
@@ -43,7 +43,8 @@ ADDITIONAL_FILES += makefile.mk
 
 PATCH_FILES= \
     hyphen-2.7.1.patch \
-    hyphen-2.7.1-read-charset.patch
+    hyphen-2.7.1-read-charset.patch \
+    hyphen-2.7.1-2.8.2.patch
 
 .IF "$(GUI)"=="UNX"
 CONFIGURE_DIR=$(BUILD_DIR)
-- 
1.7.4.1

From a44474b261c37b44fe50324cd888ed4d8e99bf26 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?L=C3=A1szl=C3=B3=20N=C3=A9meth?= <nem...@numbertext.org>
Date: Fri, 7 Oct 2011 16:49:29 +0200
Subject: [PATCH] Fix hyphenation of words with hyphens (handled by Hyphen 2.8.2)

---
 dictionaries/en/README_hyph_en_GB.txt |    4 +
 dictionaries/en/README_hyph_en_US.txt |    4 +
 dictionaries/en/hyph_en_GB.dic        |  272 ---------------------------------
 dictionaries/en/hyph_en_US.dic        |  272 ---------------------------------
 4 files changed, 8 insertions(+), 544 deletions(-)

diff --git a/dictionaries/en/README_hyph_en_GB.txt b/dictionaries/en/README_hyph_en_GB.txt
index b6acd82..4afac7b 100644
--- a/dictionaries/en/README_hyph_en_GB.txt
+++ b/dictionaries/en/README_hyph_en_GB.txt
@@ -1,5 +1,9 @@
 hyph_en_GB.dic - British English hyphenation patterns for OpenOffice.org
 
+version 2011-10-07
+
+- remove unnecessary parts for Hyphen 2.8.2
+
 version 2010-03-16
 
 Changes
diff --git a/dictionaries/en/README_hyph_en_US.txt b/dictionaries/en/README_hyph_en_US.txt
index 649bcfc..8420172 100644
--- a/dictionaries/en/README_hyph_en_US.txt
+++ b/dictionaries/en/README_hyph_en_US.txt
@@ -1,5 +1,9 @@
 hyph_en_US.dic - American English hyphenation patterns for OpenOffice.org
 
+version 2011-10-07
+
+- remove unnecessary parts for the new Hyphen 2.8.2
+
 version 2010-03-16
 
 Changes
diff --git a/dictionaries/en/hyph_en_GB.dic b/dictionaries/en/hyph_en_GB.dic
index a310712..e19d0ea 100644
--- a/dictionaries/en/hyph_en_GB.dic
+++ b/dictionaries/en/hyph_en_GB.dic
@@ -3,13 +3,6 @@ LEFTHYPHENMIN 2
 RIGHTHYPHENMIN 3
 COMPOUNDLEFTHYPHENMIN 2
 COMPOUNDRIGHTHYPHENMIN 3
-1'.
-1's./'=s,1,2
-1't./'=t,1,2
-1’.
-1’s./’=s,1,2
-1’t./’=t,1,2
-NEXTLEVEL
 .1ab
 .ab4i
 .abo2
@@ -13391,271 +13384,6 @@ z5z2ot2
 .someth1in
 .somethi4ng
 .some5thing.
-8'8
-8a8'8
-8b8'8
-8c8'8
-8d8'8
-8e8'8
-8f8'8
-8g8'8
-8h8'8
-8i8'8
-8j8'8
-8k8'8
-8l8'8
-8m8'8
-8n8'8
-8o8'8
-8p8'8
-8q8'8
-8r8'8
-8s8'8
-8t8'8
-8u8'8
-8v8'8
-8w8'8
-8x8'8
-8y8'8
-8z8'8
-'a8
-'b8
-'c8
-'d8
-'e8
-'f8
-'g8
-'h8
-'i8
-'j8
-'k8
-'l8
-'m8
-'n8
-'o8
-'p8
-'q8
-'r8
-'s8
-'t8
-'u8
-'v8
-'w8
-'x8
-'y8
-'z8
-8’8
-8a8’8
-8b8’8
-8c8’8
-8d8’8
-8e8’8
-8f8’8
-8g8’8
-8h8’8
-8i8’8
-8j8’8
-8k8’8
-8l8’8
-8m8’8
-8n8’8
-8o8’8
-8p8’8
-8q8’8
-8r8’8
-8s8’8
-8t8’8
-8u8’8
-8v8’8
-8w8’8
-8x8’8
-8y8’8
-8z8’8
-’a8
-’b8
-’c8
-’d8
-’e8
-’f8
-’g8
-’h8
-’i8
-’j8
-’k8
-’l8
-’m8
-’n8
-’o8
-’p8
-’q8
-’r8
-’s8
-’t8
-’u8
-’v8
-’w8
-’x8
-’y8
-’z8
-8-8
-8a8-8
-8b8-8
-8c8-8
-8d8-8
-8e8-8
-8f8-8
-8g8-8
-8h8-8
-8i8-8
-8j8-8
-8k8-8
-8l8-8
-8m8-8
-8n8-8
-8o8-8
-8p8-8
-8q8-8
-8r8-8
-8s8-8
-8t8-8
-8u8-8
-8v8-8
-8w8-8
-8x8-8
-8y8-8
-8z8-8
--a8
--b8
--c8
--d8
--e8
--f8
--g8
--h8
--i8
--j8
--k8
--l8
--m8
--n8
--o8
--p8
--q8
--r8
--s8
--t8
--u8
--v8
--w8
--x8
--y8
--z8
-8–8
-8a8–8
-8b8–8
-8c8–8
-8d8–8
-8e8–8
-8f8–8
-8g8–8
-8h8–8
-8i8–8
-8j8–8
-8k8–8
-8l8–8
-8m8–8
-8n8–8
-8o8–8
-8p8–8
-8q8–8
-8r8–8
-8s8–8
-8t8–8
-8u8–8
-8v8–8
-8w8–8
-8x8–8
-8y8–8
-8z8–8
-–a8
-–b8
-–c8
-–d8
-–e8
-–f8
-–g8
-–h8
-–i8
-–j8
-–k8
-–l8
-–m8
-–n8
-–o8
-–p8
-–q8
-–r8
-–s8
-–t8
-–u8
-–v8
-–w8
-–x8
-–y8
-–z8
-8—8
-8a8—8
-8b8—8
-8c8—8
-8d8—8
-8e8—8
-8f8—8
-8g8—8
-8h8—8
-8i8—8
-8j8—8
-8k8—8
-8l8—8
-8m8—8
-8n8—8
-8o8—8
-8p8—8
-8q8—8
-8r8—8
-8s8—8
-8t8—8
-8u8—8
-8v8—8
-8w8—8
-8x8—8
-8y8—8
-8z8—8
-—a8
-—b8
-—c8
-—d8
-—e8
-—f8
-—g8
-—h8
-—i8
-—j8
-—k8
-—l8
-—m8
-—n8
-—o8
-—p8
-—q8
-—r8
-—s8
-—t8
-—u8
-—v8
-—w8
-—x8
-—y8
-—z8
 .afflat7u4s4es.
 .affl2
 .affl2a2tu
diff --git a/dictionaries/en/hyph_en_US.dic b/dictionaries/en/hyph_en_US.dic
index a1fd879..d7dd292 100644
--- a/dictionaries/en/hyph_en_US.dic
+++ b/dictionaries/en/hyph_en_US.dic
@@ -3,13 +3,6 @@ LEFTHYPHENMIN 2
 RIGHTHYPHENMIN 3
 COMPOUNDLEFTHYPHENMIN 2
 COMPOUNDRIGHTHYPHENMIN 3
-1'.
-1's./'=s,1,2
-1't./'=t,1,2
-1’.
-1’s./’=s,1,2
-1’t./’=t,1,2
-NEXTLEVEL
 .a2ch4
 .ad4der
 .a2d
@@ -10997,271 +10990,6 @@ z4zy
 .zeits2ch2
 .zeitsc4hr4
 .zeitschr4i2ft
-8'8
-8a8'8
-8b8'8
-8c8'8
-8d8'8
-8e8'8
-8f8'8
-8g8'8
-8h8'8
-8i8'8
-8j8'8
-8k8'8
-8l8'8
-8m8'8
-8n8'8
-8o8'8
-8p8'8
-8q8'8
-8r8'8
-8s8'8
-8t8'8
-8u8'8
-8v8'8
-8w8'8
-8x8'8
-8y8'8
-8z8'8
-'a8
-'b8
-'c8
-'d8
-'e8
-'f8
-'g8
-'h8
-'i8
-'j8
-'k8
-'l8
-'m8
-'n8
-'o8
-'p8
-'q8
-'r8
-'s8
-'t8
-'u8
-'v8
-'w8
-'x8
-'y8
-'z8
-8’8
-8a8’8
-8b8’8
-8c8’8
-8d8’8
-8e8’8
-8f8’8
-8g8’8
-8h8’8
-8i8’8
-8j8’8
-8k8’8
-8l8’8
-8m8’8
-8n8’8
-8o8’8
-8p8’8
-8q8’8
-8r8’8
-8s8’8
-8t8’8
-8u8’8
-8v8’8
-8w8’8
-8x8’8
-8y8’8
-8z8’8
-’a8
-’b8
-’c8
-’d8
-’e8
-’f8
-’g8
-’h8
-’i8
-’j8
-’k8
-’l8
-’m8
-’n8
-’o8
-’p8
-’q8
-’r8
-’s8
-’t8
-’u8
-’v8
-’w8
-’x8
-’y8
-’z8
-8-8
-8a8-8
-8b8-8
-8c8-8
-8d8-8
-8e8-8
-8f8-8
-8g8-8
-8h8-8
-8i8-8
-8j8-8
-8k8-8
-8l8-8
-8m8-8
-8n8-8
-8o8-8
-8p8-8
-8q8-8
-8r8-8
-8s8-8
-8t8-8
-8u8-8
-8v8-8
-8w8-8
-8x8-8
-8y8-8
-8z8-8
--a8
--b8
--c8
--d8
--e8
--f8
--g8
--h8
--i8
--j8
--k8
--l8
--m8
--n8
--o8
--p8
--q8
--r8
--s8
--t8
--u8
--v8
--w8
--x8
--y8
--z8
-8–8
-8a8–8
-8b8–8
-8c8–8
-8d8–8
-8e8–8
-8f8–8
-8g8–8
-8h8–8
-8i8–8
-8j8–8
-8k8–8
-8l8–8
-8m8–8
-8n8–8
-8o8–8
-8p8–8
-8q8–8
-8r8–8
-8s8–8
-8t8–8
-8u8–8
-8v8–8
-8w8–8
-8x8–8
-8y8–8
-8z8–8
-–a8
-–b8
-–c8
-–d8
-–e8
-–f8
-–g8
-–h8
-–i8
-–j8
-–k8
-–l8
-–m8
-–n8
-–o8
-–p8
-–q8
-–r8
-–s8
-–t8
-–u8
-–v8
-–w8
-–x8
-–y8
-–z8
-8—8
-8a8—8
-8b8—8
-8c8—8
-8d8—8
-8e8—8
-8f8—8
-8g8—8
-8h8—8
-8i8—8
-8j8—8
-8k8—8
-8l8—8
-8m8—8
-8n8—8
-8o8—8
-8p8—8
-8q8—8
-8r8—8
-8s8—8
-8t8—8
-8u8—8
-8v8—8
-8w8—8
-8x8—8
-8y8—8
-8z8—8
-—a8
-—b8
-—c8
-—d8
-—e8
-—f8
-—g8
-—h8
-—i8
-—j8
-—k8
-—l8
-—m8
-—n8
-—o8
-—p8
-—q8
-—r8
-—s8
-—t8
-—u8
-—v8
-—w8
-—x8
-—y8
-—z8
 .affin9i1ty
 .affin2it
 .affin9ity's8
-- 
1.7.4.1

Attachment: hyphenation_nohyphen_number_test.odt
Description: application/vnd.oasis.opendocument.text

_______________________________________________
LibreOffice mailing list
LibreOffice@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/libreoffice

Reply via email to