[Libreoffice-commits] .: dictionaries/Dictionary_ru.mk dictionaries/en dictionaries/hu_HU dictionaries/ru_RU

Libreoffice Gerrit user logerrit at kemper.freedesktop.org
Fri Aug 24 05:57:28 PDT 2012


 dictionaries/Dictionary_ru.mk                                                 |    1 
 dictionaries/en/Lightproof.py                                                 |  406 +---------
 dictionaries/en/README_lightproof_en.txt                                      |    4 
 dictionaries/en/pythonpath/lightproof_en.py                                   |    1 
 dictionaries/en/pythonpath/lightproof_handler_en.py                           |   76 -
 dictionaries/en/pythonpath/lightproof_impl_en.py                              |  334 ++++++++
 dictionaries/hu_HU/Lightproof.py                                              |  319 +------
 dictionaries/hu_HU/README_lightproof_hu_HU.txt                                |   11 
 dictionaries/hu_HU/pythonpath/lightproof_handler_hu_HU.py                     |   76 -
 dictionaries/hu_HU/pythonpath/lightproof_hu_HU.py                             |    1 
 dictionaries/hu_HU/pythonpath/lightproof_impl_hu_HU.py                        |  246 ++++++
 dictionaries/ru_RU/Lightproof.py                                              |  307 +------
 dictionaries/ru_RU/README_Lightproof_ru_RU.txt                                |    2 
 dictionaries/ru_RU/dialog/registry/schema/org/openoffice/Lightproof_ru_RU.xcs |   12 
 dictionaries/ru_RU/dialog/ru_RU.xdl                                           |   25 
 dictionaries/ru_RU/dialog/ru_RU_en_US.properties                              |    3 
 dictionaries/ru_RU/dialog/ru_RU_ru_RU.properties                              |   25 
 dictionaries/ru_RU/pythonpath/lightproof_handler_ru_RU.py                     |   76 -
 dictionaries/ru_RU/pythonpath/lightproof_impl_ru_RU.py                        |  233 +++++
 dictionaries/ru_RU/pythonpath/lightproof_opts_ru_RU.py                        |    4 
 dictionaries/ru_RU/pythonpath/lightproof_ru_RU.py                             |    2 
 21 files changed, 1180 insertions(+), 984 deletions(-)

New commits:
commit 28f07b24d9f0d971e4a50663cd0bdf52dd025fb5
Author: László Németh <nemeth at numbertext.org>
Date:   Fri Aug 24 14:13:14 2012 +0200

    restructured grammar checkers + small rule fixes: fdo#46542, fdo#46549, etc.
    
    Change-Id: I087ae647d1c8ce57abd7d1f4dc196f25458f90dc
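
Note on the overall shape of this change: the rule engine itself (the
morph/spell/suggest helpers, proofread(), the compiled-rule cache and the
ignore list) moves out of each Lightproof.py facade into the pythonpath
module lightproof_impl_<lang>.py, so the UNO component only forwards calls
and the impl module can be swapped or reloaded when rules are updated.
A rough, self-contained sketch of that delegation pattern follows; the
_Impl class below is a stand-in for lightproof_impl_en, not the actual
extension code.

    import re

    class _Impl:
        """Stand-in for a pythonpath module such as lightproof_impl_en."""
        def __init__(self):
            self.ignore = {}   # rule identifiers suppressed via ignoreRule()
            # one toy rule: compiled pattern, suggestion, comment
            self.dic = [(re.compile(r"\band and\b"), "and", "Did you mean:")]

        def proofread(self, text):
            errors = []
            for rx, suggestion, comment in self.dic:
                if rx.pattern in self.ignore:
                    continue
                for m in rx.finditer(text):
                    errors.append((m.start(), m.end() - m.start(),
                                   suggestion, comment))
            return tuple(errors)

    impl = _Impl()

    class Lightproof:
        """Facade in the spirit of the UNO component: no rule state of its own."""
        def doProofreading(self, text):
            return impl.proofread(text)

        def ignoreRule(self, rid):
            impl.ignore[rid] = 1

    if __name__ == "__main__":
        print(Lightproof().doProofreading("tea and and cake"))

In the real extension the facade additionally stores the service manager and
spell checker on the impl module (lightproof_impl_en.SMGR and .spellchecker)
during __init__, as the hunks below show.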

diff --git a/dictionaries/Dictionary_ru.mk b/dictionaries/Dictionary_ru.mk
index 95c466b..4321862 100644
--- a/dictionaries/Dictionary_ru.mk
+++ b/dictionaries/Dictionary_ru.mk
@@ -23,6 +23,7 @@ $(eval $(call gb_Dictionary_add_root_files,dict-ru,\
 $(eval $(call gb_Dictionary_add_files,dict-ru,dialog,\
 	dictionaries/ru_RU/dialog/ru_RU.xdl \
 	dictionaries/ru_RU/dialog/ru_RU_en_US.default \
+	dictionaries/ru_RU/dialog/ru_RU_ru_RU.properties \
 ))
 
 $(eval $(call gb_Dictionary_add_files,dict-ru,pythonpath,\
diff --git a/dictionaries/en/Lightproof.py b/dictionaries/en/Lightproof.py
index 0897e75..cdc93c6 100644
--- a/dictionaries/en/Lightproof.py
+++ b/dictionaries/en/Lightproof.py
@@ -1,235 +1,26 @@
 # -*- encoding: UTF-8 -*-
 # Lightproof grammar checker for LibreOffice and OpenOffice.org
-# http://launchpad.net/lightproof
-# version 1.4.3 (2011-12-05)
-#
-# 2009-2011 (c) László Németh (nemeth at numbertext org), license: MPL 1.1 / GPLv3+ / LGPLv3+
+# 2009-2012 (c) László Németh (nemeth at numbertext org), license: MPL 1.1 / GPLv3+ / LGPLv3+
 
-import uno, unohelper, sys, traceback, re
+import uno, unohelper, os, sys, traceback
 from lightproof_impl_en import locales
 from lightproof_impl_en import pkg
+import lightproof_impl_en
 import lightproof_handler_en
-from string import join
 
 from com.sun.star.linguistic2 import XProofreader, XSupportedLocales
 from com.sun.star.linguistic2 import ProofreadingResult, SingleProofreadingError
 from com.sun.star.lang import XServiceInfo, XServiceName, XServiceDisplayName
 from com.sun.star.lang import Locale
-from com.sun.star.text.TextMarkupType import PROOFREADING
-from com.sun.star.beans import PropertyValue
-
-# loaded rules
-langrule = {}
-# ignored rules
-ignore = {}
-
-# cache for morphogical analyses
-analyses = {}
-stems = {}
-suggestions = {}
-
-# assign Calc functions
-calcfunc = None
-
-# check settings
-def option(lang, opt):
-    return lightproof_handler_en.get_option(lang.Language + "_" + lang.Country, opt)
-
-# filtering affix fields (ds, is, ts etc.)
-def onlymorph(st):
-    if st != None:
-        st = re.sub(r"^.*(st:|po:)", r"\1", st) # keep last word part
-        st = re.sub(r"\b(?=[dit][sp]:)","@", st) # and its affixes
-        st = re.sub(r"(?<!@)\b\w\w:\w+","", st).replace('@','').strip()
-    return st
-
-# if the pattern matches all analyses of the input word, 
-# return the last matched substring
-def _morph(rLoc, word, pattern, all, onlyaffix):
-    global analyses
-    if word == None:
-        return None
-    if word not in analyses:
-        x = spellchecker.spell(u"<?xml?><query type='analyze'><word>" + word + "</word></query>", rLoc, ())
-        if not x:
-            return None
-        t = x.getAlternatives()
-        if not t:
-            t = [""]
-        analyses[word] = t[0]
-    a = analyses[word].split("</a>")[:-1]
-    result = None
-    p = re.compile(pattern)
-    for i in a:
-        if onlyaffix:
-            i = onlymorph(i)
-        result = p.search(i)
-        if result:
-            result = result.group(0)
-            if not all:
-                return result
-        elif all:
-            return None
-    return result
-
-def morph(rLoc, word, pattern, all=True):
-    return _morph(rLoc, word, pattern, all, False)
-
-def affix(rLoc, word, pattern, all=True):
-    return _morph(rLoc, word, pattern, all, True)
-
-def spell(rLoc, word):
-    if word == None:
-        return None
-    return spellchecker.isValid(word, rLoc, ())
-
-# get the tuple of the stem of the word or an empty array
-def stem(rLoc, word):
-    global stems
-    if word == None:
-        return []
-    if not word in stems:
-        x = spellchecker.spell(u"<?xml?><query type='stem'><word>" + word + "</word></query>", rLoc, ())
-        if not x:
-            return []
-        t = x.getAlternatives()
-        if not t:
-            t = []
-        stems[word] = list(t)
-    return stems[word]
-
-# get the tuple of the morphological generation of a word or an empty array
-def generate(rLoc, word, example):
-    if word == None:
-        return []
-    x = spellchecker.spell(u"<?xml?><query type='generate'><word>" + word + "</word><word>" + example + "</word></query>", rLoc, ())
-    if not x:
-        return []
-    t = x.getAlternatives()
-    if not t:
-        t = []
-    return list(t)
-
-# get suggestions
-def suggest(rLoc, word):
-    global suggestions
-    if word == None:
-        return word
-    if word not in suggestions:
-        x = spellchecker.spell("_" + word, rLoc, ())
-        if not x:
-            return word
-        t = x.getAlternatives()
-        suggestions[word] = join(t, "\n")
-    return suggestions[word]
-
-# get the nth word of the input string or None
-def word(s, n):
-    a = re.match("(?u)( [-.\w%]+){" + str(n-1) + "}( [-.\w%]+)", s)
-    if not a:
-        return None
-    return a.group(2)[1:]
-
-# get the (-)nth word of the input string or None
-def wordmin(s, n):
-    a = re.search("(?u)([-.\w%]+ )([-.\w%]+ ){" + str(n-1) + "}$", s)
-    if not a:
-        return None
-    return a.group(1)[:-1]
-
-def calc(funcname, par):
-    global calcfunc
-    global SMGR
-    if calcfunc == None:
-        calcfunc = SMGR.createInstance( "com.sun.star.sheet.FunctionAccess")
-        if calcfunc == None:
-                return None
-    return calcfunc.callFunction(funcname, par)
-
-def proofread( nDocId, TEXT, LOCALE, nStartOfSentencePos, nSuggestedSentenceEndPos, rProperties ):
-    global ignore
-    aErrs = []
-    s = TEXT[nStartOfSentencePos:nSuggestedSentenceEndPos]
-    for i in get_rule(LOCALE):
-        if i[0] and not str(i[0]) in ignore:
-            for m in i[0].finditer(s):
-              if not i[3] or eval(i[3]):
-                aErr = uno.createUnoStruct( "com.sun.star.linguistic2.SingleProofreadingError" )
-                aErr.nErrorStart        = nStartOfSentencePos + m.start(0) # nStartOfSentencePos
-                aErr.nErrorLength       = m.end(0) - m.start(0)
-                aErr.nErrorType         = PROOFREADING
-                aErr.aRuleIdentifier    = str(i[0])
-                iscap = (i[4] and m.group(0)[0:1].isupper())
-                if i[1][0:1] == "=":
-                        aErr.aSuggestions = tuple(cap(eval(i[1][1:]).split("\n"), iscap, LOCALE))
-                else:
-                        aErr.aSuggestions = tuple(cap(m.expand(i[1]).split("\n"), iscap, LOCALE))
-                comment = i[2]
-                if comment[0:1] == "=":
-                        comment = eval(comment[1:])
-                aErr.aShortComment      = comment.split("\\n")[0].strip()
-                aErr.aFullComment       = comment.split("\\n")[-1].strip()
-                if "://" in aErr.aFullComment:
-                        p = PropertyValue()
-                        p.Name = "FullCommentURL"
-                        p.Value = aErr.aFullComment
-                        aErr.aFullComment = aErr.aShortComment
-                        aErr.aProperties        = (p,)
-                else:
-                        aErr.aProperties        = ()
-                aErrs = aErrs + [aErr]
-    return tuple(aErrs)
-
-def cap(a, iscap, rLoc):
-    if iscap:
-        for i in range(0, len(a)):
-            if a[i][0:1] == "i":
-                if rLoc.Language == "tr" or rLoc.Language == "az":
-                    a[i] = u"\u0130" + a[i][1:]
-                elif a[i][1:2] == "j" and rLoc.Language == "nl":
-                    a[i] = "IJ" + a[i][2:]
-                else:
-                    a[i] = "I" + a[i][1:]
-            else:
-                a[i] = a[i].capitalize()
-    return a
-
-def get_rule(rLocale):
-        module = rLocale.Language
-        if rLocale.Country != "":
-                module = module + "_" + rLocale.Country
-        try:
-                return langrule[module]
-        except:
-                try:
-                        module = rLocale.Language
-                        return langrule[module]
-                except:
-                        try:
-                                d = __import__("lightproof_" + pkg)
-                        except:
-                                print "Error: missing language data: " + module
-                                return None
-        # compile regular expressions
-        for i in d.dic:
-                try:
-                        if re.compile("[(][?]iu[)]").match(i[0]):
-                                i += [True]
-                                i[0] = re.sub("[(][?]iu[)]", "(?u)", i[0])
-                        else:
-                                i += [False]
-                        i[0] = re.compile(i[0])
-                except:
-                        print "Lightproof: bad rule -- ", i[0]
-                        i[0] = None
-        langrule[module] = d.dic
-        return langrule[module]
+# reload in obj.reload in Python 3
+try:
+    from obj import reload
+except:
+    pass
 
 class Lightproof( unohelper.Base, XProofreader, XServiceInfo, XServiceName, XServiceDisplayName, XSupportedLocales):
 
     def __init__( self, ctx, *args ):
-        global spellchecker
-        global SMGR
         self.ctx = ctx
         self.ServiceName = "com.sun.star.linguistic2.Proofreader"
         self.ImplementationName = "org.openoffice.comp.pyuno.Lightproof." + pkg
@@ -240,8 +31,9 @@ class Lightproof( unohelper.Base, XProofreader, XServiceInfo, XServiceName, XSer
             self.locales += [Locale(l[0], l[1], l[2])]
         self.locales = tuple(self.locales)
         currentContext = uno.getComponentContext()
-        SMGR = currentContext.ServiceManager
-        spellchecker = SMGR.createInstanceWithContext("com.sun.star.linguistic2.SpellChecker", currentContext)
+        lightproof_impl_en.SMGR = currentContext.ServiceManager
+        lightproof_impl_en.spellchecker = \
+            lightproof_impl_en.SMGR.createInstanceWithContext("com.sun.star.linguistic2.SpellChecker", currentContext)
         lightproof_handler_en.load(currentContext)
 
     # XServiceName method implementations
@@ -250,13 +42,13 @@ class Lightproof( unohelper.Base, XProofreader, XServiceInfo, XServiceName, XSer
 
     # XServiceInfo method implementations
     def getImplementationName (self):
-                return self.ImplementationName
+        return self.ImplementationName
 
     def supportsService(self, ServiceName):
-                return (ServiceName in self.SupportedServiceNames)
+        return (ServiceName in self.SupportedServiceNames)
 
     def getSupportedServiceNames (self):
-                return self.SupportedServiceNames
+        return self.SupportedServiceNames
 
     # XSupportedLocales
     def hasLocale(self, aLocale):
@@ -282,6 +74,48 @@ class Lightproof( unohelper.Base, XProofreader, XServiceInfo, XServiceName, XSer
         aRes.aLocale = rLocale
         aRes.nStartOfSentencePosition = nStartOfSentencePos
         aRes.nStartOfNextSentencePosition = nSuggestedSentenceEndPos
+        aRes.aProperties = ()
+        aRes.xProofreader = self
+        aRes.aErrors = ()
+        if len(rProperties) > 0 and rProperties[0].Name == "Update":
+            try:
+                import lightproof_compile_en
+                try:
+                    code = lightproof_compile_en.c(rProperties[0].Value, rLocale.Language, True)
+                except Exception as e:
+                    aRes.aText, aRes.nStartOfSentencePosition = e
+                    return aRes
+                path = lightproof_impl_en.get_path()
+                f = open(path.replace("_impl", ""), "w")
+                f.write("dic = %s" % code["rules"])
+                f.close()
+                if pkg in lightproof_impl_en.langrule:
+                    mo = lightproof_impl_en.langrule[pkg]
+                    reload(mo)
+                    lightproof_impl_en.compile_rules(mo.dic)
+                    lightproof_impl_en.langrule[pkg] = mo
+                if "code" in code:
+                    f = open(path, "r")
+                    ft = f.read()
+                    f.close()
+                    f = open(path, "w")
+                    f.write(ft[:ft.find("# [code]") + 8] + "\n" + code["code"])
+                    f.close()
+                    try:
+                        reload(lightproof_impl_en)
+                    except Exception as e:
+                        aRes.aText = e.args[0]
+                        if e.args[1][3] == "": # "expected an indented block" (end of file)
+                            aRes.nStartOfSentencePosition = len(rText.split("\n"))
+                        else:
+                            aRes.nStartOfSentencePosition = rText.split("\n").index(e.args[1][3][:-1]) + 1
+                        return aRes
+                aRes.aText = ""
+                return aRes
+            except:
+                if 'PYUNO_LOGLEVEL' in os.environ:
+                    print(traceback.format_exc())
+
         l = rText[nSuggestedSentenceEndPos:nSuggestedSentenceEndPos+1]
         while l == " ":
             aRes.nStartOfNextSentencePosition = aRes.nStartOfNextSentencePosition + 1
@@ -291,131 +125,31 @@ class Lightproof( unohelper.Base, XProofreader, XServiceInfo, XServiceName, XSer
         aRes.nBehindEndOfSentencePosition = aRes.nStartOfNextSentencePosition
 
         try:
-            aRes.aErrors = proofread( nDocId, rText, rLocale, \
+            aRes.aErrors = lightproof_impl_en.proofread( nDocId, rText, rLocale, \
                 nStartOfSentencePos, aRes.nBehindEndOfSentencePosition, rProperties)
-        except:
-            # traceback.print_exc(file=sys.stdout)
-            aRes.aErrors = ()
-        aRes.aProperties = ()
-        aRes.xProofreader = self
+        except Exception as e:
+            if len(rProperties) > 0 and rProperties[0].Name == "Debug" and len(e.args) == 2:
+                aRes.aText, aRes.nStartOfSentencePosition = e
+            else:
+                if 'PYUNO_LOGLEVEL' in os.environ:
+                    print(traceback.format_exc())
         return aRes
 
     def ignoreRule(self, rid, aLocale):
-        global ignore
-        ignore[rid] = 1
+        lightproof_impl_en.ignore[rid] = 1
 
     def resetIgnoreRules(self):
-        global ignore
-        ignore = {}
+        lightproof_impl_en.ignore = {}
 
     # XServiceDisplayName
     def getServiceDisplayName(self, aLocale):
-        return "Lightproof Grammar Checker (" + pkg + ")"
+        return lightproof_impl_en.name
 
 g_ImplementationHelper = unohelper.ImplementationHelper()
 g_ImplementationHelper.addImplementation( Lightproof, \
-        "org.openoffice.comp.pyuno.Lightproof." + pkg,
-        ("com.sun.star.linguistic2.Proofreader",),)
+    "org.openoffice.comp.pyuno.Lightproof." + pkg,
+    ("com.sun.star.linguistic2.Proofreader",),)
 
 g_ImplementationHelper.addImplementation( lightproof_handler_en.LightproofOptionsEventHandler, \
-        "org.openoffice.comp.pyuno.LightproofOptionsEventHandler." + pkg,
-        ("com.sun.star.awt.XContainerWindowEventHandler",),)
-# pattern matching for common English abbreviations
-abbrev = re.compile("(?i)\\b([a-z]|acct|approx|appt|apr|apt|assoc|asst|aug|ave|avg|co(nt|rp)?|ct|dec|defn|dept|dr|eg|equip|esp|est|etc|excl|ext|feb|fri|ft|govt?|hrs?|ib(id)?|ie|in(c|t)?|jan|jr|jul|lit|ln|mar|max|mi(n|sc)?|mon|Mrs?|mun|natl?|neg?|no(rm|s|v)?|nw|obj|oct|org|orig|pl|pos|prev|proj|psi|qty|rd|rec|rel|reqd?|resp|rev|sat|sci|se(p|pt)?|spec(if)?|sq|sr|st|subj|sun|sw|temp|thurs|tot|tues|univ|var|vs)\\.")
-
-# pattern for paragraph checking
-paralcap = re.compile(u"(?u)^[a-z].*[.?!] [A-Z].*[.?!][)\u201d]?$")
-
-
-punct = { "?": "question mark", "!": "exclamation mark",
-  ",": "comma", ":": "colon", ";": "semicolon",
-  "(": "opening parenthesis", ")": "closing parenthesis",
-  "[": "opening square bracket", "]": "closing square bracket",
-  u"\u201c": "opening quotation mark", u"\u201d": "closing quotation mark"}
-
-
-aA = set(["eucalypti", "eucalyptus", "Eucharist", "Eucharistic",
-"euchre", "euchred", "euchring", "Euclid", "euclidean", "Eudora",
-"eugene", "Eugenia", "eugenic", "eugenically", "eugenicist",
-"eugenicists", "eugenics", "Eugenio", "eukaryote", "Eula", "eulogies",
-"eulogist", "eulogists", "eulogistic", "eulogized", "eulogizer",
-"eulogizers", "eulogizing", "eulogy", "eulogies", "Eunice", "eunuch",
-"eunuchs", "Euphemia", "euphemism", "euphemisms", "euphemist",
-"euphemists", "euphemistic", "euphemistically", "euphonious",
-"euphoniously", "euphonium", "euphony", "euphoria", "euphoric",
-"Euphrates", "euphuism", "Eurasia", "Eurasian", "Eurasians", "eureka",
-"eurekas", "eurhythmic", "eurhythmy", "Euridyce", "Euripides", "euripus",
-"Euro", "Eurocentric", "Euroclydon", "Eurocommunism", "Eurocrat",
-"eurodollar", "Eurodollar", "Eurodollars", "Euromarket", "Europa",
-"Europe", "European", "Europeanisation", "Europeanise", "Europeanised",
-"Europeanization", "Europeanize", "Europeanized", "Europeans", "europium",
-"Eurovision", "Eustace", "Eustachian", "Eustacia", "euthanasia",
-"Ewart", "ewe", "Ewell", "ewer", "ewers", "Ewing", "once", "one",
-"oneness", "ones", "oneself", "onetime", "oneway", "oneyear", "u",
-"U", "UART", "ubiquitous", "ubiquity", "Udale", "Udall", "UEFA",
-"Uganda", "Ugandan", "ugric", "UK", "ukase", "Ukraine", "Ukrainian",
-"Ukrainians", "ukulele", "Ula", "ululated", "ululation", "Ulysses",
-"UN", "unanimity", "unanimous", "unanimously", "unary", "Unesco",
-"UNESCO", "UNHCR", "uni", "unicameral", "unicameralism", "Unicef",
-"UNICEF", "unicellular", "Unicode", "unicorn", "unicorns", "unicycle",
-"unicyclist", "unicyclists", "unidimensional", "unidirectional",
-"unidirectionality", "unifiable", "unification", "unified", "unifier",
-"unifilar", "uniform", "uniformally", "uniformed", "uniformer",
-"uniforming", "uniformisation", "uniformise", "uniformitarian",
-"uniformitarianism", "uniformity", "uniformly", "uniformness", "uniforms",
-"unify", "unifying", "unijugate", "unilateral", "unilateralisation",
-"unilateralise", "unilateralism", "unilateralist", "unilaterally",
-"unilinear", "unilingual", "uniliteral", "uniliteralism", "uniliteralist",
-"unimodal", "union", "unionism", "unionist", "unionists", "unionisation",
-"unionise", "unionised", "unionising", "unionization", "unionize",
-"unionized", "unionizing", "unions", "unipolar", "uniprocessor",
-"unique", "uniquely", "uniqueness", "uniquer", "Uniroyal", "unisex",
-"unison", "Unisys", "unit", "Unitarian", "Unitarianism", "Unitarians",
-"unitary", "unite", "united", "unitedly", "uniter", "unites", "uniting",
-"unitize", "unitizing", "unitless", "units", "unity", "univ", "Univac",
-"univalent", "univalve", "univariate", "universal", "universalisation",
-"universalise", "universalised", "universaliser", "universalisers",
-"universalising", "universalism", "universalist", "universalistic",
-"universality", "universalisation", "universalization", "universalize",
-"universalized", "universalizer", "universalizers", "universalizing",
-"universally", "universalness", "universe", "universes", "universities",
-"university", "univocal", "Unix", "uracil", "Urals", "uranium", "Uranus",
-"uranyl", "urate", "urea", "uremia", "uremic", "ureter", "urethane",
-"urethra", "urethral", "urethritis", "Urey", "Uri", "uric", "urinal",
-"urinalysis", "urinary", "urinated", "urinating", "urination", "urine",
-"urogenital", "urokinase", "urologist", "urologists", "urology",
-"Uruguay", "Uruguayan", "Uruguayans", "US", "USA", "usable", "usage",
-"usages", "use", "used", "useful", "usefulness", "usefully", "useless",
-"uselessly", "uselessness", "Usenet", "user", "users", "uses", "using",
-"usual", "usually", "usurer", "usurers", "usuress", "usurial", "usurious",
-"usurp", "usurpation", "usurped", "usurper", "usurping", "usurps",
-"usury", "Utah", "utensil", "utensils", "uterine", "uterus", "Utica",
-"utilitarian", "utilitarianism", "utilities", "utility", "utilizable",
-"utilization", "utilize", "utilized", "utilizes", "utilizing", "utopia",
-"utopian", "utopians", "utopias", "Utrecht", "Uttoxeter", "uvula",
-"uvular"])
-
-aAN = set(["f", "F", "FBI", "FDA", "heir", "heirdom", "heired",
-"heirer", "heiress", "heiring", "heirloom", "heirship", "honest",
-"honester", "honestly", "honesty", "honor", "honorable", "honorableness",
-"honorably", "honorarium", "honorary", "honored", "honorer", "honorific",
-"honoring", "honors", "honour", "honourable", "honourableness",
-"honourably", "honourarium", "honourary", "honoured", "honourer",
-"honourific", "honouring", "Honours", "hors", "hour", "hourglass", "hourlong",
-"hourly", "hours", "l", "L", "LCD", "m", "M", "MBA", "MP", "mpg", "mph",
-"MRI", "MSc", "MTV", "n", "N", "NBA", "NBC", "NFL", "NGO", "NHL", "r",
-"R", "s", "S", "SMS", "sos", "SOS", "SPF", "std", "STD", "SUV", "x",
-"X", "XML"])
-
-aB = set(["H", "hallucination", "haute", "hauteur", "herb", "herbaceous", "herbal",
-"herbalist", "herbalism", "heroic", "hilarious", "historian", "historic", "historical",
-"homage", "homophone", "horrendous", "hospitable", "horrific", "hotel", "hypothesis", "Xmas"])
-
-def measurement(mnum, min, mout, mstr, decimal, remove):
-    if min == "ft" or min == "in" or min == "mi":
-        mnum = mnum.replace(" 1/2", ".5").replace(u" \xbd", ".5").replace(u"\xbd",".5")
-    m = calc("CONVERT_ADD", (float(eval(mnum.replace(remove, "").replace(decimal, ".").replace(u"\u2212", "-"))), min, mout))
-    a = list(set([str(calc("ROUND", (m, 0)))[:-2], str(calc("ROUND", (m, 1))), str(calc("ROUND", (m, 2))), str(m)])) # remove duplicated rounded items
-    a.sort(lambda x, y: len(x) - len(y)) # sort by string length
-    return join(a, mstr + "\n").replace(".", decimal).replace("-", u"\u2212") + mstr
-
+    "org.openoffice.comp.pyuno.LightproofOptionsEventHandler." + pkg,
+    ("com.sun.star.awt.XContainerWindowEventHandler",),)
diff --git a/dictionaries/en/README_lightproof_en.txt b/dictionaries/en/README_lightproof_en.txt
index 0e6f33b..43c91fe 100644
--- a/dictionaries/en/README_lightproof_en.txt
+++ b/dictionaries/en/README_lightproof_en.txt
@@ -1,3 +1,3 @@
 English sentence checker for LibreOffice
-see http://launchpad.net/lightproof and http://numbertext.org/lightproof
-2011 (c) László Németh, license: MPL 1.1 / GPLv3+ / LGPLv3+
+see git://anongit.freedesktop.org/libreoffice/lightproof
+2011-2012 (c) László Németh, license: MPL 1.1 / GPLv3+ / LGPLv3+
diff --git a/dictionaries/en/pythonpath/lightproof_en.py b/dictionaries/en/pythonpath/lightproof_en.py
index a437797..3108489 100644
--- a/dictionaries/en/pythonpath/lightproof_en.py
+++ b/dictionaries/en/pythonpath/lightproof_en.py
@@ -1 +1,3 @@
-dic = [[u'(?u)(?<![-\\w\u2013.,\xad])and and(?![-\\w\u2013\xad])', u'and', u'Did you mean:', False], [u'(?u)(?<![-\\w\u2013.,\xad])or or(?![-\\w\u2013\xad])', u'or', u'Did you mean:', False], [u'(?u)(?<![-\\w\u2013.,\xad])for for(?![-\\w\u2013\xad])', u'for', u'Did you mean:', False], [u'(?u)(?<![-\\w\u2013.,\xad])the the(?![-\\w\u2013\xad])', u'the', u'Did you mean:', False], [u'(?iu)(?<![-\\w\u2013.,\xad])[Yy][Ii][Nn][Gg] [Aa][Nn][Dd] [Yy][Aa][Nn][Gg](?![-\\w\u2013\xad])', u'yin and yang', u'Did you mean:', False], [u'(?iu)(?<![-\\w\u2013.,\xad])[Ss][Cc][Oo][Tt] [Ff][Rr][Ee][Ee](?![-\\w\u2013\xad])', u'scot-free\\nscotfree', u'Did you mean:', False], [u"(?iu)(?<![-\\w\u2013.,\xad])([Yy][Oo][Uu][Rr]|[Hh][Ee][Rr]|[Oo][Uu][Rr]|[Tt][Hh][Ee][Ii][Rr])['\u2019][Ss](?![-\\w\u2013\xad])", u'\\1s', u'Possessive pronoun: \\n http://en.wikipedia.org/wiki/Possessive_pronoun', False], [u'(?u)(?<![-\\w\u2013.,\xad])(?P<a_1>[Aa])n(?P<_>[ ][\'\u2018"\u201c]?)(?P<vow_1>[aeiouAEIOU]\\w*)(?P<
 etc_1>[-\u2013\'\u2019\\w]*)(?![-\\w\u2013\xad])', u'\\g<a_1>\\g<_>\\g<vow_1>\\g<etc_1>', u'Did you mean: \\n http://en.wikipedia.org/wiki/English_articles#Discrimination_between_a_and_an', u'm.group("vow_1") in aA or m.group("vow_1").lower() in aA'], [u'(?u)(?<![-\\w\u2013.,\xad])a(?P<_>[ ][\'\u2018"\u201c]?)(?P<vow_1>[aeiouAEIOU]\\w*)(?P<etc_1>[-\u2013\'\u2019\\w]*)(?![-\\w\u2013\xad])', u'an\\g<_>\\g<vow_1>\\g<etc_1>', u'Bad article? \\n http://en.wikipedia.org/wiki/English_articles#Discrimination_between_a_and_an', u'(m.group("vow_1") <> m.group("vow_1").upper()) and not (m.group("vow_1") in aA or m.group("vow_1").lower() in aA) and spell(LOCALE,m.group("vow_1"))'], [u'(?u)(?<![-\\w\u2013.,\xad])a(?P<_>[ ][\'\u2018"\u201c]?)(?P<con_1>[bcdfghj-np-tv-zBCDFGHJ-NP-TV-Z]\\w*)(?P<etc_1>[-\u2013\'\u2019\\w]*)(?![-\\w\u2013\xad])', u'an\\g<_>\\g<con_1>\\g<etc_1>', u'Did you mean: \\n http://en.wikipedia.org/wiki/English_articles#Discrimination_between_a_and_an', u'm.group("con_1
 ") in aAN or m.group("con_1").lower() in aAN'], [u'(?u)(?<![-\\w\u2013.,\xad])(?P<a_1>[Aa])n(?P<_>[ ][\'\u2018"\u201c]?)(?P<con_1>[bcdfghj-np-tv-zBCDFGHJ-NP-TV-Z]\\w*)(?P<etc_1>[-\u2013\'\u2019\\w]*)(?![-\\w\u2013\xad])', u'\\g<a_1>\\g<_>\\g<con_1>\\g<etc_1>', u'Bad article? \\n http://en.wikipedia.org/wiki/English_articles#Discrimination_between_a_and_an', u'(m.group("con_1") <> m.group("con_1").upper()) and not (m.group("con_1") in aA or m.group("con_1").lower() in aAN) and not m.group("con_1") in aB and spell(LOCALE,m.group("con_1"))'], [u'(?u)((?<=[!?.] )|^)A(?P<_>[ ][\'\u2018"\u201c]?)(?P<vow_1>[aeiouAEIOU]\\w*)(?P<etc_1>[-\u2013\'\u2019\\w]*)(?![-\\w\u2013\xad])', u'An\\g<_>\\g<vow_1>\\g<etc_1>', u'Bad article? \\n http://en.wikipedia.org/wiki/English_articles#Discrimination_between_a_and_an', u'(m.group("vow_1") <> m.group("vow_1").upper()) and not (m.group("vow_1") in aA or m.group("vow_1").lower() in aA) and spell(LOCALE,m.group("vow_1"))'], [u'(?u)((?<=[!?.] )|^)A(
 ?P<_>[ ][\'\u2018"\u201c]?)(?P<con_1>[bcdfghj-np-tv-zBCDFGHJ-NP-TV-Z]\\w*)(?P<etc_1>[-\u2013\'\u2019\\w]*)(?![-\\w\u2013\xad])', u'An\\g<_>\\g<con_1>\\g<etc_1>', u'Did you mean: \\n http://en.wikipedia.org/wiki/English_articles#Discrimination_between_a_and_an', u'm.group("con_1") in aAN or m.group("con_1").lower() in aAN'], [u'(?u)(?<![-\\w\u2013.,\xad])a(?P<_>[ ][\'\u2018"\u201c]?)(?P<nvow_1>(8[0-9]*|1[18](000)*)(th)?)(?P<etc_1>[-\u2013\'\u2019\\w]*)(?![-\\w\u2013\xad])', u'an\\g<_>\\g<nvow_1>\\g<etc_1>', u'Did you mean: \\n http://en.wikipedia.org/wiki/English_articles#Discrimination_between_a_and_an', False], [u'(?u)((?<=[!?.] )|^)A(?P<_>[ ][\'\u2018"\u201c]?)(?P<nvow_1>(8[0-9]*|1[18](000)*)(th)?)(?P<etc_1>[-\u2013\'\u2019\\w]*)(?![-\\w\u2013\xad])', u'An\\g<_>\\g<nvow_1>\\g<etc_1>', u'Did you mean: \\n http://en.wikipedia.org/wiki/English_articles#Discrimination_between_a_and_an', False], [u'(?u)(?<![-\\w\u2013.,\xad])(?P<a_1>[Aa])n(?P<_>[ ][\'\u2018"\u201c]?)(?P<ncon_1>
 [0-79][0-9]*)(?P<etc_1>[-\u2013\'\u2019\\w]*)(?![-\\w\u2013\xad])', u'\\g<a_1>\\g<_>\\g<ncon_1>\\g<etc_1>', u'Did you mean: \\n http://en.wikipedia.org/wiki/English_articles#Discrimination_between_a_and_an', u'not m.group("ncon_1")[:2] in ["11", "18"]'], [u'(?u)(?<![-\\w\u2013.,\xad])(^)(?P<low_1>[a-z]+)(?![-\\w\u2013\xad])', u'= m.group("low_1").capitalize()', u'Missing capitalization?', u'paralcap.search(TEXT) and not abbrev.search(TEXT)'], [u'(?u)((?<=[!?.] )|^)(?P<low_1>[a-z]+)(?![-\\w\u2013\xad])', u'= m.group("low_1").capitalize()', u'Missing capitalization?', u'option(LOCALE,"cap") and not abbrev.search(TEXT)'], [u'(?u) ([.?!,:;)\u201d\\]])\\b', u'\\1 ', u'Reversed space and punctuation?', False], [u'(?u) +[.]', u'.', u'Extra space before the period?', u'LOCALE.Country == "US"'], [u'(?u) +[.]', u'.', u'Extra space before the full stop?', u'LOCALE.Country != "US"'], [u'(?u) +([?!,:;)\u201d\\]])', u'\\1', u'= "Extra space before the " + punct[m.group(1)] + "?"', False],
  [u'(?u)([([\u201c]) ', u'\\1', u'= "Extra space after the " + punct[m.group(1)] + "?"', False], [u'(?u)\\b(---?| --? )\\b', u' \u2013 \\n\u2014', u'En dash or em dash:', u'not option(LOCALE,"ndash") and not option(LOCALE,"mdash")'], [u'(?u)\\b(---?| --? |\u2014)\\b', u' \u2013 ', u'En dash:', u'option(LOCALE,"ndash") and not option(LOCALE,"mdash")'], [u'(?u)\\b(---?| --? | \u2013 )\\b', u'\u2014', u'Em dash:', u'option(LOCALE,"mdash")'], [u'(?u)(?P<number_1>\\d+([.]\\d+)?)(x| x )(?P<number_2>\\d+([.]\\d+)?)', u'\\g<number_1>\xd7\\g<number_2>', u'Multiplication sign. \\n http://en.wikipedia.org/wiki/Multiplication_sign', u'option(LOCALE,"times")'], [u'(?u)(?P<Abc_1>[a-zA-Z]+)(?P<pun_1>[?!,:;%\u2030\u2031\u02da\u201c\u201d\u2018])(?P<Abc_2>[a-zA-Z]+)', u'\\g<Abc_1>\\g<pun_1> \\g<Abc_2>', u'Missing space?', False], [u'(?u)(?P<abc_1>[a-z]+)[.](?P<ABC_1>[A-Z]+)', u'\\g<abc_1>. \\g<ABC_1>', u'Missing space?', False], [u'(?u)[)]', u'', u'Extra closing parenthesis?', u'option(LOCAL
 E,"pair") and not "(" in TEXT'], [u'(?u)[(]', u'', u'Extra opening parenthesis?', u'option(LOCALE,"pair") and TEXT[-1] in u"?!;:\u201d\u2019" and not ")" in TEXT'], [u'(?u)(?<![0-9])\u201d', u'', u'Extra quotation mark?', u'option(LOCALE,"pair") and not u"\u201c" in TEXT'], [u'(?u)(?<=[0-9])\u201d', u'\u2033\\n', u'Bad double prime or extra quotation mark?', u'option(LOCALE,"apostrophe") and not u"\u201c" in TEXT'], [u'(?u)\u201c', u'', u'Extra quotation mark?', u'option(LOCALE,"pair") and TEXT[-1] in u"?!;:\u201d\u2019" and not u"\u201d" in TEXT'], [u'(?u)[.]{3}', u'\u2026', u'Ellipsis.', u'option(LOCALE,"ellipsis")'], [u'(?u)\\b {2,3}(\\b|$)', u'\\1 ', u'Extra space.', u'option(LOCALE,"spaces")'], [u'(?u)(^|\\b|(?P<pun_1>[?!,:;%\u2030\u2031\u02da\u201c\u201d\u2018])|[.]) {2,3}(\\b|$)', u'\\1 ', u'Extra space.', u'option(LOCALE,"spaces2")'], [u'(?u)(^|\\b|(?P<pun_1>[?!,:;%\u2030\u2031\u02da\u201c\u201d\u2018])|[.]) {4,}(\\b|$)', u'\\1 \\n\t', u'Change multiple spaces to a s
 ingle space or a tabulator:', u'option(LOCALE,"spaces3")'], [u'(?iu)[\\"\u201c\u201d\u201f\u201e]((?P<abc_1>[a-zA-Z]+)[^\\"\u201c\u201d\u201f\u201e]*)[\\"\u201c\u201f]', u'\u201c\\1\u201d', u'Quotation marks.', u'option(LOCALE,"quotation")'], [u'(?iu)[\\"\u201d\u201f\u201e]((?P<abc_1>[a-zA-Z]+)[^\\"\u201c\u201d\u201f\u201e]*)[\\"\u201c\u201d\u201f]', u'\u201c\\1\u201d', u'Quotation marks.', u'option(LOCALE,"quotation")'], [u"(?iu)'(?P<abc_1>[a-zA-Z]+)'", u'\u2018\\g<abc_1>\u2019', u'Quotation marks.', u'option(LOCALE,"apostrophe")'], [u'(?iu)[\\"\u201d\u201f\u201e]((?P<abc_1>[a-zA-Z]+)[^\\"\u201c\u201d\u201f\u201e]*)[\\"\u201c\u201d\u201f]', u'\u201c\\1\u201d', u'Quotation marks.', u'option(LOCALE,"apostrophe")'], [u"(?iu)(?P<Abc_1>[a-zA-ZA-Z]+)'(?P<w_1>\\w*)", u'\\g<Abc_1>\u2019\\g<w_1>', u'Replace typewriter apostrophe or quotation mark:', u'option(LOCALE,"apostrophe")'], [u"(?u)(?<= )'(?P<Abc_1>[a-zA-Z]+)", u'\u2018\\g<Abc_1>\\n\u2019\\g<Abc_1>', u'Replace typewriter quot
 ation mark or apostrophe:', u'option(LOCALE,"apostrophe")'], [u"(?u)^'(?P<Abc_1>[a-zA-Z]+)", u'\u2018\\g<Abc_1>\\n\u2019\\g<Abc_1>', u'Replace typewriter quotation mark or apostrophe:', u'option(LOCALE,"apostrophe")'], [u'(?u)\\b(?P<d2_1>\\d\\d)(?P<d_1>\\d\\d\\d)\\b', u'\\g<d2_1>,\\g<d_1>\\n\\g<d2_1>\u202f\\g<d_1>', u'Use thousand separator (common or ISO).', u'option(LOCALE,"numsep")'], [u'(?u)\\b(?P<D_1>\\d{1,3})(?P<d_1>\\d\\d\\d)(?P<d_2>\\d\\d\\d)\\b', u'\\g<D_1>,\\g<d_1>,\\g<d_2>\\n\\g<D_1>\u202f\\g<d_1>\u202f\\g<d_2>', u'Use thousand separators (common or ISO).', u'option(LOCALE,"numsep")'], [u'(?u)\\b(?P<D_1>\\d{1,3})(?P<d_1>\\d\\d\\d)(?P<d_2>\\d\\d\\d)(?P<d_3>\\d\\d\\d)\\b', u'\\g<D_1>,\\g<d_1>,\\g<d_2>,\\g<d_3>\\n\\g<D_1>\u202f\\g<d_1>\u202f\\g<d_2>\u202f\\g<d_3>', u'Use thousand separators (common or ISO).', u'option(LOCALE,"numsep")'], [u'(?u)(?<![-\\w\u2013.,\xad])(?P<Abc_1>[a-zA-Z]+) \\1(?![-\\w\u2013\xad])', u'\\g<Abc_1>', u'Word duplication?', u'option(LOCALE,"
 dup")'], [u'(?u)(?<![-\\w\u2013.,\xad])([Tt])his (?P<abc_1>[a-z]+)(?![-\\w\u2013\xad])', u'\\1hese \\g<abc_1>\\n\\1his, \\g<abc_1>', u'Did you mean:', u'option(LOCALE,"grammar") and morph(LOCALE,m.group("abc_1"), "Ns")'], [u"(?u)(?<![-\\w\u2013.,\xad])with it['\u2019]s(?![-\\w\u2013\xad])", u'with its\\nwith, it\u2019s', u'Did you mean:', u'option(LOCALE,"grammar")'], [u"(?iu)(?<![-\\w\u2013.,\xad])([Ii][Tt]|[Ss]?[Hh][Ee]) [Dd][Oo][Nn]['\u2019][Tt](?![-\\w\u2013\xad])", u'\\1 doesn\u2019t', u'Did you mean:', u'option(LOCALE,"grammar")'], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:[,.]\\d+)*) (\xb0F|Fahrenheit)(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "F", "C", u" \xb0C", ".", ",")', u'Convert to Celsius:', u'option(LOCALE,"metric")'], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:[,.]\\d+)*) (\xb0C|Celsius)(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "C", "F", u" \xb0F", ".", ",")', u'Convert to Fahrenheit:', u'option(LOCALE,"nonmetric")'], [u'(?u)(?<![
 -\\w\u2013.,\xad])([-\u2212]?\\d+(?:[,.]\\d+)*(?: 1/2| ?\xbd)?) (ft|foot|feet)(?! [1-9])(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "ft", "cm", " cm", ".", ",") + "\\n" + measurement(m.group(1), "ft", "m", " m", ".", ",")', u'Convert to metric:', u'option(LOCALE,"metric")'], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:[,.]\\d+)*(?: 1/2| ?\xbd)?) ft[.]? ([0-9]+(?: 1/2| ?\xbd)?) in(?![-\\w\u2013\xad])', u'= measurement(m.group(1) + "*12+" + m.group(2), "in", "cm", " cm", ".", ",") + "\\n" + measurement(m.group(1) + "*12+" + m.group(2), "in", "m", " m", ".", ",")', u'Convert to metric:', u'option(LOCALE,"metric")'], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:[,.]\\d+)*(?: 1/2| ?\xbd)?) in(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "in", "mm", " mm", ".", ",") + "\\n" + measurement(m.group(1), "in", "cm", " cm", ".", ",") + "\\n" + measurement(m.group(1), "in", "m", " m", ".", ",")', u'Convert to metric:', u'option(LOCALE,"metric")'], [u'(?u)(?<![-\\w\u201
 3.,\xad])([-\u2212]?\\d+(?:[,.]\\d+)*) mm(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "mm", "in", " in", ".", ",")', u'Convert from metric:', u'option(LOCALE,"nonmetric")'], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:[,.]\\d+)*) cm(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "cm", "in", " in", ".", ",") + "\\n" + measurement(m.group(1), "cm", "ft", " ft", ".", ",")', u'Convert from metric:', u'option(LOCALE,"nonmetric")'], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:[,.]\\d+)*) (m|meter|metre)(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "m", "in", " in", ".", ",") + "\\n" + measurement(m.group(1), "m", "ft", " ft", ".", ",") + "\\n" + measurement(m.group(1), "m", "mi", " mi", ".", ",")', u'Convert from metric:', u'option(LOCALE,"nonmetric")'], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:[,.]\\d+)*(?: 1/2| ?\xbd)?) miles?(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "mi", "m", " m", ".", ",") + "\\n" + measurement(m.group(1), "mi", "km", " 
 km", ".", ",")', u'Convert to metric:', u'option(LOCALE,"metric")'], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:[,.]\\d+)*) km(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "km", "mi", " mi", ".", ",")', u'Convert to miles:', u'option(LOCALE,"nonmetric")'], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:,\\d+)?) (yd|yards?)(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "yd", "m", " m", ".", ",")', u'Convert to metric:', u'option(LOCALE,"metric")'], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:,\\d+)?) (gal(lons?)?)(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "gal", "l", " l", ".", ",") + "\\n" + measurement(m.group(1), "uk_gal", "l", " l (in UK)", ".", ",")', u'Convert to metric:', u'option(LOCALE,"metric")'], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:,\\d+)?) (pint)(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "pt", "dl", " dl", ".", ",") + "\\n" + measurement(m.group(1), "uk_pt", "dl", " dl (in UK)", ".", ",") + "\\n" + measurement(m.group
 (1), "pt", "l", " l", ".", ",") + "\\n" + measurement(m.group(1), "uk_pt", "l", " l (in UK)", ".", ",")', u'Convert to metric:', u'option(LOCALE,"metric")'], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:,\\d+)?) (l|L|litres?|liters?)(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "l", "gal", " gal", ".", ",") + "\\n" + measurement(m.group(1), "l", "gal", " gal (in UK)", ".", ",")', u'Convert to gallons:', u'option(LOCALE,"nonmetric")'], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:[,.]\\d+)*) lbs?[.]?(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "lbm", "kg", " kg", ".", ",")', u'Convert to metric:', u'option(LOCALE,"metric")'], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:[,.]\\d+)*) kg[.]?(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "kg", "lbm", " lb", ".", ",")', u'Convert to pounds:', u'option(LOCALE,"nonmetric")'], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:[,.]\\d+)*) mph(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "mph", "km/h", " km/h
 ", ".", ",")', u'Convert to km/hour:', u'option(LOCALE,"metric")'], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:[,.]\\d+)*) km/h(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "km/h", "mph", " mph", ".", ",")', u'Convert to miles/hour:', u'option(LOCALE,"nonmetric")']]
\ No newline at end of file
+# -*- encoding: UTF-8 -*-
+dic = [[u'(?u)(?<![-\\w\u2013.,\xad])and and(?![-\\w\u2013\xad])', u'and', u'Did you mean:', False, 0], [u'(?u)(?<![-\\w\u2013.,\xad])or or(?![-\\w\u2013\xad])', u'or', u'Did you mean:', False, 0], [u'(?u)(?<![-\\w\u2013.,\xad])for for(?![-\\w\u2013\xad])', u'for', u'Did you mean:', False, 0], [u'(?u)(?<![-\\w\u2013.,\xad])the the(?![-\\w\u2013\xad])', u'the', u'Did you mean:', False, 0], [u'(?iu)(?<![-\\w\u2013.,\xad])[Yy][Ii][Nn][Gg] [Aa][Nn][Dd] [Yy][Aa][Nn][Gg](?![-\\w\u2013\xad])', u'yin and yang', u'Did you mean:', False, 0], [u'(?iu)(?<![-\\w\u2013.,\xad])[Ss][Cc][Oo][Tt] [Ff][Rr][Ee][Ee](?![-\\w\u2013\xad])', u'scot-free\\nscotfree', u'Did you mean:', False, 0], [u"(?iu)(?<![-\\w\u2013.,\xad])([Yy][Oo][Uu][Rr]|[Hh][Ee][Rr]|[Oo][Uu][Rr]|[Tt][Hh][Ee][Ii][Rr])['\u2019][Ss](?![-\\w\u2013\xad])", u'\\1s', u'Possessive pronoun: \\n http://en.wikipedia.org/wiki/Possessive_pronoun', False, 0], [u'(?u)(?<![-\\w\u2013.,\xad])(?P<a_1>[Aa])n(?P<_>[ ][\'\u2018"\u201c]?)(?P<vow_1>
 [aeiouAEIOU]\\w*)(?P<etc_1>[-\u2013\'\u2019\\w]*)(?![-\\w\u2013\xad])', u'\\g<a_1>\\g<_>\\g<vow_1>\\g<etc_1>', u'Did you mean: \\n http://en.wikipedia.org/wiki/English_articles#Distinction_between_a_and_an', u'm.group("vow_1") in aA or m.group("vow_1").lower() in aA', 0], [u'(?u)(?<![-\\w\u2013.,\xad])a(?P<_>[ ][\'\u2018"\u201c]?)(?P<vow_1>[aeiouAEIOU]\\w*)(?P<etc_1>[-\u2013\'\u2019\\w]*)(?![-\\w\u2013\xad])', u'an\\g<_>\\g<vow_1>\\g<etc_1>', u'Bad article? \\n http://en.wikipedia.org/wiki/English_articles#Distinction_between_a_and_an', u'(m.group("vow_1") <> m.group("vow_1").upper()) and not (m.group("vow_1") in aA or m.group("vow_1").lower() in aA) and spell(LOCALE,m.group("vow_1"))', 0], [u'(?u)(?<![-\\w\u2013.,\xad])a(?P<_>[ ][\'\u2018"\u201c]?)(?P<con_1>[bcdfghj-np-tv-zBCDFGHJ-NP-TV-Z]\\w*)(?P<etc_1>[-\u2013\'\u2019\\w]*)(?![-\\w\u2013\xad])', u'an\\g<_>\\g<con_1>\\g<etc_1>', u'Did you mean: \\n http://en.wikipedia.org/wiki/English_articles#Distinction_between_a_and_an'
 , u'm.group("con_1") in aAN or m.group("con_1").lower() in aAN', 0], [u'(?u)(?<![-\\w\u2013.,\xad])(?P<a_1>[Aa])n(?P<_>[ ][\'\u2018"\u201c]?)(?P<con_1>[bcdfghj-np-tv-zBCDFGHJ-NP-TV-Z]\\w*)(?P<etc_1>[-\u2013\'\u2019\\w]*)(?![-\\w\u2013\xad])', u'\\g<a_1>\\g<_>\\g<con_1>\\g<etc_1>', u'Bad article? \\n http://en.wikipedia.org/wiki/English_articles#Distinction_between_a_and_an', u'(m.group("con_1") <> m.group("con_1").upper()) and not (m.group("con_1") in aA or m.group("con_1").lower() in aAN) and not m.group("con_1") in aB and spell(LOCALE,m.group("con_1"))', 0], [u'(?u)((?<=[!?.] )|^)A(?P<_>[ ][\'\u2018"\u201c]?)(?P<vow_1>[aeiouAEIOU]\\w*)(?P<etc_1>[-\u2013\'\u2019\\w]*)(?![-\\w\u2013\xad])', u'An\\g<_>\\g<vow_1>\\g<etc_1>', u'Bad article? \\n http://en.wikipedia.org/wiki/English_articles#Distinction_between_a_and_an', u'(m.group("vow_1") <> m.group("vow_1").upper()) and not (m.group("vow_1") in aA or m.group("vow_1").lower() in aA) and spell(LOCALE,m.group("vow_1"))', 0], [u'
 (?u)((?<=[!?.] )|^)A(?P<_>[ ][\'\u2018"\u201c]?)(?P<con_1>[bcdfghj-np-tv-zBCDFGHJ-NP-TV-Z]\\w*)(?P<etc_1>[-\u2013\'\u2019\\w]*)(?![-\\w\u2013\xad])', u'An\\g<_>\\g<con_1>\\g<etc_1>', u'Did you mean: \\n http://en.wikipedia.org/wiki/English_articles#Distinction_between_a_and_an', u'm.group("con_1") in aAN or m.group("con_1").lower() in aAN', 0], [u'(?u)(?<![-\\w\u2013.,\xad])a(?P<_>[ ][\'\u2018"\u201c]?)(?P<nvow_1>(8[0-9]*|1[18](000)*)(th)?)(?P<etc_1>[-\u2013\'\u2019\\w]*)(?![-\\w\u2013\xad])', u'an\\g<_>\\g<nvow_1>\\g<etc_1>', u'Did you mean: \\n http://en.wikipedia.org/wiki/English_articles#Distinction_between_a_and_an', False, 0], [u'(?u)((?<=[!?.] )|^)A(?P<_>[ ][\'\u2018"\u201c]?)(?P<nvow_1>(8[0-9]*|1[18](000)*)(th)?)(?P<etc_1>[-\u2013\'\u2019\\w]*)(?![-\\w\u2013\xad])', u'An\\g<_>\\g<nvow_1>\\g<etc_1>', u'Did you mean: \\n http://en.wikipedia.org/wiki/English_articles#Distinction_between_a_and_an', False, 0], [u'(?u)(?<![-\\w\u2013.,\xad])(?P<a_1>[Aa])n(?P<_>[ ][\'\u2018
 "\u201c]?)(?P<ncon_1>[0-79][0-9]*)(?P<etc_1>[-\u2013\'\u2019\\w]*)(?![-\\w\u2013\xad])', u'\\g<a_1>\\g<_>\\g<ncon_1>\\g<etc_1>', u'Did you mean: \\n http://en.wikipedia.org/wiki/English_articles#Distinction_between_a_and_an', u'not m.group("ncon_1")[:2] in ["11", "18"]', 0], [u'(?u)(?<![-\\w\u2013.,\xad])(^)(?P<low_1>[a-z]+)(?![-\\w\u2013\xad])', u'= m.group("low_1").capitalize()', u'Missing capitalization?', u'paralcap.search(TEXT) and not abbrev.search(TEXT)', 0], [u'(?u)((?<=[!?.] )|^)(?P<low_1>[a-z]+)(?![-\\w\u2013\xad])', u'= m.group("low_1").capitalize()', u'Missing capitalization?', u'option(LOCALE,"cap") and not abbrev.search(TEXT)', 0], [u'(?u) ([.?!,:;)\u201d\\]])\\b', u'\\1 ', u'Reversed space and punctuation?', False, 0], [u'(?u) +[.]', u'.', u'Extra space before the period?', u'LOCALE.Country == "US"', 0], [u'(?u) +[.]', u'.', u'Extra space before the full stop?', u'LOCALE.Country != "US"', 0], [u'(?u) +([?!,:;)\u201d\\]])', u'\\1', u'= "Extra space before the "
  + punct[m.group(1)] + "?"', False, 0], [u'(?u)([([\u201c]) ', u'\\1', u'= "Extra space after the " + punct[m.group(1)] + "?"', False, 0], [u'(?u)\\b(---?| --? )\\b', u' \u2013 \\n\u2014', u'En dash or em dash:', u'not option(LOCALE,"ndash") and not option(LOCALE,"mdash")', 0], [u'(?u)\\b(---?| --? |\u2014)\\b', u' \u2013 ', u'En dash:', u'option(LOCALE,"ndash") and not option(LOCALE,"mdash")', 0], [u'(?u)\\b(---?| --? | \u2013 )\\b', u'\u2014', u'Em dash:', u'option(LOCALE,"mdash")', 0], [u'(?u)(?P<number_1>\\d+([.]\\d+)?)(x| x )(?P<number_2>\\d+([.]\\d+)?)', u'\\g<number_1>\xd7\\g<number_2>', u'Multiplication sign. \\n http://en.wikipedia.org/wiki/Multiplication_sign', u'option(LOCALE,"times")', 0], [u'(?u)(?P<Abc_1>[a-zA-Z]+)(?P<pun_1>[?!,:;%\u2030\u2031\u02da\u201c\u201d\u2018])(?P<Abc_2>[a-zA-Z]+)', u'\\g<Abc_1>\\g<pun_1> \\g<Abc_2>', u'Missing space?', False, 0], [u'(?u)(?P<abc_1>[a-z]+)[.](?P<ABC_1>[A-Z]+)', u'\\g<abc_1>. \\g<ABC_1>', u'Missing space?', False, 0], [u'
 (?u)[)]', u'', u'Extra closing parenthesis?', u'option(LOCALE,"pair") and not "(" in TEXT', 0], [u'(?u)[(]', u'', u'Extra opening parenthesis?', u'option(LOCALE,"pair") and TEXT[-1] in u"?!;:\u201d\u2019" and not ")" in TEXT', 0], [u'(?u)(?<![0-9])\u201d', u'', u'Extra quotation mark?', u'option(LOCALE,"pair") and not u"\u201c" in TEXT', 0], [u'(?u)(?<=[0-9])\u201d', u'\u2033\\n', u'Bad double prime or extra quotation mark?', u'option(LOCALE,"apostrophe") and not u"\u201c" in TEXT', 0], [u'(?u)\u201c', u'', u'Extra quotation mark?', u'option(LOCALE,"pair") and TEXT[-1] in u"?!;:\u201d\u2019" and not u"\u201d" in TEXT', 0], [u'(?u)[.]{3}', u'\u2026', u'Ellipsis.', u'option(LOCALE,"ellipsis")', 0], [u'(?u)\\b {2,3}(\\b|$)', u'\\1 ', u'Extra space.', u'option(LOCALE,"spaces")', 0], [u'(?u)(^|\\b|(?P<pun_1>[?!,:;%\u2030\u2031\u02da\u201c\u201d\u2018])|[.]) {2,3}(\\b|$)', u'\\1 ', u'Extra space.', u'option(LOCALE,"spaces2")', 0], [u'(?u)(^|\\b|(?P<pun_1>[?!,:;%\u2030\u2031\u02da\
 u201c\u201d\u2018])|[.]) {4,}(\\b|$)', u'\\1 \\n\t', u'Change multiple spaces to a single space or a tabulator:', u'option(LOCALE,"spaces3")', 0], [u'(?iu)[\\"\u201c\u201d\u201f\u201e]((?P<abc_1>[a-zA-Z]+)[^\\"\u201c\u201d\u201f\u201e]*)[\\"\u201c\u201f]', u'\u201c\\1\u201d', u'Quotation marks.', u'option(LOCALE,"quotation")', 0], [u'(?iu)[\\"\u201d\u201f\u201e]((?P<abc_1>[a-zA-Z]+)[^\\"\u201c\u201d\u201f\u201e]*)[\\"\u201c\u201d\u201f]', u'\u201c\\1\u201d', u'Quotation marks.', u'option(LOCALE,"quotation")', 0], [u"(?iu)'(?P<abc_1>[a-zA-Z]+)'", u'\u2018\\g<abc_1>\u2019', u'Quotation marks.', u'option(LOCALE,"apostrophe")', 0], [u'(?iu)[\\"\u201d\u201f\u201e]((?P<abc_1>[a-zA-Z]+)[^\\"\u201c\u201d\u201f\u201e]*)[\\"\u201c\u201d\u201f]', u'\u201c\\1\u201d', u'Quotation marks.', u'option(LOCALE,"apostrophe")', 0], [u"(?iu)(?P<Abc_1>[a-zA-ZA-Z]+)'(?P<w_1>\\w*)", u'\\g<Abc_1>\u2019\\g<w_1>', u'Replace typewriter apostrophe or quotation mark:', u'option(LOCALE,"apostrophe")', 0], 
 [u"(?u)(?<= )'(?P<Abc_1>[a-zA-Z]+)", u'\u2018\\g<Abc_1>\\n\u2019\\g<Abc_1>', u'Replace typewriter quotation mark or apostrophe:', u'option(LOCALE,"apostrophe")', 0], [u"(?u)^'(?P<Abc_1>[a-zA-Z]+)", u'\u2018\\g<Abc_1>\\n\u2019\\g<Abc_1>', u'Replace typewriter quotation mark or apostrophe:', u'option(LOCALE,"apostrophe")', 0], [u'(?u)\\b(?P<d2_1>\\d\\d)(?P<d_1>\\d\\d\\d)\\b', u'\\g<d2_1>,\\g<d_1>\\n\\g<d2_1>\u202f\\g<d_1>', u'Use thousand separator (common or ISO).', u'option(LOCALE,"numsep")', 0], [u'(?u)\\b(?P<D_1>\\d{1,3})(?P<d_1>\\d\\d\\d)(?P<d_2>\\d\\d\\d)\\b', u'\\g<D_1>,\\g<d_1>,\\g<d_2>\\n\\g<D_1>\u202f\\g<d_1>\u202f\\g<d_2>', u'Use thousand separators (common or ISO).', u'option(LOCALE,"numsep")', 0], [u'(?u)\\b(?P<D_1>\\d{1,3})(?P<d_1>\\d\\d\\d)(?P<d_2>\\d\\d\\d)(?P<d_3>\\d\\d\\d)\\b', u'\\g<D_1>,\\g<d_1>,\\g<d_2>,\\g<d_3>\\n\\g<D_1>\u202f\\g<d_1>\u202f\\g<d_2>\u202f\\g<d_3>', u'Use thousand separators (common or ISO).', u'option(LOCALE,"numsep")', 0], [u'(?u)(?<![-\
 \w\u2013.,\xad])(?P<Abc_1>[a-zA-Z]+) \\1(?![-\\w\u2013\xad])', u'\\g<Abc_1>', u'Word duplication?', u'option(LOCALE,"dup")', 0], [u'(?u)(?<![-\\w\u2013.,\xad])([Tt])his (?P<abc_1>[a-z]+)(?![-\\w\u2013\xad])', u'\\1hese \\g<abc_1>\\n\\1his, \\g<abc_1>', u'Did you mean:', u'option(LOCALE,"grammar") and morph(LOCALE,m.group("abc_1"), "Ns")', 0], [u"(?u)(?<![-\\w\u2013.,\xad])with it['\u2019]s(?![-\\w\u2013\xad])", u'with its\\nwith, it\u2019s', u'Did you mean:', u'option(LOCALE,"grammar")', 0], [u"(?iu)(?<![-\\w\u2013.,\xad])([Ii][Tt]|[Ss]?[Hh][Ee]) [Dd][Oo][Nn]['\u2019][Tt](?![-\\w\u2013\xad])", u'\\1 doesn\u2019t', u'Did you mean:', u'option(LOCALE,"grammar")', 0], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:[,.]\\d+)*) (\xb0F|Fahrenheit)(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "F", "C", u" \xb0C", ".", ",")', u'Convert to Celsius:', u'option(LOCALE,"metric")', 0], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:[,.]\\d+)*) (\xb0C|Celsius)(?![-\\w\u2013\xad])', 
 u'= measurement(m.group(1), "C", "F", u" \xb0F", ".", ",")', u'Convert to Fahrenheit:', u'option(LOCALE,"nonmetric")', 0], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:[,.]\\d+)*(?: 1/2| ?\xbd)?) (ft|foot|feet)(?! [1-9])(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "ft", "cm", " cm", ".", ",") + "\\n" + measurement(m.group(1), "ft", "m", " m", ".", ",")', u'Convert to metric:', u'option(LOCALE,"metric")', 0], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:[,.]\\d+)*(?: 1/2| ?\xbd)?) ft[.]? ([0-9]+(?: 1/2| ?\xbd)?) in(?![-\\w\u2013\xad])', u'= measurement(m.group(1) + "*12+" + m.group(2), "in", "cm", " cm", ".", ",") + "\\n" + measurement(m.group(1) + "*12+" + m.group(2), "in", "m", " m", ".", ",")', u'Convert to metric:', u'option(LOCALE,"metric")', 0], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:[,.]\\d+)*(?: 1/2| ?\xbd)?) in(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "in", "mm", " mm", ".", ",") + "\\n" + measurement(m.group(1), "in", "cm", " cm", ".", 
 ",") + "\\n" + measurement(m.group(1), "in", "m", " m", ".", ",")', u'Convert to metric:', u'option(LOCALE,"metric")', 0], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:[,.]\\d+)*) mm(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "mm", "in", " in", ".", ",")', u'Convert from metric:', u'option(LOCALE,"nonmetric")', 0], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:[,.]\\d+)*) cm(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "cm", "in", " in", ".", ",") + "\\n" + measurement(m.group(1), "cm", "ft", " ft", ".", ",")', u'Convert from metric:', u'option(LOCALE,"nonmetric")', 0], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:[,.]\\d+)*) (m|meter|metre)(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "m", "in", " in", ".", ",") + "\\n" + measurement(m.group(1), "m", "ft", " ft", ".", ",") + "\\n" + measurement(m.group(1), "m", "mi", " mi", ".", ",")', u'Convert from metric:', u'option(LOCALE,"nonmetric")', 0], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:[,.]\\d
 +)*(?: 1/2| ?\xbd)?) miles?(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "mi", "m", " m", ".", ",") + "\\n" + measurement(m.group(1), "mi", "km", " km", ".", ",")', u'Convert to metric:', u'option(LOCALE,"metric")', 0], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:[,.]\\d+)*) km(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "km", "mi", " mi", ".", ",")', u'Convert to miles:', u'option(LOCALE,"nonmetric")', 0], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:,\\d+)?) (yd|yards?)(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "yd", "m", " m", ".", ",")', u'Convert to metric:', u'option(LOCALE,"metric")', 0], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:,\\d+)?) (gal(lons?)?)(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "gal", "l", " l", ".", ",") + "\\n" + measurement(m.group(1), "uk_gal", "l", " l (in UK)", ".", ",")', u'Convert to metric:', u'option(LOCALE,"metric")', 0], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:,\\d+)?) (pint)(?![-\\w\u2013\xad
 ])', u'= measurement(m.group(1), "pt", "dl", " dl", ".", ",") + "\\n" + measurement(m.group(1), "uk_pt", "dl", " dl (in UK)", ".", ",") + "\\n" + measurement(m.group(1), "pt", "l", " l", ".", ",") + "\\n" + measurement(m.group(1), "uk_pt", "l", " l (in UK)", ".", ",")', u'Convert to metric:', u'option(LOCALE,"metric")', 0], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:,\\d+)?) (l|L|litres?|liters?)(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "l", "gal", " gal", ".", ",") + "\\n" + measurement(m.group(1), "l", "gal", " gal (in UK)", ".", ",")', u'Convert to gallons:', u'option(LOCALE,"nonmetric")', 0], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:[,.]\\d+)*) lbs?[.]?(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "lbm", "kg", " kg", ".", ",")', u'Convert to metric:', u'option(LOCALE,"metric")', 0], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:[,.]\\d+)*) kg[.]?(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "kg", "lbm", " lb", ".", ",")', u'Convert to pou
 nds:', u'option(LOCALE,"nonmetric")', 0], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:[,.]\\d+)*) mph(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "mph", "km/h", " km/h", ".", ",")', u'Convert to km/hour:', u'option(LOCALE,"metric")', 0], [u'(?u)(?<![-\\w\u2013.,\xad])([-\u2212]?\\d+(?:[,.]\\d+)*) km/h(?![-\\w\u2013\xad])', u'= measurement(m.group(1), "km/h", "mph", " mph", ".", ",")', u'Convert to miles/hour:', u'option(LOCALE,"nonmetric")', 0]]
+
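
The regenerated rule table keeps the layout the proofread()/get_rule() code
expects (pattern, suggestion, comment, condition) and appends a new fifth
element, 0, to every entry. Annotated, a single entry looks like the sketch
below; the meaning of the trailing slot is defined by the lightproof rule
compiler and is not spelled out in this diff, so it is left uninterpreted here.

    rule = [
        u'(?u)(?<![-\\w\u2013.,\xad])and and(?![-\\w\u2013\xad])',
                             # regex; an (?iu) prefix also triggers the
                             # capitalization handling seen in cap()/get_rule()
        u'and',              # suggestion, or "= <expr>" evaluated on the match
        u'Did you mean:',    # comment; "\\n" separates short and full comments
        False,               # optional condition expression, or False
        0,                   # new fifth field added by this commit
    ]
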
diff --git a/dictionaries/en/pythonpath/lightproof_handler_en.py b/dictionaries/en/pythonpath/lightproof_handler_en.py
index e8c458a..f69ccf1 100644
--- a/dictionaries/en/pythonpath/lightproof_handler_en.py
+++ b/dictionaries/en/pythonpath/lightproof_handler_en.py
@@ -1,8 +1,6 @@
 import uno
 import unohelper
-
-from lightproof_opts_en import lopts
-from lightproof_opts_en import lopts_default
+import lightproof_opts_en
 from lightproof_impl_en import pkg
 
 from com.sun.star.lang import XServiceInfo
@@ -14,7 +12,7 @@ options = {}
 def load(context):
     try:
         l = LightproofOptionsEventHandler(context)
-        for i in lopts:
+        for i in lightproof_opts_en.lopts:
             l.load(i)
     except:
         pass
@@ -24,9 +22,9 @@ def get_option(page, option):
         return options[page + "," + option]
     except:
         try:
-                return options[page[:2] + "," + option]
+            return options[page[:2] + "," + option]
         except:
-                return 0
+            return 0
 
 def set_option(page, option, value):
     options[page + "," + option] = int(value)
@@ -45,7 +43,7 @@ class LightproofOptionsEventHandler( unohelper.Base, XServiceInfo, XContainerWin
     # XContainerWindowEventHandler
     def callHandlerMethod(self, aWindow, aEventObject, sMethod):
         if sMethod == "external_event":
-                return self.handleExternalEvent(aWindow, aEventObject)
+            return self.handleExternalEvent(aWindow, aEventObject)
 
     def getSupportedMethodNames(self):
         return ("external_event", )
@@ -53,65 +51,65 @@ class LightproofOptionsEventHandler( unohelper.Base, XServiceInfo, XContainerWin
     def handleExternalEvent(self, aWindow, aEventObject):
         sMethod = aEventObject
         if sMethod == "ok":
-                self.saveData(aWindow)
+            self.saveData(aWindow)
         elif sMethod == "back" or sMethod == "initialize":
-                self.loadData(aWindow)
+            self.loadData(aWindow)
         return True
 
     def load(self, sWindowName):
         child = self.getChild(sWindowName)
-        for i in lopts[sWindowName]:
-                sValue = child.getPropertyValue(i)
-                if sValue == '':
-                    if i in lopts_default[sWindowName]:
-                        sValue = 1
-                    else:
-                        sValue = 0
-                set_option(sWindowName, i, sValue)
+        for i in lightproof_opts_en.lopts[sWindowName]:
+            sValue = child.getPropertyValue(i)
+            if sValue == '':
+                if i in lightproof_opts_en.lopts_default[sWindowName]:
+                    sValue = 1
+                else:
+                    sValue = 0
+            set_option(sWindowName, i, sValue)
 
     def loadData(self, aWindow):
         sWindowName = self.getWindowName(aWindow)
         if (sWindowName == None):
-                return
+            return
         child = self.getChild(sWindowName)
-        for i in lopts[sWindowName]:
-                sValue = child.getPropertyValue(i)
-                if sValue == '':
-                    if i in lopts_default[sWindowName]:
-                        sValue = 1
-                    else:
-                        sValue = 0
-                xControl = aWindow.getControl(i)
-                xControl.State = sValue
-                set_option(sWindowName, i, sValue)
+        for i in lightproof_opts_en.lopts[sWindowName]:
+            sValue = child.getPropertyValue(i)
+            if sValue == '':
+                if i in lightproof_opts_en.lopts_default[sWindowName]:
+                    sValue = 1
+                else:
+                    sValue = 0
+            xControl = aWindow.getControl(i)
+            xControl.State = sValue
+            set_option(sWindowName, i, sValue)
 
     def saveData(self, aWindow):
         sWindowName = self.getWindowName(aWindow)
         if (sWindowName == None):
-                return
+            return
         child = self.getChild(sWindowName)
-        for i in lopts[sWindowName]:
-                xControl = aWindow.getControl(i)
-                sValue = xControl.State
-                child.setPropertyValue(i, str(sValue))
-                set_option(sWindowName, i, sValue)
+        for i in lightproof_opts_en.lopts[sWindowName]:
+            xControl = aWindow.getControl(i)
+            sValue = xControl.State
+            child.setPropertyValue(i, str(sValue))
+            set_option(sWindowName, i, sValue)
         self.commitChanges()
 
     def getWindowName(self, aWindow):
         sName = aWindow.getModel().Name
-        if sName in lopts:
-                return sName
+        if sName in lightproof_opts_en.lopts:
+            return sName
         return None
 
     # XServiceInfo method implementations
     def getImplementationName (self):
-                return self.ImplementationName
+        return self.ImplementationName
 
     def supportsService(self, ServiceName):
-                return (ServiceName in self.services)
+        return (ServiceName in self.services)
 
     def getSupportedServiceNames (self):
-                return self.services
+        return self.services
 
     def getChild(self, name):
         return self.node.getByName(name)
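
Besides the re-indentation to four spaces, the handler above now reads the checkbox lists through the module object (lightproof_opts_en.lopts) rather than through names bound with "from ... import" at load time, so a reloaded options module is seen without rebinding anything. The other behaviour worth spelling out is the locale fallback in get_option(). The sketch below is self-contained and not part of the commit; the option names are made up:

    # Sketch of the option store used by lightproof_handler_en above.
    # The page/option names ("en", "grammar", "metric") are hypothetical.
    options = {}

    def set_option(page, option, value):
        options[page + "," + option] = int(value)

    def get_option(page, option):
        try:
            return options[page + "," + option]
        except KeyError:
            try:
                # fall back from a regional page ("en_US") to the
                # two-letter language page ("en")
                return options[page[:2] + "," + option]
            except KeyError:
                return 0

    set_option("en", "grammar", 1)           # enabled for every English locale
    print(get_option("en_US", "grammar"))    # prints 1, found via the "en" fallback
    print(get_option("en_US", "metric"))     # prints 0, never set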
diff --git a/dictionaries/en/pythonpath/lightproof_impl_en.py b/dictionaries/en/pythonpath/lightproof_impl_en.py
index b09902b..dd1adb0 100644
--- a/dictionaries/en/pythonpath/lightproof_impl_en.py
+++ b/dictionaries/en/pythonpath/lightproof_impl_en.py
@@ -1,6 +1,336 @@
 # -*- encoding: UTF-8 -*-
+import uno, re, sys, os, traceback
+from string import join
+from com.sun.star.text.TextMarkupType import PROOFREADING
+from com.sun.star.beans import PropertyValue
+
 pkg = "en"
 lang = "en"
 locales = {'en-GB': ['en', 'GB', ''], 'en-ZW': ['en', 'ZW', ''], 'en-PH': ['en', 'PH', ''], 'en-TT': ['en', 'TT', ''], 'en-BZ': ['en', 'BZ', ''], 'en-NA': ['en', 'NA', ''], 'en-IE': ['en', 'IE', ''], 'en-GH': ['en', 'GH', ''], 'en-US': ['en', 'US', ''], 'en-IN': ['en', 'IN', ''], 'en-BS': ['en', 'BS', ''], 'en-JM': ['en', 'JM', ''], 'en-AU': ['en', 'AU', ''], 'en-NZ': ['en', 'NZ', ''], 'en-ZA': ['en', 'ZA', ''], 'en-CA': ['en', 'CA', '']}
-version = "0.2"
-author = ""
+version = "0.4.3"
+author = "László Németh"
+name = "Lightproof grammar checker (English)"
+
+import lightproof_handler_en
+
+# loaded rules (check for Update mechanism of the editor)
+try:
+    langrule
+except NameError:
+    langrule = {}
+
+# ignored rules
+ignore = {}
+
+# cache for morphological analyses
+analyses = {}
+stems = {}
+suggestions = {}
+
+# assign Calc functions
+calcfunc = None
+
+# check settings
+def option(lang, opt):
+    return lightproof_handler_en.get_option(lang.Language + "_" + lang.Country, opt)
+
+# filtering affix fields (ds, is, ts etc.)
+def onlymorph(st):
+    if st != None:
+        st = re.sub(r"^.*(st:|po:)", r"\\1", st) # keep last word part
+        st = re.sub(r"\\b(?=[dit][sp]:)","@", st) # and its affixes
+        st = re.sub(r"(?<!@)\\b\w\w:\w+","", st).replace('@','').strip()
+    return st
+
+# if the pattern matches all analyses of the input word, 
+# return the last matched substring
+def _morph(rLoc, word, pattern, all, onlyaffix):
+    global analyses
+    if not word:
+        return None
+    if word not in analyses:
+        x = spellchecker.spell(u"<?xml?><query type='analyze'><word>" + word + "</word></query>", rLoc, ())
+        if not x:
+            return None
+        t = x.getAlternatives()
+        if not t:
+            t = [""]
+        analyses[word] = t[0].split("</a>")[:-1]
+    a = analyses[word]
+    result = None
+    p = re.compile(pattern)
+    for i in a:
+        if onlyaffix:
+            i = onlymorph(i)
+        result = p.search(i)
+        if result:
+            result = result.group(0)
+            if not all:
+                return result
+        elif all:
+            return None
+    return result
+
+def morph(rLoc, word, pattern, all=True):
+    return _morph(rLoc, word, pattern, all, False)
+
+def affix(rLoc, word, pattern, all=True):
+    return _morph(rLoc, word, pattern, all, True)
+
+def spell(rLoc, word):
+    if not word:
+        return None
+    return spellchecker.isValid(word, rLoc, ())
+
+# get the tuple of the stem of the word or an empty array
+def stem(rLoc, word):
+    global stems
+    if not word:
+        return []
+    if not word in stems:
+        x = spellchecker.spell(u"<?xml?><query type='stem'><word>" + word + "</word></query>", rLoc, ())
+        if not x:
+            return []
+        t = x.getAlternatives()
+        if not t:
+            t = []
+        stems[word] = list(t)
+    return stems[word]
+
+# get the tuple of the morphological generation of a word or an empty array
+def generate(rLoc, word, example):
+    if not word:
+        return []
+    x = spellchecker.spell(u"<?xml?><query type='generate'><word>" + word + "</word><word>" + example + "</word></query>", rLoc, ())
+    if not x:
+        return []
+    t = x.getAlternatives()
+    if not t:
+        t = []
+    return list(t)
+
+# get suggestions
+def suggest(rLoc, word):
+    global suggestions
+    if not word:
+        return word
+    if word not in suggestions:
+        x = spellchecker.spell("_" + word, rLoc, ())
+        if not x:
+            return word
+        t = x.getAlternatives()
+        suggestions[word] = join(t, "\\n")
+    return suggestions[word]
+
+# get the nth word of the input string or an empty string
+def word(s, n):
+    a = re.match("(?u)( [-.\w%%]+){" + str(n-1) + "}( [-.\w%%]+)", s)
+    if not a:
+        return ''
+    return a.group(2)[1:]
+
+# get the (-)nth word of the input string or an empty string
+def wordmin(s, n):
+    a = re.search("(?u)([-.\w%%]+ )([-.\w%%]+ ){" + str(n-1) + "}$", s)
+    if not a:
+        return ''
+    return a.group(1)[:-1]
+
+def calc(funcname, par):
+    global calcfunc
+    global SMGR
+    if calcfunc == None:
+        calcfunc = SMGR.createInstance( "com.sun.star.sheet.FunctionAccess")
+        if calcfunc == None:
+                return None
+    return calcfunc.callFunction(funcname, par)
+
+def proofread( nDocId, TEXT, LOCALE, nStartOfSentencePos, nSuggestedSentenceEndPos, rProperties ):
+    global ignore
+    aErrs = []
+    s = TEXT[nStartOfSentencePos:nSuggestedSentenceEndPos]
+    for i in get_rule(LOCALE).dic:
+        # 0: regex,  1: replacement,  2: message,  3: condition,  4: ngroup,  (5: oldline),  6: case sensitive ?
+        if i[0] and not str(i[0]) in ignore:
+            for m in i[0].finditer(s):
+                try:
+                    if not i[3] or eval(i[3]):
+                        aErr = uno.createUnoStruct( "com.sun.star.linguistic2.SingleProofreadingError" )
+                        aErr.nErrorStart        = nStartOfSentencePos + m.start(i[4]) # nStartOfSentencePos
+                        aErr.nErrorLength       = m.end(i[4]) - m.start(i[4])
+                        aErr.nErrorType         = PROOFREADING
+                        aErr.aRuleIdentifier    = str(i[0])
+                        iscap = (i[-1] and m.group(i[4])[0:1].isupper())
+                        if i[1][0:1] == "=":
+                            aErr.aSuggestions = tuple(cap(eval(i[1][1:]).replace('|', "\n").split("\n"), iscap, LOCALE))
+                        elif i[1] == "_":
+                            aErr.aSuggestions = ()
+                        else:
+                            aErr.aSuggestions = tuple(cap(m.expand(i[1]).replace('|', "\n").split("\n"), iscap, LOCALE))
+                        comment = i[2]
+                        if comment[0:1] == "=":
+                            comment = eval(comment[1:])
+                        else:
+                            comment = m.expand(comment)
+                        aErr.aShortComment      = comment.replace('|', '\n').replace('\\n', '\n').split("\n")[0].strip()
+                        aErr.aFullComment       = comment.replace('|', '\n').replace('\\n', '\n').split("\n")[-1].strip()
+                        if "://" in aErr.aFullComment:
+                            p = PropertyValue()
+                            p.Name = "FullCommentURL"
+                            p.Value = aErr.aFullComment
+                            aErr.aFullComment = aErr.aShortComment
+                            aErr.aProperties        = (p,)
+                        else:
+                            aErr.aProperties        = ()
+                        aErrs = aErrs + [aErr]
+                except Exception as e:
+                    if len(i) == 7:
+                        raise Exception(str(e), i[5])
+                    raise
+
+    return tuple(aErrs)
+
+def cap(a, iscap, rLoc):
+    if iscap:
+        for i in range(0, len(a)):
+            if a[i][0:1] == "i":
+                if rLoc.Language == "tr" or rLoc.Language == "az":
+                    a[i] = u"\u0130" + a[i][1:]
+                elif a[i][1:2] == "j" and rLoc.Language == "nl":
+                    a[i] = "IJ" + a[i][2:]
+                else:
+                    a[i] = "I" + a[i][1:]
+            else:
+                a[i] = a[i].capitalize()
+    return a
+
+def compile_rules(dic):
+    # compile regular expressions
+    for i in dic:
+        try:
+            if re.compile("[(][?]iu[)]").match(i[0]):
+                i += [True]
+                i[0] = re.sub("[(][?]iu[)]", "(?u)", i[0])
+            else:
+                i += [False]
+            i[0] = re.compile(i[0])
+        except:
+            if 'PYUNO_LOGLEVEL' in os.environ:
+                print("Lightproof: bad regular expression: ", traceback.format_exc())
+            i[0] = None
+
+def get_rule(loc):
+    try:
+        return langrule[pkg]
+    except:
+        langrule[pkg] = __import__("lightproof_" + pkg)
+        compile_rules(langrule[pkg].dic)
+    return langrule[pkg]
+
+def get_path():
+    return os.path.join(os.path.dirname(sys.modules[__name__].__file__), __name__ + ".py")
+
+# [code]
+
+# pattern matching for common English abbreviations
+abbrev = re.compile("(?i)\\b([a-z]|acct|approx|appt|apr|apt|assoc|asst|aug|ave|avg|co(nt|rp)?|ct|dec|defn|dept|dr|eg|equip|esp|est|etc|excl|ext|feb|fri|ft|govt?|hrs?|ib(id)?|ie|in(c|t)?|jan|jr|jul|lit|ln|mar|max|mi(n|sc)?|mon|Mrs?|mun|natl?|neg?|no(rm|s|v)?|nw|obj|oct|org|orig|pl|pos|prev|proj|psi|qty|rd|rec|rel|reqd?|resp|rev|sat|sci|se(p|pt)?|spec(if)?|sq|sr|st|subj|sun|sw|temp|thurs|tot|tues|univ|var|vs)\\.")
+
+# pattern for paragraph checking
+paralcap = re.compile(u"(?u)^[a-z].*[.?!] [A-Z].*[.?!][)\u201d]?$")
+
+
+punct = { "?": "question mark", "!": "exclamation mark",
+  ",": "comma", ":": "colon", ";": "semicolon",
+  "(": "opening parenthesis", ")": "closing parenthesis",
+  "[": "opening square bracket", "]": "closing square bracket",
+  u"\u201c": "opening quotation mark", u"\u201d": "closing quotation mark"}
+
+
+aA = set(["eucalypti", "eucalyptus", "Eucharist", "Eucharistic",
+"euchre", "euchred", "euchring", "Euclid", "euclidean", "Eudora",
+"eugene", "Eugenia", "eugenic", "eugenically", "eugenicist",
+"eugenicists", "eugenics", "Eugenio", "eukaryote", "Eula", "eulogies",
+"eulogist", "eulogists", "eulogistic", "eulogized", "eulogizer",
+"eulogizers", "eulogizing", "eulogy", "eulogies", "Eunice", "eunuch",
+"eunuchs", "Euphemia", "euphemism", "euphemisms", "euphemist",
+"euphemists", "euphemistic", "euphemistically", "euphonious",
+"euphoniously", "euphonium", "euphony", "euphoria", "euphoric",
+"Euphrates", "euphuism", "Eurasia", "Eurasian", "Eurasians", "eureka",
+"eurekas", "eurhythmic", "eurhythmy", "Euridyce", "Euripides", "euripus",
+"Euro", "Eurocentric", "Euroclydon", "Eurocommunism", "Eurocrat",
+"eurodollar", "Eurodollar", "Eurodollars", "Euromarket", "Europa",
+"Europe", "European", "Europeanisation", "Europeanise", "Europeanised",
+"Europeanization", "Europeanize", "Europeanized", "Europeans", "europium",
+"Eurovision", "Eustace", "Eustachian", "Eustacia", "euthanasia",
+"Ewart", "ewe", "Ewell", "ewer", "ewers", "Ewing", "once", "one",
+"oneness", "ones", "oneself", "onetime", "oneway", "oneyear", "u",
+"U", "UART", "ubiquitous", "ubiquity", "Udale", "Udall", "UEFA",
+"Uganda", "Ugandan", "ugric", "UK", "ukase", "Ukraine", "Ukrainian",
+"Ukrainians", "ukulele", "Ula", "ululated", "ululation", "Ulysses",
+"UN", "unanimity", "unanimous", "unanimously", "unary", "Unesco",
+"UNESCO", "UNHCR", "uni", "unicameral", "unicameralism", "Unicef",
+"UNICEF", "unicellular", "Unicode", "unicorn", "unicorns", "unicycle",
+"unicyclist", "unicyclists", "unidimensional", "unidirectional",
+"unidirectionality", "unifiable", "unification", "unified", "unifier",
+"unifilar", "uniform", "uniformally", "uniformed", "uniformer",
+"uniforming", "uniformisation", "uniformise", "uniformitarian",
+"uniformitarianism", "uniformity", "uniformly", "uniformness", "uniforms",
+"unify", "unifying", "unijugate", "unilateral", "unilateralisation",
+"unilateralise", "unilateralism", "unilateralist", "unilaterally",
+"unilinear", "unilingual", "uniliteral", "uniliteralism", "uniliteralist",
+"unimodal", "union", "unionism", "unionist", "unionists", "unionisation",
+"unionise", "unionised", "unionising", "unionization", "unionize",
+"unionized", "unionizing", "unions", "unipolar", "uniprocessor",
+"unique", "uniquely", "uniqueness", "uniquer", "Uniroyal", "unisex",
+"unison", "Unisys", "unit", "Unitarian", "Unitarianism", "Unitarians",
+"unitary", "unite", "united", "unitedly", "uniter", "unites", "uniting",
+"unitize", "unitizing", "unitless", "units", "unity", "univ", "Univac",
+"univalent", "univalve", "univariate", "universal", "universalisation",
+"universalise", "universalised", "universaliser", "universalisers",
+"universalising", "universalism", "universalist", "universalistic",
+"universality", "universalisation", "universalization", "universalize",
+"universalized", "universalizer", "universalizers", "universalizing",
+"universally", "universalness", "universe", "universes", "universities",
+"university", "univocal", "Unix", "uracil", "Urals", "uranium", "Uranus",
+"uranyl", "urate", "urea", "uremia", "uremic", "ureter", "urethane",
+"urethra", "urethral", "urethritis", "Urey", "Uri", "uric", "urinal",
+"urinalysis", "urinary", "urinated", "urinating", "urination", "urine",
+"urogenital", "urokinase", "urologist", "urologists", "urology",
+"Uruguay", "Uruguayan", "Uruguayans", "US", "USA", "usability",
+"usable", "usably", "usage",
+"usages", "use", "used", "useful", "usefulness", "usefully", "useless",
+"uselessly", "uselessness", "Usenet", "user", "users", "uses", "using",
+"usual", "usually", "usurer", "usurers", "usuress", "usurial", "usurious",
+"usurp", "usurpation", "usurped", "usurper", "usurping", "usurps",
+"usury", "Utah", "utensil", "utensils", "uterine", "uterus", "Utica",
+"utilitarian", "utilitarianism", "utilities", "utility", "utilizable",
+"utilization", "utilize", "utilized", "utilizes", "utilizing", "utopia",
+"utopian", "utopians", "utopias", "Utrecht", "Uttoxeter", "uvula",
+"uvular"])
+
+aAN = set(["f", "F", "FBI", "FDA", "heir", "heirdom", "heired",
+"heirer", "heiress", "heiring", "heirloom", "heirship", "honest",
+"honester", "honestly", "honesty", "honor", "honorable", "honorableness",
+"honorably", "honorarium", "honorary", "honored", "honorer", "honorific",
+"honoring", "honors", "honour", "honourable", "honourableness",
+"honourably", "honourarium", "honourary", "honoured", "honourer",
+"honourific", "honouring", "Honours", "hors", "hour", "hourglass", "hourlong",
+"hourly", "hours", "l", "L", "LCD", "m", "M", "MBA", "MP", "mpg", "mph",
+"MRI", "MSc", "MTV", "n", "N", "NBA", "NBC", "NFL", "NGO", "NHL", "r",
+"R", "s", "S", "SMS", "sos", "SOS", "SPF", "std", "STD", "SUV", "x",
+"X", "XML"])
+
+aB = set(["H", "habitual", "hallucination", "haute", "hauteur", "herb", "herbaceous", "herbal",
+"herbalist", "herbalism", "heroic", "hilarious", "historian", "historic", "historical",
+"homage", "homophone", "horrendous", "hospitable", "horrific", "hotel", "hypothesis", "Xmas"])
+
+def measurement(mnum, min, mout, mstr, decimal, remove):
+    if min == "ft" or min == "in" or min == "mi":
+        mnum = mnum.replace(" 1/2", ".5").replace(u" \xbd", ".5").replace(u"\xbd",".5")
+    m = calc("CONVERT_ADD", (float(eval(mnum.replace(remove, "").replace(decimal, ".").replace(u"\u2212", "-"))), min, mout))
+    a = list(set([str(calc("ROUND", (m, 0)))[:-2], str(calc("ROUND", (m, 1))), str(calc("ROUND", (m, 2))), str(m)])) # remove duplicated rounded items
+    a.sort(lambda x, y: len(x) - len(y)) # sort by string length
+    return join(a, mstr + "\n").replace(".", decimal).replace("-", u"\u2212") + mstr
+
+
+
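
The table consumed by proofread() above holds one rule per entry, in the order given in the comment: regular expression, replacement, message, optional Python condition, group index, plus the case-sensitivity flag appended by compile_rules(). The aA and aAN word lists feed the article rules: words spelled with an initial vowel but pronounced with a consonant ("unicorn", "eulogy") take "a", and consonant-spelled words pronounced with a vowel ("hour", "MBA") take "an", so the real rules consult them before flagging anything. The stripped-down sketch below uses a single made-up rule and leaves out the UNO plumbing, the "=" evaluated replacements, the "|" alternative separator, cap() and the ignore list; it only shows how such a table is matched against a sentence:

    # Minimal, UNO-free illustration of the rule table layout used above:
    # [regex, replacement, message, condition, ngroup].  The rule is invented
    # for this example and is not one of the shipped English rules.
    import re

    dic = [
        [u"(?u)\\b(a)\\s+(?=[aeiouAEIOU]\\w)", u"an", u"Did you mean \u201can\u201d?", None, 1],
    ]

    def check(sentence):
        hits = []
        for rule in dic:
            pattern = re.compile(rule[0])
            for m in pattern.finditer(sentence):
                if not rule[3] or eval(rule[3]):
                    start = m.start(rule[4])
                    length = m.end(rule[4]) - m.start(rule[4])
                    suggestions = m.expand(rule[1]).split("\n")
                    hits.append((start, length, suggestions, rule[2]))
        return hits

    print(check(u"He bought a apple."))
    # reports one hit: offset 10, length 1, suggestion "an"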
diff --git a/dictionaries/hu_HU/Lightproof.py b/dictionaries/hu_HU/Lightproof.py
index 9f19420..8bb0e41 100644
--- a/dictionaries/hu_HU/Lightproof.py
+++ b/dictionaries/hu_HU/Lightproof.py
@@ -1,235 +1,26 @@
 # -*- encoding: UTF-8 -*-
 # Lightproof grammar checker for LibreOffice and OpenOffice.org
-# http://launchpad.net/lightproof
-# version 1.4.4 (2011-12-15)
-#
-# 2009-2011 (c) László Németh (nemeth at numbertext org), license: MPL 1.1 / GPLv3+ / LGPLv3+
+# 2009-2012 (c) László Németh (nemeth at numbertext org), license: MPL 1.1 / GPLv3+ / LGPLv3+
 
-import uno, unohelper, sys, traceback, re
+import uno, unohelper, os, sys, traceback
 from lightproof_impl_hu_HU import locales
 from lightproof_impl_hu_HU import pkg
+import lightproof_impl_hu_HU
 import lightproof_handler_hu_HU
-from string import join
 
 from com.sun.star.linguistic2 import XProofreader, XSupportedLocales
 from com.sun.star.linguistic2 import ProofreadingResult, SingleProofreadingError
 from com.sun.star.lang import XServiceInfo, XServiceName, XServiceDisplayName
 from com.sun.star.lang import Locale
-from com.sun.star.text.TextMarkupType import PROOFREADING
-from com.sun.star.beans import PropertyValue
-
-# loaded rules
-langrule = {}
-# ignored rules
-ignore = {}
-
-# cache for morphogical analyses
-analyses = {}
-stems = {}
-suggestions = {}
-
-# assign Calc functions
-calcfunc = None
-
-# check settings
-def option(lang, opt):
-    return lightproof_handler_hu_HU.get_option(lang.Language + "_" + lang.Country, opt)
-
-# filtering affix fields (ds, is, ts etc.)
-def onlymorph(st):
-    if st != None:
-        st = re.sub(r"^.*(st:|po:)", r"\1", st) # keep last word part
-        st = re.sub(r"\b(?=[dit][sp]:)","@", st) # and its affixes
-        st = re.sub(r"(?<!@)\b\w\w:\w+","", st).replace('@','').strip()
-    return st
-
-# if the pattern matches all analyses of the input word, 
-# return the last matched substring
-def _morph(rLoc, word, pattern, all, onlyaffix):
-    global analyses
-    if word == None:
-        return None
-    if word not in analyses:
-        x = spellchecker.spell(u"<?xml?><query type='analyze'><word>" + word + "</word></query>", rLoc, ())
-        if not x:
-            return None
-        t = x.getAlternatives()
-        if not t:
-            t = [""]
-        analyses[word] = t[0]
-    a = analyses[word].split("</a>")[:-1]
-    result = None
-    p = re.compile(pattern)
-    for i in a:
-        if onlyaffix:
-            i = onlymorph(i)
-        result = p.search(i)
-        if result:
-            result = result.group(0)
-            if not all:
-                return result
-        elif all:
-            return None
-    return result
-
-def morph(rLoc, word, pattern, all=True):
-    return _morph(rLoc, word, pattern, all, False)
-
-def affix(rLoc, word, pattern, all=True):
-    return _morph(rLoc, word, pattern, all, True)
-
-def spell(rLoc, word):
-    if word == None:
-        return None
-    return spellchecker.isValid(word, rLoc, ())
-
-# get the tuple of the stem of the word or an empty array
-def stem(rLoc, word):
-    global stems
-    if word == None:
-        return []
-    if not word in stems:
-        x = spellchecker.spell(u"<?xml?><query type='stem'><word>" + word + "</word></query>", rLoc, ())
-        if not x:
-            return []
-        t = x.getAlternatives()
-        if not t:
-            t = []
-        stems[word] = list(t)
-    return stems[word]
-
-# get the tuple of the morphological generation of a word or an empty array
-def generate(rLoc, word, example):
-    if word == None:
-        return []
-    x = spellchecker.spell(u"<?xml?><query type='generate'><word>" + word + "</word><word>" + example + "</word></query>", rLoc, ())
-    if not x:
-        return []
-    t = x.getAlternatives()
-    if not t:
-        t = []
-    return list(t)
-
-# get suggestions
-def suggest(rLoc, word):
-    global suggestions
-    if word == None:
-        return word
-    if word not in suggestions:
-        x = spellchecker.spell("_" + word, rLoc, ())
-        if not x:
-            return word
-        t = x.getAlternatives()
-        suggestions[word] = join(t, "\n")
-    return suggestions[word]
-
-# get the nth word of the input string or None
-def word(s, n):
-    a = re.match("(?u)( [-.\w%]+){" + str(n-1) + "}( [-.\w%]+)", s)
-    if not a:
-        return None
-    return a.group(2)[1:]
-
-# get the (-)nth word of the input string or None
-def wordmin(s, n):
-    a = re.search("(?u)([-.\w%]+ )([-.\w%]+ ){" + str(n-1) + "}$", s)
-    if not a:
-        return None
-    return a.group(1)[:-1]
-
-def calc(funcname, par):
-    global calcfunc
-    global SMGR
-    if calcfunc == None:
-        calcfunc = SMGR.createInstance( "com.sun.star.sheet.FunctionAccess")
-        if calcfunc == None:
-                return None
-    return calcfunc.callFunction(funcname, par)
-
-def proofread( nDocId, TEXT, LOCALE, nStartOfSentencePos, nSuggestedSentenceEndPos, rProperties ):
-    global ignore
-    aErrs = []
-    s = TEXT[nStartOfSentencePos:nSuggestedSentenceEndPos]
-    for i in get_rule(LOCALE):
-        if i[0] and not str(i[0]) in ignore:
-            for m in i[0].finditer(s):
-              if not i[3] or eval(i[3]):
-                aErr = uno.createUnoStruct( "com.sun.star.linguistic2.SingleProofreadingError" )
-                aErr.nErrorStart        = nStartOfSentencePos + m.start(0) # nStartOfSentencePos
-                aErr.nErrorLength       = m.end(0) - m.start(0)
-                aErr.nErrorType         = PROOFREADING
-                aErr.aRuleIdentifier    = str(i[0])
-                iscap = (i[4] and m.group(0)[0:1].isupper())
-                if i[1][0:1] == "=":
-                        aErr.aSuggestions = tuple(cap(eval(i[1][1:]).split("\n"), iscap, LOCALE))
-                else:
-                        aErr.aSuggestions = tuple(cap(m.expand(i[1]).split("\n"), iscap, LOCALE))
-                comment = i[2]
-                if comment[0:1] == "=":
-                        comment = eval(comment[1:])
-                aErr.aShortComment      = comment.split("\\n")[0].strip()
-                aErr.aFullComment       = comment.split("\\n")[-1].strip()
-                if "://" in aErr.aFullComment:
-                        p = PropertyValue()
-                        p.Name = "FullCommentURL"
-                        p.Value = aErr.aFullComment
-                        aErr.aFullComment = aErr.aShortComment
-                        aErr.aProperties        = (p,)
-                else:
-                        aErr.aProperties        = ()
-                aErrs = aErrs + [aErr]
-    return tuple(aErrs)
-
-def cap(a, iscap, rLoc):
-    if iscap:
-        for i in range(0, len(a)):
-            if a[i][0:1] == "i":
-                if rLoc.Language == "tr" or rLoc.Language == "az":
-                    a[i] = u"\u0130" + a[i][1:]
-                elif a[i][1:2] == "j" and rLoc.Language == "nl":
-                    a[i] = "IJ" + a[i][2:]
-                else:
-                    a[i] = "I" + a[i][1:]
-            else:
-                a[i] = a[i].capitalize()
-    return a
-
-def get_rule(rLocale):
-        module = rLocale.Language
-        if rLocale.Country != "":
-                module = module + "_" + rLocale.Country
-        try:
-                return langrule[module]
-        except:
-                try:
-                        module = rLocale.Language
-                        return langrule[module]
-                except:
-                        try:
-                                d = __import__("lightproof_" + pkg)
-                        except:
-                                print "Error: missing language data: " + module
-                                return None
-        # compile regular expressions
-        for i in d.dic:
-                try:
-                        if re.compile("[(][?]iu[)]").match(i[0]):
-                                i += [True]
-                                i[0] = re.sub("[(][?]iu[)]", "(?u)", i[0])
-                        else:
-                                i += [False]
-                        i[0] = re.compile(i[0])
-                except:
-                        print "Lightproof: bad rule -- ", i[0]
-                        i[0] = None
-        langrule[module] = d.dic
-        return langrule[module]
+# reload in obj.reload in Python 3
+try:
+    from obj import reload
+except:
+    pass
 
 class Lightproof( unohelper.Base, XProofreader, XServiceInfo, XServiceName, XServiceDisplayName, XSupportedLocales):
 
     def __init__( self, ctx, *args ):
-        global spellchecker
-        global SMGR
         self.ctx = ctx
         self.ServiceName = "com.sun.star.linguistic2.Proofreader"
         self.ImplementationName = "org.openoffice.comp.pyuno.Lightproof." + pkg
@@ -240,8 +31,9 @@ class Lightproof( unohelper.Base, XProofreader, XServiceInfo, XServiceName, XSer
             self.locales += [Locale(l[0], l[1], l[2])]
         self.locales = tuple(self.locales)
         currentContext = uno.getComponentContext()
-        SMGR = currentContext.ServiceManager
-        spellchecker = SMGR.createInstanceWithContext("com.sun.star.linguistic2.SpellChecker", currentContext)
+        lightproof_impl_hu_HU.SMGR = currentContext.ServiceManager
+        lightproof_impl_hu_HU.spellchecker = \
+            lightproof_impl_hu_HU.SMGR.createInstanceWithContext("com.sun.star.linguistic2.SpellChecker", currentContext)
         lightproof_handler_hu_HU.load(currentContext)
 
     # XServiceName method implementations
@@ -250,13 +42,13 @@ class Lightproof( unohelper.Base, XProofreader, XServiceInfo, XServiceName, XSer
 
     # XServiceInfo method implementations
     def getImplementationName (self):
-                return self.ImplementationName
+        return self.ImplementationName
 
     def supportsService(self, ServiceName):
-                return (ServiceName in self.SupportedServiceNames)
+        return (ServiceName in self.SupportedServiceNames)
 
     def getSupportedServiceNames (self):
-                return self.SupportedServiceNames
+        return self.SupportedServiceNames
 
     # XSupportedLocales
     def hasLocale(self, aLocale):
@@ -282,6 +74,48 @@ class Lightproof( unohelper.Base, XProofreader, XServiceInfo, XServiceName, XSer
         aRes.aLocale = rLocale
         aRes.nStartOfSentencePosition = nStartOfSentencePos
         aRes.nStartOfNextSentencePosition = nSuggestedSentenceEndPos
+        aRes.aProperties = ()
+        aRes.xProofreader = self
+        aRes.aErrors = ()
+        if len(rProperties) > 0 and rProperties[0].Name == "Update":
+            try:
+                import lightproof_compile_hu_HU
+                try:
+                    code = lightproof_compile_hu_HU.c(rProperties[0].Value, rLocale.Language, True)
+                except Exception as e:
+                    aRes.aText, aRes.nStartOfSentencePosition = e
+                    return aRes
+                path = lightproof_impl_hu_HU.get_path()
+                f = open(path.replace("_impl", ""), "w")
+                f.write("dic = %s" % code["rules"])
+                f.close()
+                if pkg in lightproof_impl_hu_HU.langrule:
+                    mo = lightproof_impl_hu_HU.langrule[pkg]
+                    reload(mo)
+                    lightproof_impl_hu_HU.compile_rules(mo.dic)
+                    lightproof_impl_hu_HU.langrule[pkg] = mo
+                if "code" in code:
+                    f = open(path, "r")
+                    ft = f.read()
+                    f.close()
+                    f = open(path, "w")
+                    f.write(ft[:ft.find("# [code]") + 8] + "\n" + code["code"])
+                    f.close()
+                    try:
+                        reload(lightproof_impl_hu_HU)
+                    except Exception as e:
+                        aRes.aText = e.args[0]
+                        if e.args[1][3] == "": # "expected an indented block" (end of file)
+                            aRes.nStartOfSentencePosition = len(rText.split("\n"))
+                        else:
+                            aRes.nStartOfSentencePosition = rText.split("\n").index(e.args[1][3][:-1]) + 1
+                        return aRes
+                aRes.aText = ""
+                return aRes
+            except:
+                if 'PYUNO_LOGLEVEL' in os.environ:
+                    print(traceback.format_exc())
+
         l = rText[nSuggestedSentenceEndPos:nSuggestedSentenceEndPos+1]
         while l == " ":
             aRes.nStartOfNextSentencePosition = aRes.nStartOfNextSentencePosition + 1
@@ -291,44 +125,31 @@ class Lightproof( unohelper.Base, XProofreader, XServiceInfo, XServiceName, XSer
         aRes.nBehindEndOfSentencePosition = aRes.nStartOfNextSentencePosition
 
         try:
-            aRes.aErrors = proofread( nDocId, rText, rLocale, \
+            aRes.aErrors = lightproof_impl_hu_HU.proofread( nDocId, rText, rLocale, \
                 nStartOfSentencePos, aRes.nBehindEndOfSentencePosition, rProperties)
-        except:
-            # traceback.print_exc(file=sys.stdout)
-            aRes.aErrors = ()
-        aRes.aProperties = ()
-        aRes.xProofreader = self
+        except Exception as e:
+            if len(rProperties) > 0 and rProperties[0].Name == "Debug" and len(e.args) == 2:
+                aRes.aText, aRes.nStartOfSentencePosition = e
+            else:
+                if 'PYUNO_LOGLEVEL' in os.environ:
+                    print(traceback.format_exc())
         return aRes
 
     def ignoreRule(self, rid, aLocale):
-        global ignore
-        ignore[rid] = 1
+        lightproof_impl_hu_HU.ignore[rid] = 1
 
     def resetIgnoreRules(self):
-        global ignore
-        ignore = {}
+        lightproof_impl_hu_HU.ignore = {}
 
     # XServiceDisplayName
     def getServiceDisplayName(self, aLocale):
-        return "Lightproof Grammar Checker (" + pkg + ")"
+        return lightproof_impl_hu_HU.name
 
 g_ImplementationHelper = unohelper.ImplementationHelper()
 g_ImplementationHelper.addImplementation( Lightproof, \
-        "org.openoffice.comp.pyuno.Lightproof." + pkg,
-        ("com.sun.star.linguistic2.Proofreader",),)
+    "org.openoffice.comp.pyuno.Lightproof." + pkg,
+    ("com.sun.star.linguistic2.Proofreader",),)
 
 g_ImplementationHelper.addImplementation( lightproof_handler_hu_HU.LightproofOptionsEventHandler, \
-        "org.openoffice.comp.pyuno.LightproofOptionsEventHandler." + pkg,
-        ("com.sun.star.awt.XContainerWindowEventHandler",),)
-
-abbrev=re.compile(ur"(?i)\\b([a-z\xf6\xfc\xf3\u0151\xfa\xe9\xe1\u0171\xed\xd6\xdc\xd3\u0150\xda\xc9\xc1\u0170\xcd]|\xc1e|\xc1ht|AkH|al|\xe1lt|\xe1pr|aug|Avtv|bek|Bp|br|bt|Btk|cca|ci(i|ii|v|x)?|cl(i|ii|iii|iv|ix|v|vi|vii|viii|x|xi|xii|xiii|xiv|xix|xv|xvi|xvii|xviii|xx|xxi|xxii|xxiii|xxiv|xxix|xxv|xxvi|xxvii|xxviii|xxx|xxxi|xxxii|xxxiii|xxxiv|xxxix|xxxv|xxxvi|xxxvii|xxxviii)?|Co|cv(i|ii|iii)?|cx(c|ci|cii|ciii|civ|cix|cv|cvi|cvii|cviii|i|ii|iii|iv|ix|l|li|lii|liii|liv|lix|lv|lvi|lvii|lviii|v|vi|vii|viii|x|xi|xii|xiii|xiv|xix|xv|xvi|xvii|xviii|xx|xxi|xxii|xxiii|xxiv|xxix|xxv|xxvi|xxvii|xxviii)?|cs|Csjt|Cstv|cs\xfct|dec|dk|dny|dr|du|dz(s)?|egy|\xe9k|\xc9Ksz|em|\xe9ny|\xc9pt|\xe9rk|etc|Etv|e\xfc|ev|\xe9vf|febr|felv|Flt|ford|f\u0151isk|fsz(la|t)?|Ftv|gimn|g\xf6r|gr|Gt|gy|Gyvt|habil|hg|hiv|Hjt|honv|Hpt|hrsz|hsz|Hszt|htb|id|ifj|ig(h)?|ii(i)?|ill|Inc|ind|isk|iv|ix|izr|jan|jegyz|j\xfal|j\xfan|kat|kb|Kbt|ker|kft|kgy|kht|kir|kiv|Kjt|kk(t)?|koll|korm|k\xf6v|kp|Kr|krt|Kt(v)?|ld|li(i|ii|v|x
 )?|Ltd|ltp|Ltv|luth|lv(i|ii|iii)?|lx(i|ii|iii|iv|ix|v|vi|vii|viii|x|xi|xii|xiii|xiv|xix|xv|xvi|xvii|xviii|xx|xxi|xxii|xxiii|xxiv|xxix|xxv|xxvi|xxvii|xxviii)?|ly|m\xe1j|m\xe1rc|mat|max|mb|megh|megj|MHSz|min|mk|Mo|Mt|NB|nov|ny(\xe1)?|Nyilv|nyrt|okl|okt|olv|op|orsz|ort|ov(h)?|\xf6ssz|\xd6tv|\xf6zv|Pf|pl(d)?|prof|prot|Ptk|pu|ref|rk(p)?|r\xf3m|r\xf6v|rt|sgt|spec|stb|sz(ept|erk)?|Szjt|szoc|Szt(v)?|sz\xfcl|Tbj|tc|tel|tkp|tszf|tvr|ty|ua|ui|\xfam|\xfan|uo|Ve|Vhr|vi(i|ii)?|v\xf6|vsz|Vt(v)?|xc(i|ii|iii|iv|ix|v|vi|vii|viii)?|xi(i|ii|v|x)?|xl(i|ii|iii|iv|ix|v|vi|vii|viii)?|xv(i|ii|iii)?|xx(i|ii|iii|iv|ix|v|vi|vii|viii|x|xi|xii|xiii|xiv|xix|xv|xvi|xvii|xviii)?|zrt)\\.")
-
-# pattern for paragraph checking
-paralcap = re.compile(u"(?u)^[a-z\xf6\xfc\xf3\u0151\xfa\xe9\xe1\u0171\xed].*[.?!] [A-Z\xd6\xdc\xd3\u0150\xda\xc9\xc1\u0170\xcd].*[.?!][)\u201d]?$")
-
-
-def measurement(mnum, min, mout, mstr):
-    m = calc("CONVERT_ADD", (float(mnum.replace(",", ".").replace(u"\u2212", "-")), min, mout))
-    a = list(set([str(calc("ROUND", (m, 0)))[:-2], str(calc("ROUND", (m, 1))), str(calc("ROUND", (m, 2))), str(m)])) # remove duplicated rounded items
-    a.sort(lambda x, y: len(x) - len(y)) # sort by string length
-    return join(a, mstr + "\n").replace(".", ",").replace("-", u"\u2212") + mstr
+    "org.openoffice.comp.pyuno.LightproofOptionsEventHandler." + pkg,
+    ("com.sun.star.awt.XContainerWindowEventHandler",),)
diff --git a/dictionaries/hu_HU/README_lightproof_hu_HU.txt b/dictionaries/hu_HU/README_lightproof_hu_HU.txt
index e10ee4d..6c9f4f8 100644
--- a/dictionaries/hu_HU/README_lightproof_hu_HU.txt
+++ b/dictionaries/hu_HU/README_lightproof_hu_HU.txt
@@ -1,8 +1,3 @@
-Hungarian grammar checker extension for OpenOffice.org
-
-(developed by the Lightproof grammar checker extension generator,
-see http://launchpad.net/lightproof)
-
-Hungarian grammar checker rules
-
-2009-2011 (c) László Németh, license: MPL 1.1 / GPLv3+ / LGPLv3+
+Hungarian sentence checker for LibreOffice
+see git://anongit.freedesktop.org/libreoffice/lightproof
+2009-2012 (c) László Németh, license: MPL 1.1 / GPLv3+ / LGPLv3+
diff --git a/dictionaries/hu_HU/pythonpath/lightproof_handler_hu_HU.py b/dictionaries/hu_HU/pythonpath/lightproof_handler_hu_HU.py
index 1716afb..a313d5f 100644
--- a/dictionaries/hu_HU/pythonpath/lightproof_handler_hu_HU.py
+++ b/dictionaries/hu_HU/pythonpath/lightproof_handler_hu_HU.py
@@ -1,8 +1,6 @@
 import uno
 import unohelper
-
-from lightproof_opts_hu_HU import lopts
-from lightproof_opts_hu_HU import lopts_default
+import lightproof_opts_hu_HU
 from lightproof_impl_hu_HU import pkg
 
 from com.sun.star.lang import XServiceInfo
@@ -14,7 +12,7 @@ options = {}
 def load(context):
     try:
         l = LightproofOptionsEventHandler(context)
-        for i in lopts:
+        for i in lightproof_opts_hu_HU.lopts:
             l.load(i)
     except:
         pass
@@ -24,9 +22,9 @@ def get_option(page, option):
         return options[page + "," + option]
     except:
         try:
-                return options[page[:2] + "," + option]
+            return options[page[:2] + "," + option]
         except:
-                return 0
+            return 0
 
 def set_option(page, option, value):
     options[page + "," + option] = int(value)
@@ -45,7 +43,7 @@ class LightproofOptionsEventHandler( unohelper.Base, XServiceInfo, XContainerWin
     # XContainerWindowEventHandler
     def callHandlerMethod(self, aWindow, aEventObject, sMethod):
         if sMethod == "external_event":
-                return self.handleExternalEvent(aWindow, aEventObject)
+            return self.handleExternalEvent(aWindow, aEventObject)
 
     def getSupportedMethodNames(self):
         return ("external_event", )
@@ -53,65 +51,65 @@ class LightproofOptionsEventHandler( unohelper.Base, XServiceInfo, XContainerWin
     def handleExternalEvent(self, aWindow, aEventObject):
         sMethod = aEventObject
         if sMethod == "ok":
-                self.saveData(aWindow)
+            self.saveData(aWindow)
         elif sMethod == "back" or sMethod == "initialize":
-                self.loadData(aWindow)
+            self.loadData(aWindow)
         return True
 
     def load(self, sWindowName):
         child = self.getChild(sWindowName)
-        for i in lopts[sWindowName]:
-                sValue = child.getPropertyValue(i)
-                if sValue == '':
-                    if i in lopts_default[sWindowName]:
-                        sValue = 1
-                    else:
-                        sValue = 0
-                set_option(sWindowName, i, sValue)
+        for i in lightproof_opts_hu_HU.lopts[sWindowName]:
+            sValue = child.getPropertyValue(i)
+            if sValue == '':
+                if i in lightproof_opts_hu_HU.lopts_default[sWindowName]:
+                    sValue = 1
+                else:
+                    sValue = 0
+            set_option(sWindowName, i, sValue)
 
     def loadData(self, aWindow):
         sWindowName = self.getWindowName(aWindow)
         if (sWindowName == None):
-                return
+            return
         child = self.getChild(sWindowName)
-        for i in lopts[sWindowName]:
-                sValue = child.getPropertyValue(i)
-                if sValue == '':
-                    if i in lopts_default[sWindowName]:
-                        sValue = 1
-                    else:
-                        sValue = 0
-                xControl = aWindow.getControl(i)
-                xControl.State = sValue
-                set_option(sWindowName, i, sValue)
+        for i in lightproof_opts_hu_HU.lopts[sWindowName]:
+            sValue = child.getPropertyValue(i)
+            if sValue == '':
+                if i in lightproof_opts_hu_HU.lopts_default[sWindowName]:
+                    sValue = 1
+                else:
+                    sValue = 0
+            xControl = aWindow.getControl(i)
+            xControl.State = sValue
+            set_option(sWindowName, i, sValue)
 
     def saveData(self, aWindow):
         sWindowName = self.getWindowName(aWindow)
         if (sWindowName == None):
-                return
+            return
         child = self.getChild(sWindowName)
-        for i in lopts[sWindowName]:
-                xControl = aWindow.getControl(i)
-                sValue = xControl.State
-                child.setPropertyValue(i, str(sValue))
-                set_option(sWindowName, i, sValue)
+        for i in lightproof_opts_hu_HU.lopts[sWindowName]:
+            xControl = aWindow.getControl(i)
+            sValue = xControl.State
+            child.setPropertyValue(i, str(sValue))
+            set_option(sWindowName, i, sValue)
         self.commitChanges()
 
     def getWindowName(self, aWindow):
         sName = aWindow.getModel().Name
-        if sName in lopts:
-                return sName
+        if sName in lightproof_opts_hu_HU.lopts:
+            return sName
         return None
 
     # XServiceInfo method implementations
     def getImplementationName (self):
-                return self.ImplementationName
+        return self.ImplementationName
 
     def supportsService(self, ServiceName):
-                return (ServiceName in self.services)
+        return (ServiceName in self.services)
 
     def getSupportedServiceNames (self):
-                return self.services
+        return self.services
 
     def getChild(self, name):
         return self.node.getByName(name)
diff --git a/dictionaries/hu_HU/pythonpath/lightproof_hu_HU.py b/dictionaries/hu_HU/pythonpath/lightproof_hu_HU.py
index 777b972..f864fb0 100644
--- a/dictionaries/hu_HU/pythonpath/lightproof_hu_HU.py
+++ b/dictionaries/hu_HU/pythonpath/lightproof_hu_HU.py
@@ -1 +1,3 @@

... etc. - the rest is truncated

